use core::{mem, slice};

use crate::utils::{Buffer, get_unaligned_chunk, get_aligned_chunk};
use crate::xxh64_common::*;

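//Consumes the final tail (fewer than 32 bytes): 8-byte lanes first, then a 4-byte
//lane, then single bytes, finishing with the avalanche mix. `is_aligned` selects the
//chunk reader; the streaming digest passes `true` because it reads from the
//`u64`-aligned internal buffer.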
fn finalize(mut input: u64, mut data: &[u8], is_aligned: bool) -> u64 {
    let read_chunk = if is_aligned {
        get_aligned_chunk::<u64>
    } else {
        get_unaligned_chunk::<u64>
    };
    while data.len() >= 8 {
        input ^= round(0, read_chunk(data, 0).to_le());
        data = &data[8..];
        input = input.rotate_left(27).wrapping_mul(PRIME_1).wrapping_add(PRIME_4);
    }

    let read_chunk = if is_aligned {
        get_aligned_chunk::<u32>
    } else {
        get_unaligned_chunk::<u32>
    };
    while data.len() >= 4 {
        input ^= (read_chunk(data, 0).to_le() as u64).wrapping_mul(PRIME_1);
        data = &data[4..];
        input = input.rotate_left(23).wrapping_mul(PRIME_2).wrapping_add(PRIME_3);
    }

    for byte in data.iter() {
        input ^= (*byte as u64).wrapping_mul(PRIME_5);
        input = input.rotate_left(11).wrapping_mul(PRIME_1);
    }

    avalanche(input)
}

//Initializes the four accumulator lanes from the seed, per the XXH64 specification.
#[inline(always)]
const fn init_v(seed: u64) -> (u64, u64, u64, u64) {
    (
        seed.wrapping_add(PRIME_1).wrapping_add(PRIME_2),
        seed.wrapping_add(PRIME_2),
        seed,
        seed.wrapping_sub(PRIME_1),
    )
}

//Consumes one 32-byte stripe from `$input`: one round per accumulator lane,
//then advances the slice past the stripe.
macro_rules! round_loop {
    ($input:ident => $($v:tt)+) => {
        $($v)+.0 = round($($v)+.0, get_unaligned_chunk::<u64>($input, 0).to_le());
        $($v)+.1 = round($($v)+.1, get_unaligned_chunk::<u64>($input, 8).to_le());
        $($v)+.2 = round($($v)+.2, get_unaligned_chunk::<u64>($input, 16).to_le());
        $($v)+.3 = round($($v)+.3, get_unaligned_chunk::<u64>($input, 24).to_le());
        $input = &$input[32..];
    }
}

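///Returns the XXH64 hash of `input` with the given `seed`.
///
///A minimal usage sketch. The crate path is an assumption here (it holds when this
///module ships as `xxhash_rust::xxh64`); the empty-input value is the published
///XXH64 reference vector for seed 0.
///
///```rust
///use xxhash_rust::xxh64::xxh64;
///
///assert_eq!(xxh64(b"", 0), 0xef46db3751d8e999);
///```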
pub fn xxh64(mut input: &[u8], seed: u64) -> u64 {
    let input_len = input.len() as u64;
    let mut result;

    if input.len() >= CHUNK_SIZE {
        let mut v = init_v(seed);

        loop {
            round_loop!(input => v);

            if input.len() < CHUNK_SIZE {
                break;
            }
        }

        result = v.0.rotate_left(1).wrapping_add(v.1.rotate_left(7))
                    .wrapping_add(v.2.rotate_left(12))
                    .wrapping_add(v.3.rotate_left(18));

        result = merge_round(result, v.0);
        result = merge_round(result, v.1);
        result = merge_round(result, v.2);
        result = merge_round(result, v.3);
    } else {
        result = seed.wrapping_add(PRIME_5);
    }

    result = result.wrapping_add(input_len);

    finalize(result, input, false)
}

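///XXH64 streaming hasher: accumulates input across calls to [update](Self::update),
///buffering partial 32-byte chunks, and produces the hash on [digest](Self::digest).
///
///A minimal sketch of incremental hashing (crate path assumed as above); splitting
///the input must not change the result:
///
///```rust
///use xxhash_rust::xxh64::{xxh64, Xxh64};
///
///let mut hasher = Xxh64::new(42);
///hasher.update(b"hello ");
///hasher.update(b"world");
///assert_eq!(hasher.digest(), xxh64(b"hello world", 42));
///```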
#[derive(Clone)]
pub struct Xxh64 {
    total_len: u64,
    v: (u64, u64, u64, u64),
    //Partial-chunk buffer; `u64` alignment lets `digest` use aligned reads.
    mem: [u64; 4],
    //Number of valid bytes currently buffered in `mem`.
    mem_size: u64,
}

impl Xxh64 {
    ///Creates a new hasher with the given seed.
    #[inline]
    pub const fn new(seed: u64) -> Self {
        Self {
            total_len: 0,
            v: init_v(seed),
            mem: [0, 0, 0, 0],
            mem_size: 0,
        }
    }

    ///Feeds `input` into the hasher, buffering any partial 32-byte chunk.
    pub fn update(&mut self, mut input: &[u8]) {
        self.total_len = self.total_len.wrapping_add(input.len() as u64);

        //Not enough data for a full chunk yet: stash it and wait for more.
        if (self.mem_size as usize + input.len()) < CHUNK_SIZE {
            Buffer {
                ptr: self.mem.as_mut_ptr() as *mut u8,
                len: mem::size_of_val(&self.mem),
                offset: self.mem_size as _,
            }.copy_from_slice(input);

            self.mem_size += input.len() as u64;
            return;
        }

        if self.mem_size > 0 {
            //The early return above did not fire, so `input` holds at least
            //`fill_len` bytes to complete the buffered chunk.
            let fill_len = CHUNK_SIZE - self.mem_size as usize;

            Buffer {
                ptr: self.mem.as_mut_ptr() as *mut u8,
                len: mem::size_of_val(&self.mem),
                offset: self.mem_size as _,
            }.copy_from_slice_by_size(input, fill_len);

            self.v.0 = round(self.v.0, self.mem[0].to_le());
            self.v.1 = round(self.v.1, self.mem[1].to_le());
            self.v.2 = round(self.v.2, self.mem[2].to_le());
            self.v.3 = round(self.v.3, self.mem[3].to_le());

            input = &input[fill_len..];
            self.mem_size = 0;
        }

        if input.len() >= CHUNK_SIZE {
            loop {
                round_loop!(input => self.v);

                if input.len() < CHUNK_SIZE {
                    break;
                }
            }
        }

        //Keep any remaining tail (fewer than 32 bytes) for the next update or digest.
        if input.len() > 0 {
            Buffer {
                ptr: self.mem.as_mut_ptr() as *mut u8,
                len: mem::size_of_val(&self.mem),
                offset: 0,
            }.copy_from_slice(input);
            self.mem_size = input.len() as u64;
        }
    }

    ///Computes the hash of everything fed so far; the state is not reset.
    pub fn digest(&self) -> u64 {
        let mut result;

        if self.total_len >= CHUNK_SIZE as u64 {
            result = self.v.0.rotate_left(1).wrapping_add(self.v.1.rotate_left(7))
                              .wrapping_add(self.v.2.rotate_left(12))
                              .wrapping_add(self.v.3.rotate_left(18));

            result = merge_round(result, self.v.0);
            result = merge_round(result, self.v.1);
            result = merge_round(result, self.v.2);
            result = merge_round(result, self.v.3);
        } else {
            //No full chunk was ever processed, so v.2 still holds the raw seed.
            result = self.v.2.wrapping_add(PRIME_5);
        }

        result = result.wrapping_add(self.total_len);

        //SAFETY: `mem_size` never exceeds `size_of::<[u64; 4]>()` (32 bytes).
        let input = unsafe {
            slice::from_raw_parts(self.mem.as_ptr() as *const u8, self.mem_size as usize)
        };

        //The internal buffer is `u64`-aligned, so aligned reads are sound here.
        finalize(result, input, true)
    }

    ///Resets the hasher to a fresh state with the given seed.
    #[inline]
    pub fn reset(&mut self, seed: u64) {
        self.total_len = 0;
        self.v = init_v(seed);
        self.mem_size = 0;
    }
}

impl core::hash::Hasher for Xxh64 {
    #[inline(always)]
    fn finish(&self) -> u64 {
        self.digest()
    }

    #[inline(always)]
    fn write(&mut self, input: &[u8]) {
        self.update(input)
    }
}

#[cfg(feature = "std")]
impl std::io::Write for Xxh64 {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.update(buf);
        Ok(buf.len())
    }

    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}

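//Illustrative sketch (an addition, not part of the upstream file): the `io::Write`
//impl above lets the hasher terminate any `std::io::copy` pipeline.
#[cfg(all(test, feature = "std"))]
mod write_example {
    use super::*;

    #[test]
    fn hash_via_io_copy() {
        let data: &[u8] = b"streamed through io::copy";
        let mut reader = data;
        let mut hasher = Xxh64::new(0);
        std::io::copy(&mut reader, &mut hasher).expect("in-memory copy cannot fail");
        assert_eq!(hasher.digest(), xxh64(data, 0));
    }
}
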
impl Default for Xxh64 {
    #[inline(always)]
    fn default() -> Self {
        Xxh64Builder::new(0).build()
    }
}

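///Builder that produces [Xxh64] hashers with a fixed seed, for use as a
///[core::hash::BuildHasher] (e.g. the `S` parameter of a `HashMap`).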
#[derive(Clone, Copy, Default)]
pub struct Xxh64Builder {
    seed: u64
}

impl Xxh64Builder {
    ///Creates a builder with the given seed.
    #[inline(always)]
    pub const fn new(seed: u64) -> Self {
        Self {
            seed
        }
    }

    ///Creates a [Xxh64] hasher seeded with the builder's seed.
    #[inline(always)]
    pub const fn build(self) -> Xxh64 {
        Xxh64::new(self.seed)
    }
}

impl core::hash::BuildHasher for Xxh64Builder {
    type Hasher = Xxh64;

    #[inline(always)]
    fn build_hasher(&self) -> Self::Hasher {
        self.build()
    }
}
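
#[cfg(test)]
mod tests {
    //Illustrative checks (an addition, not part of the upstream file): they pin the
    //one-shot function to the published empty-input vector and verify that the
    //streaming and builder paths agree with it.
    use super::*;
    use core::hash::{BuildHasher, Hasher};

    #[test]
    fn empty_input_reference_vector() {
        //Published XXH64 test vector for empty input, seed 0.
        assert_eq!(xxh64(b"", 0), 0xef46db3751d8e999);
    }

    #[test]
    fn streaming_matches_one_shot() {
        let data = b"some input long enough to cross the 32-byte chunk boundary twice over";
        for split in 0..data.len() {
            let mut hasher = Xxh64::new(7);
            hasher.update(&data[..split]);
            hasher.update(&data[split..]);
            assert_eq!(hasher.digest(), xxh64(data, 7), "split at {}", split);
        }
    }

    #[test]
    fn builder_applies_seed() {
        let mut hasher = Xxh64Builder::new(123).build_hasher();
        hasher.write(b"abc");
        assert_eq!(hasher.finish(), xxh64(b"abc", 123));
    }
}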