1use crate::Uint;
2use core::ops::{
3 BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not, Shl, ShlAssign, Shr,
4 ShrAssign,
5};
6
7impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
8 #[must_use]
12 #[inline]
13 pub const fn bit(&self, index: usize) -> bool {
14 if index >= BITS {
15 return false;
16 }
17 let (limbs, bits) = (index / 64, index % 64);
18 self.limbs[limbs] & (1 << bits) != 0
19 }
20
21 #[inline]
23 pub fn set_bit(&mut self, index: usize, value: bool) {
24 if index >= BITS {
25 return;
26 }
27 let (limbs, bits) = (index / 64, index % 64);
28 if value {
29 self.limbs[limbs] |= 1 << bits;
30 } else {
31 self.limbs[limbs] &= !(1 << bits);
32 }
33 }
34
35 #[inline]
69 #[must_use]
70 #[track_caller]
71 pub const fn byte(&self, index: usize) -> u8 {
72 #[cfg(target_endian = "little")]
73 {
74 self.as_le_slice()[index]
75 }
76
77 #[cfg(target_endian = "big")]
78 #[allow(clippy::cast_possible_truncation)] {
80 (self.limbs[index / 8] >> ((index % 8) * 8)) as u8
81 }
82 }
83
84 #[inline]
98 #[must_use]
99 pub const fn checked_byte(&self, index: usize) -> Option<u8> {
100 if index < Self::BYTES {
101 Some(self.byte(index))
102 } else {
103 None
104 }
105 }
106
107 #[inline]
111 #[must_use]
112 pub fn reverse_bits(mut self) -> Self {
113 self.limbs.reverse();
114 for limb in &mut self.limbs {
115 *limb = limb.reverse_bits();
116 }
117 if BITS % 64 != 0 {
118 self >>= 64 - BITS % 64;
119 }
120 self
121 }
122
123 #[inline]
125 #[must_use]
126 pub const fn not(mut self) -> Self {
127 if BITS == 0 {
128 return Self::ZERO;
129 }
130 let mut i = 0;
131 while i < LIMBS {
132 self.limbs[i] = !self.limbs[i];
133 i += 1;
134 }
135 self.masked()
136 }
137
138 #[inline]
142 pub(crate) const fn count_significant_words(&self) -> usize {
143 let mut i = LIMBS;
144 while i > 0 {
145 i -= 1;
146 if self.limbs[i] != 0 {
147 return i + 1;
148 }
149 }
150 0
151 }
152
    /// Returns the number of leading zeros in the binary representation,
    /// counted within the `BITS` bits of the type.
    #[inline]
    #[must_use]
    pub const fn leading_zeros(&self) -> usize {
        // When BITS is not a limb multiple the top limb only holds
        // `BITS % 64` value bits; `fixed` is the number of always-zero
        // high bits that a full 64/128-bit count would over-report.
        let fixed = Self::MASK.leading_zeros() as usize;

        // Fast paths for sizes that fit native integer types.
        as_primitives!(self, {
            u64(x) => return x.leading_zeros() as usize - fixed,
            u128(x) => return x.leading_zeros() as usize - fixed,
            u256(lo, hi) => return (if hi != 0 {
                hi.leading_zeros()
            } else {
                lo.leading_zeros() + 128
            }) as usize - fixed,
        });

        // Generic path: count whole zero limbs above the most significant
        // word, add the zeros inside that word, subtract the unused bits.
        let s = self.count_significant_words();
        if s == 0 {
            return BITS;
        }
        let n = LIMBS - s;
        let skipped = n * 64;
        let top = self.limbs[s - 1].leading_zeros() as usize;
        skipped + top - fixed
    }
179
180 #[inline]
183 #[must_use]
184 pub const fn leading_ones(&self) -> usize {
185 Self::not(*self).leading_zeros()
186 }
187
188 #[inline]
191 #[must_use]
192 pub fn trailing_zeros(&self) -> usize {
193 self.as_limbs()
194 .iter()
195 .position(|&limb| limb != 0)
196 .map_or(BITS, |n| {
197 n * 64 + self.as_limbs()[n].trailing_zeros() as usize
198 })
199 }
200
201 #[inline]
204 #[must_use]
205 pub fn trailing_ones(&self) -> usize {
206 self.as_limbs()
207 .iter()
208 .position(|&limb| limb != u64::MAX)
209 .map_or(BITS, |n| {
210 n * 64 + self.as_limbs()[n].trailing_ones() as usize
211 })
212 }
213
214 #[inline]
216 #[must_use]
217 pub const fn count_ones(&self) -> usize {
218 let mut ones = 0;
219 let mut i = 0;
220 while i < LIMBS {
221 ones += self.limbs[i].count_ones() as usize;
222 i += 1;
223 }
224 ones
225 }
226
    /// Returns the number of zero bits in the binary representation,
    /// counting only the `BITS` bits of the type.
    #[must_use]
    #[inline]
    pub const fn count_zeros(&self) -> usize {
        BITS - self.count_ones()
    }
233
    /// Returns the minimum number of bits required to represent the value:
    /// the position of the highest set bit plus one, or zero for zero.
    #[must_use]
    #[inline]
    pub const fn bit_len(&self) -> usize {
        BITS - self.leading_zeros()
    }
243
    /// Returns the minimum number of bytes required to represent the value,
    /// or zero for zero.
    #[must_use]
    #[inline]
    pub const fn byte_len(&self) -> usize {
        // Round the bit length up to a whole number of bytes.
        (self.bit_len() + 7) / 8
    }
253
    /// Returns the most significant 64 bits of the value and the exponent.
    ///
    /// Returns `(bits, exponent)` such that the value approximately equals
    /// `bits * 2^exponent`. For values that fit in a single limb the result
    /// is exact with `exponent == 0`; for larger values `bits` is
    /// left-aligned (its top bit is set) and lower-order bits are dropped.
    #[inline]
    #[must_use]
    pub fn most_significant_bits(&self) -> (u64, usize) {
        let first_set_limb = self
            .as_limbs()
            .iter()
            .rposition(|&limb| limb != 0)
            .unwrap_or(0);
        if first_set_limb == 0 {
            // Zero, or a value that fits in the lowest limb: return it
            // verbatim with exponent zero.
            (self.as_limbs().first().copied().unwrap_or(0), 0)
        } else {
            // Combine the highest non-zero limb with bits borrowed from the
            // limb below so the result is left-aligned in 64 bits.
            let hi = self.as_limbs()[first_set_limb];
            let lo = self.as_limbs()[first_set_limb - 1];
            let leading_zeros = hi.leading_zeros();
            let bits = if leading_zeros > 0 {
                // The guard avoids `lo >> 64`, which would overflow the
                // shift amount when `hi` is already left-aligned.
                (hi << leading_zeros) | (lo >> (64 - leading_zeros))
            } else {
                hi
            };
            let exponent = first_set_limb * 64 - leading_zeros as usize;
            (bits, exponent)
        }
    }
288
289 #[inline(always)]
298 #[must_use]
299 pub const fn checked_shl(self, rhs: usize) -> Option<Self> {
300 match self.overflowing_shl(rhs) {
301 (value, false) => Some(value),
302 _ => None,
303 }
304 }
305
306 #[inline(always)]
312 #[must_use]
313 pub const fn saturating_shl(self, rhs: usize) -> Self {
314 match self.overflowing_shl(rhs) {
315 (value, false) => value,
316 _ => Self::MAX,
317 }
318 }
319
320 #[inline]
329 #[must_use]
330 pub const fn overflowing_shl(self, rhs: usize) -> (Self, bool) {
331 let (limbs, bits) = (rhs / 64, rhs % 64);
332 if limbs >= LIMBS {
333 return (Self::ZERO, !self.const_is_zero());
334 }
335
336 let word_bits = 64;
337 let mut r = Self::ZERO;
338 let mut carry = 0;
339 let mut i = 0;
340 while i < LIMBS - limbs {
341 let x = self.limbs[i];
342 r.limbs[i + limbs] = (x << bits) | carry;
343 carry = (x >> (word_bits - bits - 1)) >> 1;
344 i += 1;
345 }
346 (r.masked(), carry != 0)
347 }
348
349 #[inline]
353 pub(crate) fn overflowing_shl_big(self, rhs: Self) -> (Self, bool) {
354 if BITS == 0 {
355 return (Self::ZERO, false);
356 }
357 let Ok(rhs) = u64::try_from(rhs) else {
358 return (Self::ZERO, true);
359 };
360 #[allow(clippy::cast_possible_truncation)]
364 self.overflowing_shl(rhs as usize)
365 }
366
    /// Left shift by `rhs` bits, discarding any bits shifted out of the
    /// type (the result is taken modulo `2^BITS`).
    #[inline(always)]
    #[must_use]
    pub const fn wrapping_shl(self, rhs: usize) -> Self {
        self.overflowing_shl(rhs).0
    }
378
379 #[inline(always)]
391 #[must_use]
392 pub const fn checked_shr(self, rhs: usize) -> Option<Self> {
393 match self.overflowing_shr(rhs) {
394 (value, false) => Some(value),
395 _ => None,
396 }
397 }
398
399 #[inline]
412 #[must_use]
413 pub const fn overflowing_shr(self, rhs: usize) -> (Self, bool) {
414 let (limbs, bits) = (rhs / 64, rhs % 64);
415 if limbs >= LIMBS {
416 return (Self::ZERO, !self.const_is_zero());
417 }
418
419 let word_bits = 64;
420 let mut r = Self::ZERO;
421 let mut carry = 0;
422 let mut i = 0;
423 while i < LIMBS - limbs {
424 let x = self.limbs[LIMBS - 1 - i];
425 r.limbs[LIMBS - 1 - i - limbs] = (x >> bits) | carry;
426 carry = (x << (word_bits - bits - 1)) << 1;
427 i += 1;
428 }
429 (r, carry != 0)
430 }
431
432 #[inline]
436 pub(crate) fn overflowing_shr_big(self, rhs: Self) -> (Self, bool) {
437 if BITS == 0 {
438 return (Self::ZERO, false);
439 }
440 let Ok(rhs) = u64::try_from(rhs) else {
441 return (Self::ZERO, true);
442 };
443 #[allow(clippy::cast_possible_truncation)]
447 self.overflowing_shr(rhs as usize)
448 }
449
    /// Right shift by `rhs` bits, discarding any bits shifted out (returns
    /// the floor of `value / 2^rhs`).
    #[inline(always)]
    #[must_use]
    pub const fn wrapping_shr(self, rhs: usize) -> Self {
        self.overflowing_shr(rhs).0
    }
464
    /// Arithmetic (signed) right shift: shifts right by `rhs` bits while
    /// copying the most significant bit into the vacated positions.
    #[inline]
    #[must_use]
    pub const fn arithmetic_shr(self, rhs: usize) -> Self {
        if BITS == 0 {
            return Self::ZERO;
        }
        // Interpret the top bit as the sign.
        let sign = self.bit(BITS - 1);
        let mut r = self.wrapping_shr(rhs);
        if sign {
            // Fill the vacated top bits with ones. `saturating_sub` makes
            // `rhs >= BITS` shift MAX by zero, producing an all-ones result.
            r = r.bitor(Self::MAX.wrapping_shl(BITS.saturating_sub(rhs)));
        }
        r
    }
480
481 #[inline]
484 #[must_use]
485 pub const fn rotate_left(self, rhs: usize) -> Self {
486 if BITS == 0 {
487 return Self::ZERO;
488 }
489 let rhs = rhs % BITS;
490 self.wrapping_shl(rhs).bitor(self.wrapping_shr(BITS - rhs))
492 }
493
494 #[inline(always)]
497 #[must_use]
498 pub const fn rotate_right(self, rhs: usize) -> Self {
499 if BITS == 0 {
500 return Self::ZERO;
501 }
502 let rhs = rhs % BITS;
503 self.rotate_left(BITS - rhs)
504 }
505}
506
impl<const BITS: usize, const LIMBS: usize> Not for Uint<BITS, LIMBS> {
    type Output = Self;

    /// Bitwise negation truncated to `BITS` bits; delegates to the inherent
    /// [`Uint::not`] (inherent methods take precedence over trait methods).
    #[inline]
    fn not(self) -> Self::Output {
        self.not()
    }
}

impl<const BITS: usize, const LIMBS: usize> Not for &Uint<BITS, LIMBS> {
    type Output = Uint<BITS, LIMBS>;

    /// Bitwise negation on a reference; copies the value first.
    #[inline]
    fn not(self) -> Self::Output {
        (*self).not()
    }
}
524
// Generates the full set of impls for one bitwise operation: owned and
// borrowed `$trait` impls, owned/borrowed `$trait_assign` impls, and an
// inherent `const fn` form usable in const contexts.
//
// Arguments:
// - `$op`: the operator token, used only in the generated documentation.
// - `$assign_op`: the compound-assignment token, applied limb-wise.
// - `$trait` / `$fn`: the operator trait and its method (e.g. `BitOr`/`bitor`).
// - `$trait_assign` / `$fn_assign`: the assignment trait and its method.
//
// No masking is needed anywhere: AND/OR/XOR of two values already reduced
// modulo 2^BITS stays reduced.
macro_rules! impl_bit_op {
    ($op:tt, $assign_op:tt, $trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
        impl<const BITS: usize, const LIMBS: usize> $trait_assign<Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            #[inline(always)]
            fn $fn_assign(&mut self, rhs: Uint<BITS, LIMBS>) {
                // Delegate to the by-reference assignment impl below.
                self.$fn_assign(&rhs);
            }
        }

        impl<const BITS: usize, const LIMBS: usize> $trait_assign<&Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            #[inline]
            fn $fn_assign(&mut self, rhs: &Uint<BITS, LIMBS>) {
                // Apply the operation limb by limb via the `u64` impl.
                for i in 0..LIMBS {
                    u64::$fn_assign(&mut self.limbs[i], rhs.limbs[i]);
                }
            }
        }

        impl<const BITS: usize, const LIMBS: usize> $trait<Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(mut self, rhs: Uint<BITS, LIMBS>) -> Self::Output {
                self.$fn_assign(rhs);
                self
            }
        }

        impl<const BITS: usize, const LIMBS: usize> $trait<&Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(mut self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
                self.$fn_assign(rhs);
                self
            }
        }

        impl<const BITS: usize, const LIMBS: usize> $trait<Uint<BITS, LIMBS>>
            for &Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(self, mut rhs: Uint<BITS, LIMBS>) -> Self::Output {
                // AND/OR/XOR are commutative (see the invocations of this
                // macro), so the owned `rhs` can serve as the accumulator.
                rhs.$fn_assign(self);
                rhs
            }
        }

        impl<const BITS: usize, const LIMBS: usize> $trait<&Uint<BITS, LIMBS>>
            for &Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
                // Copy both operands and use the owned impl.
                <Uint<BITS, LIMBS>>::$fn(*self, *rhs)
            }
        }

        impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
            #[doc = concat!("Returns the bitwise `", stringify!($op), "` of the two numbers.")]
            #[inline(always)]
            #[must_use]
            pub const fn $fn(mut self, rhs: Uint<BITS, LIMBS>) -> Uint<BITS, LIMBS> {
                // `const` contexts cannot use the trait impls, so this
                // repeats the limb-wise loop with a `while`.
                let mut i = 0;
                while i < LIMBS {
                    self.limbs[i] $assign_op rhs.limbs[i];
                    i += 1;
                }
                self
            }
        }
    };
}
609
// Instantiate the bitwise operators (all commutative, as the macro assumes).
impl_bit_op!(|, |=, BitOr, bitor, BitOrAssign, bitor_assign);
impl_bit_op!(&, &=, BitAnd, bitand, BitAndAssign, bitand_assign);
impl_bit_op!(^, ^=, BitXor, bitxor, BitXorAssign, bitxor_assign);
613
impl<const BITS: usize, const LIMBS: usize> Shl<Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    /// Left shift by a `Uint` amount with wrapping semantics: the overflow
    /// flag from `overflowing_shl_big` is discarded.
    #[inline(always)]
    fn shl(self, rhs: Self) -> Self::Output {
        self.overflowing_shl_big(rhs).0
    }
}

impl<const BITS: usize, const LIMBS: usize> Shl<&Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    #[inline(always)]
    fn shl(self, rhs: &Self) -> Self::Output {
        self << *rhs
    }
}

impl<const BITS: usize, const LIMBS: usize> Shr<Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    /// Right shift by a `Uint` amount with wrapping semantics: the overflow
    /// flag from `overflowing_shr_big` is discarded.
    #[inline(always)]
    fn shr(self, rhs: Self) -> Self::Output {
        self.overflowing_shr_big(rhs).0
    }
}

impl<const BITS: usize, const LIMBS: usize> Shr<&Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    #[inline(always)]
    fn shr(self, rhs: &Self) -> Self::Output {
        self >> *rhs
    }
}

// The assignment forms are defined in terms of the binary operators above.
impl<const BITS: usize, const LIMBS: usize> ShlAssign<Self> for Uint<BITS, LIMBS> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: Self) {
        *self = *self << rhs;
    }
}

impl<const BITS: usize, const LIMBS: usize> ShlAssign<&Self> for Uint<BITS, LIMBS> {
    #[inline(always)]
    fn shl_assign(&mut self, rhs: &Self) {
        *self = *self << rhs;
    }
}

impl<const BITS: usize, const LIMBS: usize> ShrAssign<Self> for Uint<BITS, LIMBS> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: Self) {
        *self = *self >> rhs;
    }
}

impl<const BITS: usize, const LIMBS: usize> ShrAssign<&Self> for Uint<BITS, LIMBS> {
    #[inline(always)]
    fn shr_assign(&mut self, rhs: &Self) {
        *self = *self >> rhs;
    }
}
677
// Generates `Shl`/`Shr` (and the assignment forms) for a primitive
// shift-amount type `$u`.
//
// - `@main`: by-value impls; the amount is cast to `usize` and the shift is
//   wrapping (shifted-out bits are discarded), matching the `Uint`-amount
//   operators above. For signed `$u`, a negative amount casts to a huge
//   `usize` and therefore shifts everything out, yielding zero.
// - `@ref`: by-reference impls forwarding to the by-value ones.
// - `@assign`: `ShlAssign`/`ShrAssign` defined via the binary operators.
// The variadic arm expands one type at a time.
macro_rules! impl_shift {
    (@main $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> Shl<$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            #[allow(clippy::cast_possible_truncation)]
            fn shl(self, rhs: $u) -> Self::Output {
                self.wrapping_shl(rhs as usize)
            }
        }

        impl<const BITS: usize, const LIMBS: usize> Shr<$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            #[allow(clippy::cast_possible_truncation)]
            fn shr(self, rhs: $u) -> Self::Output {
                self.wrapping_shr(rhs as usize)
            }
        }
    };

    (@ref $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> Shl<&$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shl(self, rhs: &$u) -> Self::Output {
                <Self>::shl(self, *rhs)
            }
        }

        impl<const BITS: usize, const LIMBS: usize> Shr<&$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shr(self, rhs: &$u) -> Self::Output {
                <Self>::shr(self, *rhs)
            }
        }
    };

    (@assign $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> ShlAssign<$u> for Uint<BITS, LIMBS> {
            #[inline(always)]
            fn shl_assign(&mut self, rhs: $u) {
                *self = *self << rhs;
            }
        }

        impl<const BITS: usize, const LIMBS: usize> ShrAssign<$u> for Uint<BITS, LIMBS> {
            #[inline(always)]
            fn shr_assign(&mut self, rhs: $u) {
                *self = *self >> rhs;
            }
        }
    };

    ($u:ty) => {
        impl_shift!(@main $u);
        impl_shift!(@ref $u);
        impl_shift!(@assign $u);
        impl_shift!(@assign &$u);
    };

    ($u:ty, $($tail:ty),*) => {
        impl_shift!($u);
        impl_shift!($($tail),*);
    };
}
749
impl_shift!(usize, u8, u16, u32, isize, i8, i16, i32);

// 64-bit amounts are only supported where they fit in `usize`, so the
// `rhs as usize` cast inside the macro cannot truncate.
#[cfg(target_pointer_width = "64")]
impl_shift!(u64, i64);
755
756#[cfg(test)]
757mod tests {
758 use super::*;
759 use crate::{
760 aliases::{U128, U256},
761 const_for, nlimbs,
762 };
763 use core::cmp::min;
764 use proptest::proptest;
765
    // Checks `leading_zeros` against degenerate sizes, the extremes
    // (ZERO/MAX/ONE), a shift-based property, the native `u128` count, and
    // hand-picked limb patterns that exercise the u256 fast path.
    #[test]
    fn test_leading_zeros() {
        assert_eq!(Uint::<0, 0>::ZERO.leading_zeros(), 0);
        assert_eq!(Uint::<1, 1>::ZERO.leading_zeros(), 1);
        assert_eq!(Uint::<1, 1>::ONE.leading_zeros(), 0);
        const_for!(BITS in NON_ZERO {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            assert_eq!(U::ZERO.leading_zeros(), BITS);
            assert_eq!(U::MAX.leading_zeros(), 0);
            assert_eq!(U::ONE.leading_zeros(), BITS - 1);
            proptest!(|(value: U)| {
                let zeros = value.leading_zeros();
                assert!(zeros <= BITS);
                assert!(zeros < BITS || value == U::ZERO);
                if zeros < BITS {
                    // Shifting left by exactly the leading-zero count must
                    // left-align the value without losing bits.
                    let (left, overflow) = value.overflowing_shl(zeros);
                    assert!(!overflow);
                    assert!(left.leading_zeros() == 0 || value == U::ZERO);
                    assert!(left.bit(BITS - 1));
                    assert_eq!(value >> (BITS - zeros), Uint::ZERO);
                }
            });
        });
        proptest!(|(value: u128)| {
            let uint = U128::from(value);
            assert_eq!(uint.leading_zeros(), value.leading_zeros() as usize);
        });

        // Top bit set in various limbs: counts depend only on the highest.
        assert_eq!(U256::from_limbs([0, 1, 0, 1]).leading_zeros(), 63);
        assert_eq!(U256::from_limbs([0, 0, 1, 1]).leading_zeros(), 63);
        assert_eq!(U256::from_limbs([1, 0, 1, 1]).leading_zeros(), 63);
        assert_eq!(U256::from_limbs([0, 1, 1, 1]).leading_zeros(), 63);
        assert_eq!(U256::from_limbs([1, 1, 1, 1]).leading_zeros(), 63);

        assert_eq!(U256::from_limbs([1, 0, 1, 0]).leading_zeros(), 64 + 63);
    }
803
    // Minimal sanity checks for `leading_ones` at degenerate sizes.
    #[test]
    fn test_leading_ones() {
        assert_eq!(Uint::<0, 0>::ZERO.leading_ones(), 0);
        assert_eq!(Uint::<1, 1>::ZERO.leading_ones(), 0);
        assert_eq!(Uint::<1, 1>::ONE.leading_ones(), 1);
    }
810
    // Checks the `(bits, exponent)` contract of `most_significant_bits`:
    // single-limb values round-trip exactly with exponent 0; two-limb
    // values are left-aligned in 64 bits.
    #[test]
    fn test_most_significant_bits() {
        const_for!(BITS in NON_ZERO {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: u64)| {
                let value = if U::LIMBS <= 1 { value & U::MASK } else { value };
                assert_eq!(U::from(value).most_significant_bits(), (value, 0));
            });
        });
        proptest!(|(mut limbs: [u64; 2])| {
            // Force the high limb non-zero so the multi-limb path is taken.
            if limbs[1] == 0 {
                limbs[1] = 1;
            }
            let (bits, exponent) = U128::from_limbs(limbs).most_significant_bits();
            assert!(bits >= 1_u64 << 63);
            assert_eq!(exponent, 64 - limbs[1].leading_zeros() as usize);
        });
    }
830
    // Non-overflowing shifts, including one that crosses a limb boundary.
    #[test]
    fn test_checked_shl() {
        assert_eq!(
            Uint::<65, 2>::from_limbs([0x0010_0000_0000_0000, 0]).checked_shl(1),
            Some(Uint::<65, 2>::from_limbs([0x0020_0000_0000_0000, 0]))
        );
        assert_eq!(
            Uint::<127, 2>::from_limbs([0x0010_0000_0000_0000, 0]).checked_shl(64),
            Some(Uint::<127, 2>::from_limbs([0, 0x0010_0000_0000_0000]))
        );
    }
842
    // Single-limb sizes: compare every bit operation against the same
    // operation on the underlying `u64` (or `u32`/`i32`/`i64` equivalents).
    #[test]
    #[allow(
        clippy::cast_lossless,
        clippy::cast_possible_truncation,
        clippy::cast_possible_wrap
    )]
    fn test_small() {
        const_for!(BITS in [1, 2, 8, 16, 32, 63, 64] {
            type U = Uint::<BITS, 1>;
            proptest!(|(a: U, b: U)| {
                assert_eq!(a | b, U::from_limbs([a.limbs[0] | b.limbs[0]]));
                assert_eq!(a & b, U::from_limbs([a.limbs[0] & b.limbs[0]]));
                assert_eq!(a ^ b, U::from_limbs([a.limbs[0] ^ b.limbs[0]]));
            });
            proptest!(|(a: U, s in 0..BITS)| {
                assert_eq!(a << s, U::from_limbs([a.limbs[0] << s & U::MASK]));
                assert_eq!(a >> s, U::from_limbs([a.limbs[0] >> s]));
            });
        });
        // 32-bit: reverse/rotate/arithmetic-shift vs the native u32/i32 ops.
        proptest!(|(a: Uint::<32, 1>, s in 0_usize..=34)| {
            assert_eq!(a.reverse_bits(), Uint::from((a.limbs[0] as u32).reverse_bits() as u64));
            assert_eq!(a.rotate_left(s), Uint::from((a.limbs[0] as u32).rotate_left(s as u32) as u64));
            assert_eq!(a.rotate_right(s), Uint::from((a.limbs[0] as u32).rotate_right(s as u32) as u64));
            if s < 32 {
                let arr_shifted = (((a.limbs[0] as i32) >> s) as u32) as u64;
                assert_eq!(a.arithmetic_shr(s), Uint::from_limbs([arr_shifted]));
            }
        });
        // 64-bit: same comparisons against native u64/i64 ops.
        proptest!(|(a: Uint::<64, 1>, s in 0_usize..=66)| {
            assert_eq!(a.reverse_bits(), Uint::from(a.limbs[0].reverse_bits()));
            assert_eq!(a.rotate_left(s), Uint::from(a.limbs[0].rotate_left(s as u32)));
            assert_eq!(a.rotate_right(s), Uint::from(a.limbs[0].rotate_right(s as u32)));
            if s < 64 {
                let arr_shifted = ((a.limbs[0] as i64) >> s) as u64;
                assert_eq!(a.arithmetic_shr(s), Uint::from_limbs([arr_shifted]));
            }
        });
    }
881
    // Property: reversing bits turns a left shift into a right shift.
    #[test]
    fn test_shift_reverse() {
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let left = (value << shift).reverse_bits();
                let right = value.reverse_bits() >> shift;
                assert_eq!(left, right);
            });
        });
    }
894
    // Shift amounts given as `Uint` values that exceed the bit width (up to
    // amounts that do not fit in `u64`) must shift everything out.
    #[test]
    fn test_shift_very_big_rhs() {
        type U = Uint<128, 2>;

        for rhs in [
            U::from(u64::MAX),
            U::from(u128::MAX),
            U::from_limbs([0, 1]),
            U::from_limbs([1, 1]),
            U::from_limbs([1, u64::MAX]),
        ] {
            assert_eq!(U::ONE << rhs, U::ZERO, "{rhs}");
            assert_eq!(U::ONE >> rhs, U::ZERO, "{rhs}");
        }
    }
910
    // Property: rotating left then right by the same amount is the identity.
    #[test]
    fn test_rotate() {
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let rotated = value.rotate_left(shift).rotate_right(shift);
                assert_eq!(value, rotated);
            });
        });
    }
922
    // Property: arithmetic shift extends an existing run of leading ones by
    // the shift amount (capped at BITS); zero leading ones stays zero.
    #[test]
    fn test_arithmetic_shr() {
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let shifted = value.arithmetic_shr(shift);
                assert_eq!(shifted.leading_ones(), match value.leading_ones() {
                    0 => 0,
                    n => min(BITS, n + shift)
                });
            });
        });
    }
937
    // Spot-checks `overflowing_shr`: the flag must be set iff non-zero bits
    // are shifted out, across single-limb, multi-limb, and 4096-bit sizes.
    #[test]
    fn test_overflowing_shr() {
        // Shifting out a zero bit: no overflow.
        assert_eq!(
            Uint::<64, 1>::from_limbs([40u64]).overflowing_shr(1),
            (Uint::<64, 1>::from(20), false)
        );

        // Shifting out a one bit: overflow.
        assert_eq!(
            Uint::<64, 1>::from_limbs([41u64]).overflowing_shr(1),
            (Uint::<64, 1>::from(20), true)
        );

        assert_eq!(
            Uint::<65, 2>::from_limbs([0x0010_0000_0000_0000, 0]).overflowing_shr(1),
            (Uint::<65, 2>::from_limbs([0x0008_0000_0000_0000, 0]), false)
        );

        // All-ones value shifted across a limb boundary: overflow.
        assert_eq!(
            Uint::<256, 4>::MAX.overflowing_shr(65),
            (
                Uint::<256, 4>::from_str_radix(
                    "7fffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                true
            )
        );
        // Exact shifts (low bits all zero): no overflow.
        assert_eq!(
            Uint::<4096, 64>::from_str_radix("3ffffffffffffffffffffffffffffc00000000", 16,)
                .unwrap()
                .overflowing_shr(34),
            (
                Uint::<4096, 64>::from_str_radix("fffffffffffffffffffffffffffff", 16).unwrap(),
                false
            )
        );
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "fffffffffffffffffffffffffffff0000000000000000000000000",
                16,
            )
            .unwrap()
            .overflowing_shr(100),
            (
                Uint::<4096, 64>::from_str_radix("fffffffffffffffffffffffffffff", 16).unwrap(),
                false
            )
        );
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0bdbfe",
                16,
            )
            .unwrap()
            .overflowing_shr(1),
            (
                Uint::<4096, 64>::from_str_radix(
                    "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffff85edff",
                    16
                )
                .unwrap(),
                false
            )
        );
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
                16,
            )
            .unwrap()
            .overflowing_shr(1000),
            (
                Uint::<4096, 64>::from_str_radix(
                    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                false
            )
        );
        // MAX shifted inexactly: overflow.
        assert_eq!(
            Uint::<4096, 64>::MAX
                .overflowing_shr(34),
            (
                Uint::<4096, 64>::from_str_radix(
                    "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                true
            )
        );
    }
1051}