1use crate::Uint;
2use core::ops::{
3 BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not, Shl, ShlAssign, Shr,
4 ShrAssign,
5};
6
impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
    /// Returns whether the bit at `index` is set.
    ///
    /// Indices at or beyond `BITS` always return `false` instead of
    /// panicking.
    #[must_use]
    #[inline]
    pub const fn bit(&self, index: usize) -> bool {
        if index >= BITS {
            return false;
        }
        // Split into limb index and bit offset within that limb.
        let (limbs, bits) = (index / 64, index % 64);
        self.limbs[limbs] & (1 << bits) != 0
    }

    /// Sets the bit at `index` to `value`.
    ///
    /// Indices at or beyond `BITS` are silently ignored.
    #[inline]
    pub fn set_bit(&mut self, index: usize, value: bool) {
        if index >= BITS {
            return;
        }
        let (limbs, bits) = (index / 64, index % 64);
        if value {
            self.limbs[limbs] |= 1 << bits;
        } else {
            self.limbs[limbs] &= !(1 << bits);
        }
    }

    /// Returns the byte at `index`, counting from the least-significant
    /// (little-endian) end.
    ///
    /// # Panics
    ///
    /// Panics when the index is out of range. NOTE(review): on little-endian
    /// targets the bound is `Self::BYTES` (via `as_le_slice`); on big-endian
    /// targets the bound is the limb array, which may admit slightly larger
    /// indices when `BITS` is not a multiple of 64 — use
    /// [`Self::checked_byte`] for a strict bound.
    #[inline]
    #[must_use]
    #[track_caller]
    pub const fn byte(&self, index: usize) -> u8 {
        #[cfg(target_endian = "little")]
        {
            self.as_le_slice()[index]
        }

        // Big-endian targets have no little-endian byte view of the limbs,
        // so extract the byte from the containing limb directly.
        #[cfg(target_endian = "big")]
        #[allow(clippy::cast_possible_truncation)]
        {
            (self.limbs[index / 8] >> ((index % 8) * 8)) as u8
        }
    }

    /// Returns the byte at `index` (little-endian order), or `None` when
    /// `index >= Self::BYTES`.
    #[inline]
    #[must_use]
    pub const fn checked_byte(&self, index: usize) -> Option<u8> {
        if index < Self::BYTES {
            Some(self.byte(index))
        } else {
            None
        }
    }

    /// Reverses the order of all `BITS` bits.
    #[inline]
    #[must_use]
    pub fn reverse_bits(mut self) -> Self {
        // Reverse limb order, then the bits within each limb.
        self.limbs.reverse();
        for limb in &mut self.limbs {
            *limb = limb.reverse_bits();
        }
        // When BITS is not a limb multiple, the reversal above landed the
        // value in the unused high bits of the top limb; shift it back down.
        if BITS % 64 != 0 {
            self >>= 64 - BITS % 64;
        }
        self
    }

    /// Returns the bitwise negation (one's complement), masked to `BITS`.
    #[inline]
    #[must_use]
    pub const fn not(mut self) -> Self {
        if BITS == 0 {
            return Self::ZERO;
        }

        // const fn: manual while-loop instead of an iterator.
        let mut i = 0;
        while i < LIMBS {
            self.limbs[i] = !self.limbs[i];
            i += 1;
        }

        // Clear the bits above BITS that the limb-wise negation set.
        self.masked()
    }

    /// Returns the number of leading (most-significant) zero bits; `BITS`
    /// for the zero value.
    #[inline]
    #[must_use]
    pub const fn leading_zeros(&self) -> usize {
        // Scan down from the most-significant limb for the first non-zero one.
        let mut i = LIMBS;
        while i > 0 {
            i -= 1;
            if self.limbs[i] != 0 {
                let n = LIMBS - 1 - i;
                let skipped = n * 64;
                // `fixed` = number of always-zero bits in the top limb
                // (64 * LIMBS - BITS), derived from the limb mask.
                let fixed = Self::MASK.leading_zeros() as usize;
                let top = self.limbs[i].leading_zeros() as usize;
                return skipped + top - fixed;
            }
        }

        // All limbs zero.
        BITS
    }

    /// Returns the number of leading (most-significant) one bits.
    #[inline]
    #[must_use]
    pub const fn leading_ones(&self) -> usize {
        Self::not(*self).leading_zeros()
    }

    /// Returns the number of trailing (least-significant) zero bits; `BITS`
    /// for the zero value.
    #[inline]
    #[must_use]
    pub fn trailing_zeros(&self) -> usize {
        self.as_limbs()
            .iter()
            .position(|&limb| limb != 0)
            .map_or(BITS, |n| {
                n * 64 + self.as_limbs()[n].trailing_zeros() as usize
            })
    }

    /// Returns the number of trailing (least-significant) one bits.
    #[inline]
    #[must_use]
    pub fn trailing_ones(&self) -> usize {
        // `map_or(BITS, ...)` covers the all-ones case, which can only occur
        // when every limb is u64::MAX (i.e. BITS is a multiple of 64).
        self.as_limbs()
            .iter()
            .position(|&limb| limb != u64::MAX)
            .map_or(BITS, |n| {
                n * 64 + self.as_limbs()[n].trailing_ones() as usize
            })
    }

    /// Returns the number of one bits.
    #[inline]
    #[must_use]
    pub const fn count_ones(&self) -> usize {
        let mut total = 0;

        let mut i = 0;
        while i < LIMBS {
            total += self.limbs[i].count_ones() as usize;
            i += 1;
        }

        total
    }

    /// Returns the number of zero bits within the `BITS` range.
    #[must_use]
    #[inline]
    pub const fn count_zeros(&self) -> usize {
        BITS - self.count_ones()
    }

    /// Length of the value in bits, ignoring leading zeros: `0` for zero,
    /// otherwise one more than the index of the highest set bit.
    #[must_use]
    #[inline]
    pub const fn bit_len(&self) -> usize {
        BITS - self.leading_zeros()
    }

    /// Length of the value in bytes: [`Self::bit_len`] rounded up to a
    /// whole byte.
    #[must_use]
    #[inline]
    pub const fn byte_len(&self) -> usize {
        (self.bit_len() + 7) / 8
    }

    /// Returns the most-significant 64 bits and an exponent such that
    /// `self ≈ bits << exponent`.
    ///
    /// Values that fit in a single limb are returned exactly with exponent
    /// `0`; larger values are normalized so the top bit of `bits` is set,
    /// truncating lower-order bits.
    #[inline]
    #[must_use]
    pub fn most_significant_bits(&self) -> (u64, usize) {
        let first_set_limb = self
            .as_limbs()
            .iter()
            .rposition(|&limb| limb != 0)
            .unwrap_or(0);
        if first_set_limb == 0 {
            // Value fits in limb 0 (or is zero / has zero limbs): exact.
            (self.as_limbs().first().copied().unwrap_or(0), 0)
        } else {
            let hi = self.as_limbs()[first_set_limb];
            let lo = self.as_limbs()[first_set_limb - 1];
            let leading_zeros = hi.leading_zeros();
            // Normalize: shift `hi` up and pull the top of `lo` in below it.
            // The `leading_zeros > 0` guard avoids a 64-bit shift of `lo`.
            let bits = if leading_zeros > 0 {
                (hi << leading_zeros) | (lo >> (64 - leading_zeros))
            } else {
                hi
            };
            let exponent = first_set_limb * 64 - leading_zeros as usize;
            (bits, exponent)
        }
    }

    /// Left shift by `rhs` bits, returning `None` when
    /// [`Self::overflowing_shl`] reports overflow.
    #[inline(always)]
    #[must_use]
    pub fn checked_shl(self, rhs: usize) -> Option<Self> {
        match self.overflowing_shl(rhs) {
            (value, false) => Some(value),
            _ => None,
        }
    }

    /// Left shift by `rhs` bits, saturating to [`Self::MAX`] on overflow.
    #[inline(always)]
    #[must_use]
    pub fn saturating_shl(self, rhs: usize) -> Self {
        match self.overflowing_shl(rhs) {
            (value, false) => value,
            _ => Self::MAX,
        }
    }

    /// Left shift by `rhs` bits with overflow detection. Returns the
    /// truncated result and a flag for bits carried out of the shift.
    ///
    /// NOTE(review): when `rhs >= 64`, the top `rhs / 64` limbs of `self`
    /// are dropped without being inspected, and `apply_mask` clears excess
    /// high bits silently — so the flag only reflects the carry out of the
    /// limb-wise shift, not every discarded one bit. Confirm whether callers
    /// (`checked_shl`, `saturating_shl`) rely on a strict semantic here.
    #[inline]
    #[must_use]
    pub fn overflowing_shl(self, rhs: usize) -> (Self, bool) {
        let (limbs, bits) = (rhs / 64, rhs % 64);
        // Shifting by at least LIMBS whole limbs always yields zero.
        if limbs >= LIMBS {
            return (Self::ZERO, self != Self::ZERO);
        }

        let word_bits = 64;
        let mut r = Self::ZERO;
        let mut carry = 0;
        for i in 0..Self::LIMBS - limbs {
            let x = self.limbs[i];
            r.limbs[i + limbs] = (x << bits) | carry;
            // Two-step shift so `bits == 0` does not shift by 64 (which
            // would be undefined for u64).
            carry = (x >> (word_bits - bits - 1)) >> 1;
        }
        r.apply_mask();
        (r, carry != 0)
    }

    /// Left shift by `rhs` bits, discarding any shifted-out bits.
    #[inline(always)]
    #[must_use]
    pub fn wrapping_shl(self, rhs: usize) -> Self {
        self.overflowing_shl(rhs).0
    }

    /// Right shift by `rhs` bits, returning `None` when one bits were
    /// shifted out (i.e. the division by `2^rhs` was inexact).
    #[inline(always)]
    #[must_use]
    pub fn checked_shr(self, rhs: usize) -> Option<Self> {
        match self.overflowing_shr(rhs) {
            (value, false) => Some(value),
            _ => None,
        }
    }

    /// Right shift by `rhs` bits; the flag is `true` when one bits were
    /// shifted out.
    ///
    /// NOTE(review): mirrors `overflowing_shl` — when `rhs >= 64` the low
    /// `rhs / 64` limbs are dropped without inspection, so the flag only
    /// reflects the carry out of the limb-wise shift.
    #[inline]
    #[must_use]
    pub fn overflowing_shr(self, rhs: usize) -> (Self, bool) {
        let (limbs, bits) = (rhs / 64, rhs % 64);
        if limbs >= LIMBS {
            return (Self::ZERO, self != Self::ZERO);
        }

        let word_bits = 64;
        let mut r = Self::ZERO;
        let mut carry = 0;
        // Walk from the most-significant limb downwards.
        for i in 0..LIMBS - limbs {
            let x = self.limbs[LIMBS - 1 - i];
            r.limbs[LIMBS - 1 - i - limbs] = (x >> bits) | carry;
            // Two-step shift so `bits == 0` does not shift by 64.
            carry = (x << (word_bits - bits - 1)) << 1;
        }
        (r, carry != 0)
    }

    /// Right shift by `rhs` bits, discarding any shifted-out bits.
    #[inline(always)]
    #[must_use]
    pub fn wrapping_shr(self, rhs: usize) -> Self {
        self.overflowing_shr(rhs).0
    }

    /// Arithmetic (sign-extending) right shift, treating bit `BITS - 1` as
    /// the sign bit.
    #[inline]
    #[must_use]
    pub fn arithmetic_shr(self, rhs: usize) -> Self {
        if BITS == 0 {
            return Self::ZERO;
        }
        let sign = self.bit(BITS - 1);
        let mut r = self >> rhs;
        if sign {
            // Fill the vacated high bits with ones. `saturating_sub` keeps
            // the shift amount at zero when rhs >= BITS (all-ones fill).
            r |= Self::MAX << BITS.saturating_sub(rhs);
        }
        r
    }

    /// Rotates the bits left by `rhs` positions; shifted-out high bits wrap
    /// around into the low end.
    #[inline]
    #[must_use]
    #[allow(clippy::missing_const_for_fn)]
    pub fn rotate_left(self, rhs: usize) -> Self {
        if BITS == 0 {
            return Self::ZERO;
        }
        // Rotation is periodic in BITS.
        let rhs = rhs % BITS;
        (self << rhs) | (self >> (BITS - rhs))
    }

    /// Rotates the bits right by `rhs` positions; implemented as the
    /// complementary left rotation.
    #[inline(always)]
    #[must_use]
    pub fn rotate_right(self, rhs: usize) -> Self {
        if BITS == 0 {
            return Self::ZERO;
        }
        let rhs = rhs % BITS;
        self.rotate_left(BITS - rhs)
    }
}
448
impl<const BITS: usize, const LIMBS: usize> Not for Uint<BITS, LIMBS> {
    type Output = Self;

    /// Bitwise negation masked to `BITS`.
    ///
    /// The explicit `Self::not` path resolves to the inherent const
    /// implementation (inherent items take precedence over trait items),
    /// avoiding recursion into this trait impl.
    #[inline]
    fn not(self) -> Self::Output {
        Self::not(self)
    }
}
457
458impl<const BITS: usize, const LIMBS: usize> Not for &Uint<BITS, LIMBS> {
459 type Output = Uint<BITS, LIMBS>;
460
461 #[inline]
462 fn not(self) -> Self::Output {
463 (*self).not()
464 }
465}
466
// Implements one bitwise binary operator (`BitOr`, `BitAnd` or `BitXor`) and
// its assignment form for every owned/borrowed combination of `Uint`
// operands. The limb-wise `OpAssign<&Uint>` impl is the single point of
// truth; all other impls forward to it. No re-masking is needed: a bitwise
// OR/AND/XOR of two values whose bits above `BITS` are zero leaves those
// bits zero.
macro_rules! impl_bit_op {
    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
        // OpAssign by value: forward to the by-reference impl below.
        impl<const BITS: usize, const LIMBS: usize> $trait_assign<Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            #[inline(always)]
            fn $fn_assign(&mut self, rhs: Uint<BITS, LIMBS>) {
                self.$fn_assign(&rhs);
            }
        }

        // OpAssign by reference: the actual limb-wise implementation.
        impl<const BITS: usize, const LIMBS: usize> $trait_assign<&Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            #[inline]
            fn $fn_assign(&mut self, rhs: &Uint<BITS, LIMBS>) {
                for i in 0..LIMBS {
                    // Apply the operator to each limb pair in place.
                    u64::$fn_assign(&mut self.limbs[i], rhs.limbs[i]);
                }
            }
        }

        // Owned op owned.
        impl<const BITS: usize, const LIMBS: usize> $trait<Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(mut self, rhs: Uint<BITS, LIMBS>) -> Self::Output {
                self.$fn_assign(rhs);
                self
            }
        }

        // Owned op borrowed.
        impl<const BITS: usize, const LIMBS: usize> $trait<&Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(mut self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
                self.$fn_assign(rhs);
                self
            }
        }

        // Borrowed op owned: the operators are commutative, so mutate `rhs`.
        impl<const BITS: usize, const LIMBS: usize> $trait<Uint<BITS, LIMBS>>
            for &Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(self, mut rhs: Uint<BITS, LIMBS>) -> Self::Output {
                rhs.$fn_assign(self);
                rhs
            }
        }

        // Borrowed op borrowed: clone the left operand and reuse owned-op-ref.
        impl<const BITS: usize, const LIMBS: usize> $trait<&Uint<BITS, LIMBS>>
            for &Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
                self.clone().$fn(rhs)
            }
        }
    };
}
537
// Instantiate the bitwise operator impls for OR, AND and XOR.
impl_bit_op!(BitOr, bitor, BitOrAssign, bitor_assign);
impl_bit_op!(BitAnd, bitand, BitAndAssign, bitand_assign);
impl_bit_op!(BitXor, bitxor, BitXorAssign, bitxor_assign);
541
542impl<const BITS: usize, const LIMBS: usize> Shl<Self> for Uint<BITS, LIMBS> {
543 type Output = Self;
544
545 #[inline(always)]
546 fn shl(self, rhs: Self) -> Self::Output {
547 if BITS == 0 {
549 return self;
550 }
551 #[allow(clippy::cast_possible_truncation)]
555 self.wrapping_shl(rhs.as_limbs()[0] as usize)
556 }
557}
558
559impl<const BITS: usize, const LIMBS: usize> Shl<&Self> for Uint<BITS, LIMBS> {
560 type Output = Self;
561
562 #[inline(always)]
563 fn shl(self, rhs: &Self) -> Self::Output {
564 self << *rhs
565 }
566}
567
568impl<const BITS: usize, const LIMBS: usize> Shr<Self> for Uint<BITS, LIMBS> {
569 type Output = Self;
570
571 #[inline(always)]
572 fn shr(self, rhs: Self) -> Self::Output {
573 if BITS == 0 {
575 return self;
576 }
577 #[allow(clippy::cast_possible_truncation)]
581 self.wrapping_shr(rhs.as_limbs()[0] as usize)
582 }
583}
584
585impl<const BITS: usize, const LIMBS: usize> Shr<&Self> for Uint<BITS, LIMBS> {
586 type Output = Self;
587
588 #[inline(always)]
589 fn shr(self, rhs: &Self) -> Self::Output {
590 self >> *rhs
591 }
592}
593
594impl<const BITS: usize, const LIMBS: usize> ShlAssign<Self> for Uint<BITS, LIMBS> {
595 #[inline(always)]
596 fn shl_assign(&mut self, rhs: Self) {
597 *self = *self << rhs;
598 }
599}
600
601impl<const BITS: usize, const LIMBS: usize> ShlAssign<&Self> for Uint<BITS, LIMBS> {
602 #[inline(always)]
603 fn shl_assign(&mut self, rhs: &Self) {
604 *self = *self << rhs;
605 }
606}
607
608impl<const BITS: usize, const LIMBS: usize> ShrAssign<Self> for Uint<BITS, LIMBS> {
609 #[inline(always)]
610 fn shr_assign(&mut self, rhs: Self) {
611 *self = *self >> rhs;
612 }
613}
614
615impl<const BITS: usize, const LIMBS: usize> ShrAssign<&Self> for Uint<BITS, LIMBS> {
616 #[inline(always)]
617 fn shr_assign(&mut self, rhs: &Self) {
618 *self = *self >> rhs;
619 }
620}
621
// Implements `Shl`/`Shr` (and their assignment forms) with a primitive
// integer shift amount. All shifts are wrapping: the amount is cast to
// `usize` and forwarded to `wrapping_shl`/`wrapping_shr`. NOTE(review): for
// the signed amount types, a negative value casts to a huge `usize` — this
// yields zero rather than shifting in the opposite direction; confirm that
// is the intended contract.
macro_rules! impl_shift {
    // By-value operator impls for one amount type.
    (@main $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> Shl<$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            #[allow(clippy::cast_possible_truncation)]
            fn shl(self, rhs: $u) -> Self::Output {
                self.wrapping_shl(rhs as usize)
            }
        }

        impl<const BITS: usize, const LIMBS: usize> Shr<$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            #[allow(clippy::cast_possible_truncation)]
            fn shr(self, rhs: $u) -> Self::Output {
                self.wrapping_shr(rhs as usize)
            }
        }
    };

    // By-reference operator impls, forwarding to the by-value ones.
    (@ref $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> Shl<&$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shl(self, rhs: &$u) -> Self::Output {
                <Self>::shl(self, *rhs)
            }
        }

        impl<const BITS: usize, const LIMBS: usize> Shr<&$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shr(self, rhs: &$u) -> Self::Output {
                <Self>::shr(self, *rhs)
            }
        }
    };

    // Assignment impls, forwarding to the operator impls.
    (@assign $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> ShlAssign<$u> for Uint<BITS, LIMBS> {
            #[inline(always)]
            fn shl_assign(&mut self, rhs: $u) {
                *self = *self << rhs;
            }
        }

        impl<const BITS: usize, const LIMBS: usize> ShrAssign<$u> for Uint<BITS, LIMBS> {
            #[inline(always)]
            fn shr_assign(&mut self, rhs: $u) {
                *self = *self >> rhs;
            }
        }
    };

    // Entry point for one amount type: generate the full impl set,
    // including assignment by reference.
    ($u:ty) => {
        impl_shift!(@main $u);
        impl_shift!(@ref $u);
        impl_shift!(@assign $u);
        impl_shift!(@assign &$u);
    };

    // Recurse over a comma-separated list of amount types.
    ($u:ty, $($tail:ty),*) => {
        impl_shift!($u);
        impl_shift!($($tail),*);
    };
}
693
// Shift-amount types supported on every target.
impl_shift!(usize, u8, u16, u32, isize, i8, i16, i32);

// 64-bit amount types are only offered where `u64 as usize` is lossless.
#[cfg(target_pointer_width = "64")]
impl_shift!(u64, i64);
699
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{aliases::U128, const_for, nlimbs};
    use core::cmp::min;
    use proptest::proptest;

    // Exercises leading_zeros across sizes, plus a cross-check against the
    // primitive u128 implementation.
    #[test]
    fn test_leading_zeros() {
        assert_eq!(Uint::<0, 0>::ZERO.leading_zeros(), 0);
        assert_eq!(Uint::<1, 1>::ZERO.leading_zeros(), 1);
        assert_eq!(Uint::<1, 1>::ONE.leading_zeros(), 0);
        const_for!(BITS in NON_ZERO {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            assert_eq!(U::ZERO.leading_zeros(), BITS);
            assert_eq!(U::MAX.leading_zeros(), 0);
            assert_eq!(U::ONE.leading_zeros(), BITS - 1);
            proptest!(|(value: U)| {
                let zeros = value.leading_zeros();
                assert!(zeros <= BITS);
                assert!(zeros < BITS || value == U::ZERO);
                if zeros < BITS {
                    // Shifting left by exactly the leading-zero count must
                    // not overflow and must set the top bit.
                    let (left, overflow) = value.overflowing_shl(zeros);
                    assert!(!overflow);
                    assert!(left.leading_zeros() == 0 || value == U::ZERO);
                    assert!(left.bit(BITS - 1));
                    assert_eq!(value >> (BITS - zeros), Uint::ZERO);
                }
            });
        });
        proptest!(|(value: u128)| {
            let uint = U128::from(value);
            assert_eq!(uint.leading_zeros(), value.leading_zeros() as usize);
        });
    }

    #[test]
    fn test_leading_ones() {
        assert_eq!(Uint::<0, 0>::ZERO.leading_ones(), 0);
        assert_eq!(Uint::<1, 1>::ZERO.leading_ones(), 0);
        assert_eq!(Uint::<1, 1>::ONE.leading_ones(), 1);
    }

    // Single-limb values must round-trip exactly (exponent 0); two-limb
    // values must come back normalized with the top bit of `bits` set.
    #[test]
    fn test_most_significant_bits() {
        const_for!(BITS in NON_ZERO {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: u64)| {
                let value = if U::LIMBS <= 1 { value & U::MASK } else { value };
                assert_eq!(U::from(value).most_significant_bits(), (value, 0));
            });
        });
        proptest!(|(mut limbs: [u64; 2])| {
            // Force the high limb non-zero so the normalizing branch runs.
            if limbs[1] == 0 {
                limbs[1] = 1;
            }
            let (bits, exponent) = U128::from_limbs(limbs).most_significant_bits();
            assert!(bits >= 1_u64 << 63);
            assert_eq!(exponent, 64 - limbs[1].leading_zeros() as usize);
        });
    }

    #[test]
    fn test_checked_shl() {
        assert_eq!(
            Uint::<65, 2>::from_limbs([0x0010_0000_0000_0000, 0]).checked_shl(1),
            Some(Uint::<65, 2>::from_limbs([0x0020_0000_0000_0000, 0]))
        );
        // Whole-limb shift that stays in range.
        assert_eq!(
            Uint::<127, 2>::from_limbs([0x0010_0000_0000_0000, 0]).checked_shl(64),
            Some(Uint::<127, 2>::from_limbs([0, 0x0010_0000_0000_0000]))
        );
    }

    // Single-limb sizes: compare every operator against the equivalent
    // primitive u64/u32 computation.
    #[test]
    #[allow(
        clippy::cast_lossless,
        clippy::cast_possible_truncation,
        clippy::cast_possible_wrap
    )]
    fn test_small() {
        const_for!(BITS in [1, 2, 8, 16, 32, 63, 64] {
            type U = Uint::<BITS, 1>;
            proptest!(|(a: U, b: U)| {
                assert_eq!(a | b, U::from_limbs([a.limbs[0] | b.limbs[0]]));
                assert_eq!(a & b, U::from_limbs([a.limbs[0] & b.limbs[0]]));
                assert_eq!(a ^ b, U::from_limbs([a.limbs[0] ^ b.limbs[0]]));
            });
            proptest!(|(a: U, s in 0..BITS)| {
                assert_eq!(a << s, U::from_limbs([a.limbs[0] << s & U::MASK]));
                assert_eq!(a >> s, U::from_limbs([a.limbs[0] >> s]));
            });
        });
        // Rotations/reversals against u32 (shift range deliberately exceeds
        // BITS to exercise the modulo-BITS behavior).
        proptest!(|(a: Uint::<32, 1>, s in 0_usize..=34)| {
            assert_eq!(a.reverse_bits(), Uint::from((a.limbs[0] as u32).reverse_bits() as u64));
            assert_eq!(a.rotate_left(s), Uint::from((a.limbs[0] as u32).rotate_left(s as u32) as u64));
            assert_eq!(a.rotate_right(s), Uint::from((a.limbs[0] as u32).rotate_right(s as u32) as u64));
            if s < 32 {
                let arr_shifted = (((a.limbs[0] as i32) >> s) as u32) as u64;
                assert_eq!(a.arithmetic_shr(s), Uint::from_limbs([arr_shifted]));
            }
        });
        // Same checks against u64.
        proptest!(|(a: Uint::<64, 1>, s in 0_usize..=66)| {
            assert_eq!(a.reverse_bits(), Uint::from(a.limbs[0].reverse_bits()));
            assert_eq!(a.rotate_left(s), Uint::from(a.limbs[0].rotate_left(s as u32)));
            assert_eq!(a.rotate_right(s), Uint::from(a.limbs[0].rotate_right(s as u32)));
            if s < 64 {
                let arr_shifted = ((a.limbs[0] as i64) >> s) as u64;
                assert_eq!(a.arithmetic_shr(s), Uint::from_limbs([arr_shifted]));
            }
        });
    }

    // Property: reversing then shifting right equals shifting left then
    // reversing.
    #[test]
    fn test_shift_reverse() {
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let left = (value << shift).reverse_bits();
                let right = value.reverse_bits() >> shift;
                assert_eq!(left, right);
            });
        });
    }

    // Property: rotate_left and rotate_right are inverses.
    #[test]
    fn test_rotate() {
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let rotated = value.rotate_left(shift).rotate_right(shift);
                assert_eq!(value, rotated);
            });
        });
    }

    // Property: arithmetic shift extends a run of leading ones by the shift
    // amount (capped at BITS).
    #[test]
    fn test_arithmetic_shr() {
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let shifted = value.arithmetic_shr(shift);
                assert_eq!(shifted.leading_ones(), match value.leading_ones() {
                    0 => 0,
                    n => min(BITS, n + shift)
                });
            });
        });
    }

    // Fixed-value cases for overflowing_shr, including the inexact-shift
    // flag and multi-limb shifts.
    #[test]
    fn test_overflowing_shr() {
        assert_eq!(
            Uint::<64, 1>::from_limbs([40u64]).overflowing_shr(1),
            (Uint::<64, 1>::from(20), false)
        );

        // Odd value: the shifted-out one bit sets the flag.
        assert_eq!(
            Uint::<64, 1>::from_limbs([41u64]).overflowing_shr(1),
            (Uint::<64, 1>::from(20), true)
        );

        assert_eq!(
            Uint::<65, 2>::from_limbs([0x0010_0000_0000_0000, 0]).overflowing_shr(1),
            (Uint::<65, 2>::from_limbs([0x0008_0000_0000_0000, 0]), false)
        );

        assert_eq!(
            Uint::<256, 4>::MAX.overflowing_shr(65),
            (
                Uint::<256, 4>::from_str_radix(
                    "7fffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                true
            )
        );
        assert_eq!(
            Uint::<4096, 64>::from_str_radix("3ffffffffffffffffffffffffffffc00000000", 16,)
                .unwrap()
                .overflowing_shr(34),
            (
                Uint::<4096, 64>::from_str_radix("fffffffffffffffffffffffffffff", 16).unwrap(),
                false
            )
        );
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "fffffffffffffffffffffffffffff0000000000000000000000000",
                16,
            )
            .unwrap()
            .overflowing_shr(100),
            (
                Uint::<4096, 64>::from_str_radix("fffffffffffffffffffffffffffff", 16).unwrap(),
                false
            )
        );
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0bdbfe",
                16,
            )
            .unwrap()
            .overflowing_shr(1),
            (
                Uint::<4096, 64>::from_str_radix(
                    "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffff85edff",
                    16
                )
                .unwrap(),
                false
            )
        );
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
                16,
            )
            .unwrap()
            .overflowing_shr(1000),
            (
                Uint::<4096, 64>::from_str_radix(
                    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                false
            )
        );
        assert_eq!(
            Uint::<4096, 64>::MAX
                .overflowing_shr(34),
            (
                Uint::<4096, 64>::from_str_radix(
                    "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                true
            )
        );
    }
}