From e54485f12b471901bee2972557b9a28f7ae9ca79 Mon Sep 17 00:00:00 2001
From: Timothy Warren
Date: Tue, 5 May 2020 15:41:27 -0400
Subject: [PATCH] Allow comparing size of BigInts, and allow comparing BigInts
 to number primitives

---
 src/bigint.rs | 181 ++++++++++++++++++++++++++++++++++++++++----------
 src/num.rs    |  27 +++++++-
 2 files changed, 173 insertions(+), 35 deletions(-)

diff --git a/src/bigint.rs b/src/bigint.rs
index cce0172..fb5feff 100644
--- a/src/bigint.rs
+++ b/src/bigint.rs
@@ -13,6 +13,7 @@ use alloc::string::*;
 #[cfg(feature = "std")]
 use std::prelude::v1::*;
 
+use core::cmp::{Ordering, PartialOrd, PartialEq};
 use core::convert::TryInto;
 use core::mem::replace;
 use core::ops::{
@@ -131,7 +132,7 @@ impl BigInt {
         todo!();
     }
 
-    fn get_digit_count(a: &Self, b: &Self) -> usize {
+    fn get_ceil_digit_count(a: &Self, b: &Self) -> usize {
         let a_digits = a.inner.len();
         let b_digits = b.inner.len();
 
@@ -164,6 +165,38 @@ impl BigInt {
             Positive
         }
     }
+
+    /// Normal primitive multiplication
+    fn prim_mul(self, rhs: Self, digits: usize) -> Self {
+        let mut out = BigInt::with_capacity(digits);
+
+        let mut carry = 0usize;
+        for i in 0..digits {
+            let a = *self.inner.get(i).unwrap_or(&0usize);
+            let b = *rhs.inner.get(i).unwrap_or(&0usize);
+
+            if a == 0 || b == 0 {
+                out.inner.push(0);
+                continue;
+            }
+
+            let (res, overflowed) = a.overflowing_mul(b);
+
+            if overflowed {
+                todo!()
+            } else {
+                let (res, overflowed) = res.overflowing_add(carry);
+
+                out.inner.push(res);
+                carry = if overflowed { 1 } else { 0 };
+            }
+        }
+
+        out.sign = Self::get_sign(self, rhs, FracOp::Other);
+        out.shrink_to_fit();
+
+        out
+    }
 }
 
 impl Add for BigInt {
@@ -178,7 +211,7 @@ impl Add for BigInt {
             return self - -rhs;
         }
 
-        let digits = Self::get_digit_count(&self, &rhs) + 1;
+        let digits = Self::get_ceil_digit_count(&self, &rhs) + 1;
         let mut out = BigInt::with_capacity(digits);
 
         let mut carry = 0usize;
@@ -217,7 +250,7 @@ impl Sub for BigInt {
     type Output = Self;
 
     fn sub(self, rhs: Self) -> Self::Output {
-        let digits = Self::get_digit_count(&self, &rhs);
+        let digits = Self::get_ceil_digit_count(&self, &rhs);
         let mut out = BigInt::with_capacity(digits);
 
         // Handle cases where addition makes more sense
@@ -233,16 +266,17 @@ impl Sub for BigInt {
             let b = *rhs.inner.get(i).unwrap_or(&0usize);
 
             if a >= borrow && (a - borrow) >= b {
+                // This is the easy way, no additional borrowing or underflow
                 let res = a - b - borrow;
 
                 out.inner.push(res);
                 borrow = 0;
             } else {
-                // To prevent subtraction overflow, the max borrowed
-                // value is usize::MAX. The rest of the borrowed value
+                // To prevent overflow, the max borrowed value is
+                // usize::MAX (place-value - 1). The rest of the borrowed value
                 // will be added on afterwords.
                 // In base ten, this would be like:
-                // 18 - 9 = 9-9 + 9
+                // 15 - 8 = (9 - 8) + (5 + 1)
                 let rem = (a + 1) - borrow;
                 let res = (core::usize::MAX - b) + rem;
                 out.inner.push(res);
@@ -263,36 +297,12 @@ impl Mul for BigInt {
     type Output = Self;
 
     fn mul(self, rhs: Self) -> Self::Output {
+        let input_digits = Self::get_ceil_digit_count(&self, &rhs);
+
         // Multiplication can result in twice the number of digits
-        let digits = Self::get_digit_count(&self, &rhs) * 2;
-        let mut out = BigInt::with_capacity(digits);
+        let out_digits = Self::get_ceil_digit_count(&self, &rhs) * 2;
 
-        let mut carry = 0usize;
-        for i in 0..digits {
-            let a = *self.inner.get(i).unwrap_or(&0usize);
-            let b = *rhs.inner.get(i).unwrap_or(&0usize);
-
-            if a == 0 || b == 0 {
-                out.inner.push(0);
-                continue;
-            }
-
-            let (res, overflowed) = a.overflowing_mul(b);
-
-            if overflowed {
-                todo!()
-            } else {
-                let (res, overflowed) = res.overflowing_add(carry);
-
-                out.inner.push(res);
-                carry = if overflowed { 1 } else { 0 };
-            }
-        }
-
-        out.sign = Self::get_sign(self, rhs, FracOp::Other);
-        out.shrink_to_fit();
-
-        out
+        self.prim_mul(rhs, out_digits)
     }
 }
 
@@ -377,6 +387,48 @@ impl Not for BigInt {
     }
 }
 
+impl PartialOrd for BigInt {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        // The signs differ
+        if self.sign != other.sign {
+            // If the signs are different, the magnitude doesn't matter
+            // unless the value is zero on both sides
+            return if self.eq(&0) && other.eq(&0) {
+                Some(Ordering::Equal)
+            } else {
+                self.sign.partial_cmp(&other.sign)
+            };
+        }
+
+        // Everything is the same
+        if self.inner == other.inner {
+            return Some(Ordering::Equal);
+        }
+
+        // The number of place values differs
+        if self.inner.len() != other.inner.len() {
+            return if self.inner.len() > other.inner.len() {
+                Some(Ordering::Greater)
+            } else {
+                Some(Ordering::Less)
+            };
+        }
+
+        // At this point the sign is the same and the number of place values is equal,
+        // so compare the individual place values (from greatest to least) until they
+        // differ. They cannot all be equal; that case was handled above.
+        for i in (0usize..self.inner.len()).rev() {
+            if self.inner[i] < other.inner[i] {
+                return Some(Ordering::Less);
+            } else if self.inner[i] > other.inner[i] {
+                return Some(Ordering::Greater);
+            }
+        }
+
+        unreachable!();
+    }
+}
+
 macro_rules! impl_from_smaller {
     ($(($s: ty, $u: ty)),* ) => {
         $(
@@ -436,11 +488,45 @@ macro_rules! impl_from_larger {
     };
 }
 
+macro_rules! impl_ord_literal {
+    ($(($($prim: ty),+), $base: ty), *) => {
+        $(
+            $(
+                impl PartialEq<$prim> for BigInt {
+                    fn eq(&self, other: &$prim) -> bool {
+                        self == &BigInt::from(*other)
+                    }
+                }
+
+                impl PartialEq<BigInt> for $prim {
+                    fn eq(&self, other: &BigInt) -> bool {
+                        &BigInt::from(*self) == other
+                    }
+                }
+
+                impl PartialOrd<$prim> for BigInt {
+                    fn partial_cmp(&self, other: &$prim) -> Option<Ordering> {
+                        self.partial_cmp(&BigInt::from(*other))
+                    }
+                }
+
+                impl PartialOrd<BigInt> for $prim {
+                    fn partial_cmp(&self, other: &BigInt) -> Option<Ordering> {
+                        (&BigInt::from(*self)).partial_cmp(other)
+                    }
+                }
+            )+
+        )*
+    };
+}
+
 #[cfg(target_pointer_width = "32")]
 impl_from_larger!((i64, u64), (i128, u128));
 #[cfg(target_pointer_width = "32")]
 impl_from_smaller!((i8, u8), (i16, u16), (i32, u32));
 #[cfg(target_pointer_width = "32")]
+impl_ord_literal!((i8,u8,i16,u16,i32,u32,i64,u64), u32);
+#[cfg(target_pointer_width = "32")]
 static BITS: usize = 32;
 
 #[cfg(target_pointer_width = "64")]
@@ -448,6 +534,8 @@ impl_from_larger!((i128, u128));
 #[cfg(target_pointer_width = "64")]
 impl_from_smaller!((i8, u8), (i16, u16), (i32, u32), (i64, u64));
 #[cfg(target_pointer_width = "64")]
+impl_ord_literal!((i8,u8,i16,u16,i32,u32,i64,u64), u32);
+#[cfg(target_pointer_width = "64")]
 static BITS: usize = 64;
 
 #[cfg(test)]
@@ -699,6 +787,31 @@ mod tests {
         assert_eq!(b.inner[0], core::usize::MAX);
     }
 
+    #[test]
+    fn test_partial_eq() {
+        let a = 12345u16;
+        let b = BigInt::from(a);
+
+        assert!(a.eq(&b));
+        assert!(b.eq(&a));
+    }
+
+    #[test]
+    fn test_partial_ord() {
+        let a = 12345u32;
+        let b = BigInt::from(a);
+        let c = 3u8;
+
+        assert_eq!(a.partial_cmp(&b), Some(Ordering::Equal));
+        assert_eq!(c.partial_cmp(&b), Some(Ordering::Less));
+        assert_eq!(b.partial_cmp(&c), Some(Ordering::Greater));
+
+        assert!(big_int!(-32) < big_int!(3));
+        assert!(big_int!(3) > big_int!(-32));
+        assert!(big_int!(152) > big_int!(132));
+        assert_eq!(big_int!(123), big_int!(123));
+    }
+
     #[test]
     fn test_from() {
         // Signed numbers
diff --git a/src/num.rs b/src/num.rs
index c080e89..262fe4d 100644
--- a/src/num.rs
+++ b/src/num.rs
@@ -2,7 +2,7 @@
 //!
 //! Home to the numeric trait chain of doom, aka `Unsigned`
 #![allow(unused_comparisons)]
-use core::cmp::{max, min};
+use core::cmp::{max, min, Ordering};
 use core::convert::TryFrom;
 use core::fmt::Debug;
 use core::ops::{
@@ -55,6 +55,21 @@ impl Not for Sign {
     }
 }
 
+impl PartialOrd for Sign {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        match self {
+            Self::Positive => match other {
+                Self::Positive => Some(Ordering::Equal),
+                Self::Negative => Some(Ordering::Greater),
+            },
+            Self::Negative => match other {
+                Self::Positive => Some(Ordering::Less),
+                Self::Negative => Some(Ordering::Equal),
+            }
+        }
+    }
+}
+
 /// Native number type
 pub trait Num:
     Add
@@ -303,6 +318,16 @@ mod tests {
         let ns = !s;
 
         assert_eq!(ns, Sign::Negative);
+
+        let a = Sign::Negative;
+        let b = Sign::Positive;
+        let c = Sign::Negative;
+        let d = Sign::Positive;
+
+        assert_eq!(a.partial_cmp(&b), Some(Ordering::Less));
+        assert_eq!(b.partial_cmp(&a), Some(Ordering::Greater));
+        assert_eq!(a.partial_cmp(&c), Some(Ordering::Equal));
+        assert_eq!(b.partial_cmp(&d), Some(Ordering::Equal));
     }
 
     #[test]