rusty-numbers/src/rational.rs

//! # Rational Numbers (fractions)
use crate::num::*;
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};
/// Type representing a fraction
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Frac<T: Unsigned = usize> {
    numer: T,
    denom: T,
    sign: Sign,
}
#[macro_export]
/// Create a [Frac](rational/struct.Frac.html) type with signed or unsigned number literals
///
/// Accepts:
///
/// ```ignore
/// // Fractions
/// frac!(1/3);
///
/// // Whole numbers
/// frac!(5u8);
///
/// // Whole numbers and fractions
/// frac!(1 1/2);
/// ```
macro_rules! frac {
    ($w:literal $n:literal / $d:literal) => {
        frac!($w) + frac!($n / $d)
    };
    ($n:literal / $d:literal) => {
        frac($n, $d)
    };
    ($w:literal) => {
        frac($w, 1)
    };
}
/// Create a new rational number from signed or unsigned integers
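///
/// For illustration only (a hedged sketch, marked `ignore` rather than run as a
/// doctest because this helper is private to the crate):
///
/// ```ignore
/// // a negative numerator or denominator makes the whole fraction negative,
/// // and two negative signs cancel out
/// assert_eq!(frac(-1, 3), -frac(1, 3));
/// assert_eq!(frac(-1, -3), frac(1, 3));
/// ```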
#[allow(dead_code)]
fn frac<T: Int + Int<Un = U>, U: Unsigned>(n: T, d: T) -> Frac<U> {
    let mut sign = Sign::Positive;
    if n.is_neg() {
        sign = !sign;
    }
    if d.is_neg() {
        sign = !sign;
    }
    let numer = n.to_unsigned();
    let denom = d.to_unsigned();
    Frac::new(numer, denom, sign)
}
impl<T: Unsigned> Frac<T> {
    /// Create a new rational number from unsigned integers and a sign
    ///
    /// Generally, you will probably prefer to use the [frac!](../macro.frac.html) macro
    /// instead, as that accepts both signed and unsigned arguments
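    ///
    /// For illustration only (a hedged sketch, marked `ignore` since it assumes
    /// `Sign` is reachable from the crate root alongside `Frac`):
    ///
    /// ```ignore
    /// // directly build 1/2; frac!(1/2) expands to an equivalent call
    /// let half = Frac::new(1usize, 2, Sign::Positive);
    /// ```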
    pub fn new(n: T, d: T, s: Sign) -> Frac<T> {
        if d.is_zero() {
            panic!("Fraction cannot have a zero denominator");
        }
        Frac {
            numer: n,
            denom: d,
            sign: s,
        }
        .reduce()
    }
    /// Determine the output sign of a product or quotient from the two input signs
    fn get_sign(a: Self, b: Self) -> Sign {
        if a.sign == b.sign {
            Sign::Positive
        } else {
            Sign::Negative
        }
    }
    /// Convert the fraction to its simplest form
    fn reduce(mut self) -> Self {
        let gcd = T::gcd(self.numer, self.denom);
        self.numer /= gcd;
        self.denom /= gcd;
        self
    }
}
impl<T: Unsigned + Mul<Output = T>> Mul for Frac<T> {
    type Output = Self;
    fn mul(self, rhs: Self) -> Self {
        let numer = self.numer * rhs.numer;
        let denom = self.denom * rhs.denom;
        let sign = Self::get_sign(self, rhs);
        Self::new(numer, denom, sign)
    }
}
impl<T: Unsigned + Mul<Output = T>> MulAssign for Frac<T> {
    fn mul_assign(&mut self, rhs: Self) {
        *self = *self * rhs
    }
}
impl<T: Unsigned + Mul<Output = T>> Div for Frac<T> {
    type Output = Self;
    fn div(self, rhs: Self) -> Self {
        // Dividing by a fraction multiplies by its reciprocal
        let numer = self.numer * rhs.denom;
        let denom = self.denom * rhs.numer;
        let sign = Self::get_sign(self, rhs);
        Self::new(numer, denom, sign)
    }
}
impl<T: Unsigned + Mul<Output = T>> DivAssign for Frac<T> {
    fn div_assign(&mut self, rhs: Self) {
        *self = *self / rhs
    }
}
impl<T: Unsigned + Add<Output = T> + Sub<Output = T> + Mul<Output = T>> Add for Frac<T> {
    type Output = Self;
    fn add(self, rhs: Self) -> Self::Output {
        let a = self;
        let b = rhs;
        // Bring both magnitudes over a common denominator; `new` reduces the
        // result, so the least common denominator is not needed here
        let a_numer = a.numer * b.denom;
        let b_numer = b.numer * a.denom;
        let denom = a.denom * b.denom;
        if a.sign == b.sign {
            // Same sign: add the magnitudes and keep the shared sign
            Self::new(a_numer + b_numer, denom, a.sign)
        } else if a_numer > b_numer {
            // Differing signs: subtract the smaller magnitude from the larger
            // and take the sign of the operand with the larger magnitude
            Self::new(a_numer - b_numer, denom, a.sign)
        } else if b_numer > a_numer {
            Self::new(b_numer - a_numer, denom, b.sign)
        } else {
            // Equal magnitudes cancel; normalize the zero result to positive
            Self::new(a_numer - b_numer, denom, Sign::Positive)
        }
    }
}
impl<T: Unsigned + Add<Output = T> + Sub<Output = T> + Mul<Output = T>> AddAssign for Frac<T> {
    fn add_assign(&mut self, rhs: Self) {
        *self = *self + rhs
    }
}
impl<T: Unsigned + Add<Output = T> + Sub<Output = T> + Mul<Output = T>> Sub for Frac<T> {
    type Output = Self;
    fn sub(self, rhs: Self) -> Self::Output {
        // Subtraction is addition of the negated right-hand side, which lets
        // `Add` handle every sign and magnitude combination in one place
        self + -rhs
    }
}
impl<T: Unsigned + Add<Output = T> + Sub<Output = T> + Mul<Output = T>> SubAssign for Frac<T> {
    fn sub_assign(&mut self, rhs: Self) {
        *self = *self - rhs
    }
}
impl<T: Unsigned> Neg for Frac<T> {
    type Output = Self;
    fn neg(self) -> Self::Output {
        let mut out = self;
        out.sign = !self.sign;
        out
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn mul_test() {
        let frac1 = frac!(1 / 3u8);
        let frac2 = frac!(2u8 / 3);
        let expected = frac!(2u8 / 9);
        assert_eq!(frac1 * frac2, expected);
    }
    #[test]
    fn add_test() {
        assert_eq!(frac!(5 / 6), frac!(1 / 3) + frac!(1 / 2));
        assert_eq!(frac!(1 / 3), frac!(2 / 3) + -frac!(1 / 3), "2/3 + -1/3");
        assert_eq!(-frac!(1 / 3), -frac!(2 / 3) + frac!(1 / 3), "-2/3 + 1/3");
    }
    #[test]
    fn sub_test() {
        assert_eq!(frac!(1 / 6), frac!(1 / 2) - frac!(1 / 3));
        assert_eq!(frac!(1), frac!(1 / 3) - -frac!(2 / 3), "1/3 - -2/3");
        assert_eq!(-frac!(1 / 1), -frac!(2 / 3) - frac!(1 / 3), "-2/3 - 1/3");
    }
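    // Illustrative extra test (an addition, not part of the original suite):
    // dividing by a fraction multiplies by its reciprocal, and a negative
    // divisor flips the sign
    #[test]
    fn div_test() {
        assert_eq!(frac!(3 / 2), frac!(1 / 2) / frac!(1 / 3));
        assert_eq!(-frac!(3 / 2), frac!(1 / 2) / -frac!(1 / 3));
    }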
    #[test]
    fn macro_test() {
        let frac1 = frac!(1 / 3);
        let frac2 = frac!(1u32 / 3);
        assert_eq!(frac1, frac2);
        let frac1 = -frac!(1 / 2);
        let frac2 = -frac!(1u32 / 2);
        assert_eq!(frac1, frac2);
        assert_eq!(frac!(3 / 2), frac!(1 1/2));
        assert_eq!(frac!(3 / 1), frac!(3));
    }
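    // Illustrative extra test (an addition, not part of the original suite):
    // `new` reduces fractions to lowest terms, and double negation restores
    // the original sign
    #[test]
    fn reduce_test() {
        assert_eq!(frac!(1 / 2), frac!(2 / 4));
        assert_eq!(frac!(1 / 2), -(-frac!(1 / 2)));
    }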
}