//! # Rational Numbers (fractions)
//!
//! Traits to implement:
//! * Add
//! * AddAssign
//! * Div
//! * DivAssign
//! * Mul
//! * MulAssign
//! * Neg
//! * Sub
//! * SubAssign

use crate::num::*;
use std::ops::{Add, Div, Mul, Neg, Sub};

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Frac<T: Unsigned> {
    numer: T,
    denom: T,
    sign: Sign,
}

#[macro_export]
macro_rules! frac {
    ($n:literal / $d:literal) => {
        frac($n, $d)
    };
}

/// Create a new rational number from two signed integers.
///
/// NOTE: the generic bounds below are reconstructed; the exact trait names
/// (`Signed`, its unsigned counterpart, `to_unsigned`, `is_neg`) live in
/// `crate::num` and may differ slightly.
pub fn frac<S: Signed<Unsigned = U>, U: Unsigned>(n: S, d: S) -> Frac<U> {
    // Converting from signed to unsigned should always be safe when using
    // the absolute value, especially since the conversion stays within the
    // same bit size.
    let mut sign = Sign::Positive;
    let numer = n.to_unsigned();
    let denom = d.to_unsigned();

    if n.is_neg() {
        sign = !sign;
    }
    if d.is_neg() {
        sign = !sign;
    }

    Frac { numer, denom, sign }.reduce()
}

impl<T: Unsigned> Frac<T> {
    /// Create a new rational number
    pub fn new(n: T, d: T, s: Sign) -> Frac<T> {
        if d.is_zero() {
            panic!("Fraction can not have a zero denominator");
        }

        Frac {
            numer: n,
            denom: d,
            sign: s,
        }
        .reduce()
    }

    /// Determine the output sign given the two input signs
    /// (multiplication/division rule: differing signs give a negative result)
    fn get_sign(a: Self, b: Self) -> Sign {
        if a.sign != b.sign {
            Sign::Negative
        } else {
            Sign::Positive
        }
    }

    /// Convert the fraction to its simplest form
    fn reduce(mut self) -> Self {
        let gcd = T::gcd(self.numer, self.denom);
        self.numer /= gcd;
        self.denom /= gcd;
        self
    }
}

impl<T: Unsigned + Mul<Output = T>> Mul for Frac<T> {
    type Output = Self;

    fn mul(self, rhs: Self) -> Self {
        let numer = self.numer * rhs.numer;
        let denom = self.denom * rhs.denom;
        let sign = Self::get_sign(self, rhs);

        Self::new(numer, denom, sign)
    }
}

impl<T: Unsigned + Mul<Output = T>> Div for Frac<T> {
    type Output = Self;

    fn div(self, rhs: Self) -> Self {
        // Dividing by a fraction is multiplying by its reciprocal
        let numer = self.numer * rhs.denom;
        let denom = self.denom * rhs.numer;
        let sign = Self::get_sign(self, rhs);

        Self::new(numer, denom, sign)
    }
}

impl<T: Unsigned + Add<Output = T> + Sub<Output = T> + Mul<Output = T>> Add for Frac<T> {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        let a = self;
        let b = rhs;

        // If the signs differ, addition is really a subtraction:
        // a + b == a - (-b), and -b has the same sign as a.
        if a.sign != b.sign {
            return a - -b;
        }

        // Find a common denominator if needed
        if a.denom != b.denom {
            // Let's just use the simplest method, rather than
            // worrying about reducing to the least common denominator
            let numer = (a.numer * b.denom) + (b.numer * a.denom);
            let denom = a.denom * b.denom;
            // Both operands share a sign here, so the sum keeps it
            return Self::new(numer, denom, a.sign);
        }

        let numer = a.numer + b.numer;
        let denom = a.denom;
        Self::new(numer, denom, a.sign)
    }
}

impl<T: Unsigned + Add<Output = T> + Sub<Output = T> + Mul<Output = T>> Sub for Frac<T> {
    type Output = Self;

    fn sub(self, rhs: Self) -> Self::Output {
        let a = self;
        let b = rhs;

        // If the signs differ, subtraction is really an addition:
        // a - b == a + (-b), and -b has the same sign as a.
        if a.sign != b.sign {
            return a + -b;
        }

        // Same sign: compare magnitudes over a common denominator so the
        // unsigned subtraction cannot underflow, and take the sign from the
        // operand with the larger magnitude.
        // NOTE: this body is a sketch of the missing subtraction logic; it
        // assumes `Unsigned` values can be compared with `>=`.
        let a_mag = a.numer * b.denom;
        let b_mag = b.numer * a.denom;
        let denom = a.denom * b.denom;

        if a_mag >= b_mag {
            Self::new(a_mag - b_mag, denom, a.sign)
        } else {
            Self::new(b_mag - a_mag, denom, !a.sign)
        }
    }
}

impl<T: Unsigned> Neg for Frac<T> {
    type Output = Self;

    fn neg(self) -> Self::Output {
        let mut out = self;
        out.sign = !self.sign;
        out
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn mul_test() {
        let frac1 = Frac::new(1u8, 3u8, Sign::Positive);
        let frac2 = Frac::new(2u8, 3u8, Sign::Positive);
        let expected = Frac::new(2u8, 9u8, Sign::Positive);
        assert_eq!(frac1 * frac2, expected);
    }

    #[test]
    fn add_test() {
        assert_eq!(frac!(5 / 6), frac!(1 / 3) + frac!(1 / 2));
    }

    #[test]
    fn macro_test() {
        let frac1 = frac!(1 / 3);
        let frac2 = Frac::new(1u32, 3, Sign::Positive);
        assert_eq!(frac1, frac2);

        let frac1 = -frac!(1 / 2);
        let frac2 = Frac::new(1u32, 2, Sign::Negative);
        assert_eq!(frac1, frac2);
    }
}
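
// The tests below are an added usage sketch, not part of the original
// module. They only lean on what the existing tests already rely on:
// `u32: Unsigned`, `i32`'s unsigned counterpart being `u32`, and `Sign`
// implementing `Not`.
#[cfg(test)]
mod usage_sketch_tests {
    use super::*;

    #[test]
    fn div_reduces_result() {
        // (1/2) / (1/4) == 4/2, which reduces to 2/1
        assert_eq!(
            frac!(1 / 2) / frac!(1 / 4),
            Frac::new(2u32, 1, Sign::Positive)
        );
    }

    #[test]
    fn new_reduces_to_lowest_terms() {
        // 2/4 is stored in its simplest form, 1/2
        assert_eq!(Frac::new(2u32, 4, Sign::Positive), frac!(1 / 2));
    }

    #[test]
    fn negative_numerator_flips_sign() {
        // A negative input to `frac` flips the sign of the fraction
        assert_eq!(frac(-1, 2), -frac!(1 / 2));
    }
}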