fixed/
cmp_fixed.rs

// Copyright © 2018–2025 Trevor Spiteri

// This library is free software: you can redistribute it and/or
// modify it under the terms of either
//
//   * the Apache License, Version 2.0 or
//   * the MIT License
//
// at your option.
//
// You should have received copies of the Apache License and the MIT
// License along with the library. If not, see
// <https://www.apache.org/licenses/LICENSE-2.0> and
// <https://opensource.org/licenses/MIT>.
15
16use crate::types::extra::Unsigned;
17use crate::{
18    FixedI8, FixedI16, FixedI32, FixedI64, FixedI128, FixedU8, FixedU16, FixedU32, FixedU64,
19    FixedU128,
20};
21use core::cmp::Ordering;
22
// Comparisons between a signed and an unsigned fixed-point number. Works by
// converting the signed number to unsigned, but does not change size of either
// number. A negative signed value is never equal to, and always less than, any
// unsigned value, so that case is decided up front; otherwise the signed bits
// are reinterpreted as unsigned (the cast is value-preserving for
// non-negative values) and the comparison is delegated to the
// unsigned-vs-unsigned implementations.
macro_rules! diff_sign {
    ($Sig:ident($Uns:ident, $UnsInner:ident), $OtherUns:ident) => {
        // unsigned == signed
        impl<FracLhs: Unsigned, FracRhs: Unsigned> PartialEq<$Sig<FracRhs>> for $OtherUns<FracLhs> {
            #[inline]
            fn eq(&self, rhs: &$Sig<FracRhs>) -> bool {
                // A negative value can never equal an unsigned value.
                if rhs.is_negative() {
                    return false;
                }
                // rhs >= 0, so the cast keeps the numeric value.
                let unsigned_rhs = $Uns::<FracRhs>::from_bits(rhs.to_bits() as $UnsInner);
                PartialEq::eq(self, &unsigned_rhs)
            }
        }

        // signed == unsigned
        impl<FracLhs: Unsigned, FracRhs: Unsigned> PartialEq<$OtherUns<FracRhs>> for $Sig<FracLhs> {
            #[inline]
            fn eq(&self, rhs: &$OtherUns<FracRhs>) -> bool {
                // A negative value can never equal an unsigned value.
                if self.is_negative() {
                    return false;
                }
                // self >= 0, so the cast keeps the numeric value.
                let unsigned_lhs = $Uns::<FracLhs>::from_bits(self.to_bits() as $UnsInner);
                PartialEq::eq(&unsigned_lhs, rhs)
            }
        }

        // unsigned vs signed ordering
        impl<FracLhs: Unsigned, FracRhs: Unsigned> PartialOrd<$Sig<FracRhs>>
            for $OtherUns<FracLhs>
        {
            #[inline]
            fn partial_cmp(&self, rhs: &$Sig<FracRhs>) -> Option<Ordering> {
                // Any unsigned value is greater than a negative value.
                if rhs.is_negative() {
                    return Some(Ordering::Greater);
                }
                let unsigned_rhs = $Uns::<FracRhs>::from_bits(rhs.to_bits() as $UnsInner);
                PartialOrd::partial_cmp(self, &unsigned_rhs)
            }

            #[inline]
            fn lt(&self, rhs: &$Sig<FracRhs>) -> bool {
                // An unsigned value is never less than a negative value.
                if rhs.is_negative() {
                    return false;
                }
                let unsigned_rhs = $Uns::<FracRhs>::from_bits(rhs.to_bits() as $UnsInner);
                PartialOrd::lt(self, &unsigned_rhs)
            }

            #[inline]
            fn le(&self, rhs: &$Sig<FracRhs>) -> bool {
                // An unsigned value is never <= a negative value.
                if rhs.is_negative() {
                    return false;
                }
                let unsigned_rhs = $Uns::<FracRhs>::from_bits(rhs.to_bits() as $UnsInner);
                PartialOrd::le(self, &unsigned_rhs)
            }

            #[inline]
            fn gt(&self, rhs: &$Sig<FracRhs>) -> bool {
                // An unsigned value is always greater than a negative value.
                if rhs.is_negative() {
                    return true;
                }
                let unsigned_rhs = $Uns::<FracRhs>::from_bits(rhs.to_bits() as $UnsInner);
                PartialOrd::gt(self, &unsigned_rhs)
            }

            #[inline]
            fn ge(&self, rhs: &$Sig<FracRhs>) -> bool {
                // An unsigned value is always >= a negative value.
                if rhs.is_negative() {
                    return true;
                }
                let unsigned_rhs = $Uns::<FracRhs>::from_bits(rhs.to_bits() as $UnsInner);
                PartialOrd::ge(self, &unsigned_rhs)
            }
        }

        // signed vs unsigned ordering (mirror image of the impl above)
        impl<FracLhs: Unsigned, FracRhs: Unsigned> PartialOrd<$OtherUns<FracRhs>>
            for $Sig<FracLhs>
        {
            #[inline]
            fn partial_cmp(&self, rhs: &$OtherUns<FracRhs>) -> Option<Ordering> {
                // A negative value is less than any unsigned value.
                if self.is_negative() {
                    return Some(Ordering::Less);
                }
                let unsigned_lhs = $Uns::<FracLhs>::from_bits(self.to_bits() as $UnsInner);
                PartialOrd::partial_cmp(&unsigned_lhs, rhs)
            }

            #[inline]
            fn lt(&self, rhs: &$OtherUns<FracRhs>) -> bool {
                // A negative value is always less than an unsigned value.
                if self.is_negative() {
                    return true;
                }
                let unsigned_lhs = $Uns::<FracLhs>::from_bits(self.to_bits() as $UnsInner);
                PartialOrd::lt(&unsigned_lhs, rhs)
            }

            #[inline]
            fn le(&self, rhs: &$OtherUns<FracRhs>) -> bool {
                // A negative value is always <= an unsigned value.
                if self.is_negative() {
                    return true;
                }
                let unsigned_lhs = $Uns::<FracLhs>::from_bits(self.to_bits() as $UnsInner);
                PartialOrd::le(&unsigned_lhs, rhs)
            }

            #[inline]
            fn gt(&self, rhs: &$OtherUns<FracRhs>) -> bool {
                // A negative value is never greater than an unsigned value.
                if self.is_negative() {
                    return false;
                }
                let unsigned_lhs = $Uns::<FracLhs>::from_bits(self.to_bits() as $UnsInner);
                PartialOrd::gt(&unsigned_lhs, rhs)
            }

            #[inline]
            fn ge(&self, rhs: &$OtherUns<FracRhs>) -> bool {
                // A negative value is never >= an unsigned value.
                if self.is_negative() {
                    return false;
                }
                let unsigned_lhs = $Uns::<FracLhs>::from_bits(self.to_bits() as $UnsInner);
                PartialOrd::ge(&unsigned_lhs, rhs)
            }
        }
    };

    // One-argument arm: pair the signed type with every unsigned width.
    ($Sig:ident($Uns:ident, $UnsInner:ident)) => {
        diff_sign! { $Sig($Uns, $UnsInner), FixedU8 }
        diff_sign! { $Sig($Uns, $UnsInner), FixedU16 }
        diff_sign! { $Sig($Uns, $UnsInner), FixedU32 }
        diff_sign! { $Sig($Uns, $UnsInner), FixedU64 }
        diff_sign! { $Sig($Uns, $UnsInner), FixedU128 }
    };
}
156
// Signed-vs-unsigned comparisons for each signed width; the macro's
// one-argument arm pairs each with every unsigned width.
diff_sign! { FixedI8(FixedU8, u8) }
diff_sign! { FixedI16(FixedU16, u16) }
diff_sign! { FixedI32(FixedU32, u32) }
diff_sign! { FixedI64(FixedU64, u64) }
diff_sign! { FixedI128(FixedU128, u128) }
162
// Both numbers must have the same sign (both signed or both unsigned). Works
// by widening the narrow number to the wide number's width — a lossless
// conversion, since the signedness matches — and then delegating to the
// same-width comparison implementations.
macro_rules! diff_size {
    ($Nar:ident, $Wid:ident($WidInner:ident)) => {
        // wide == narrow: widen the right-hand side.
        impl<FracLhs: Unsigned, FracRhs: Unsigned> PartialEq<$Nar<FracRhs>> for $Wid<FracLhs> {
            #[inline]
            fn eq(&self, rhs: &$Nar<FracRhs>) -> bool {
                let rhs_wide = $Wid::<FracRhs>::from_bits($WidInner::from(rhs.to_bits()));
                *self == rhs_wide
            }
        }

        // narrow == wide: widen the left-hand side.
        impl<FracLhs: Unsigned, FracRhs: Unsigned> PartialEq<$Wid<FracRhs>> for $Nar<FracLhs> {
            #[inline]
            fn eq(&self, rhs: &$Wid<FracRhs>) -> bool {
                let lhs_wide = $Wid::<FracLhs>::from_bits($WidInner::from(self.to_bits()));
                lhs_wide == *rhs
            }
        }

        // wide vs narrow ordering: widen the right-hand side.
        impl<FracLhs: Unsigned, FracRhs: Unsigned> PartialOrd<$Nar<FracRhs>> for $Wid<FracLhs> {
            #[inline]
            fn partial_cmp(&self, rhs: &$Nar<FracRhs>) -> Option<Ordering> {
                let rhs_wide = $Wid::<FracRhs>::from_bits($WidInner::from(rhs.to_bits()));
                <$Wid<FracLhs> as PartialOrd<$Wid<FracRhs>>>::partial_cmp(self, &rhs_wide)
            }

            #[inline]
            fn lt(&self, rhs: &$Nar<FracRhs>) -> bool {
                let rhs_wide = $Wid::<FracRhs>::from_bits($WidInner::from(rhs.to_bits()));
                *self < rhs_wide
            }

            #[inline]
            fn le(&self, rhs: &$Nar<FracRhs>) -> bool {
                let rhs_wide = $Wid::<FracRhs>::from_bits($WidInner::from(rhs.to_bits()));
                *self <= rhs_wide
            }

            #[inline]
            fn gt(&self, rhs: &$Nar<FracRhs>) -> bool {
                let rhs_wide = $Wid::<FracRhs>::from_bits($WidInner::from(rhs.to_bits()));
                *self > rhs_wide
            }

            #[inline]
            fn ge(&self, rhs: &$Nar<FracRhs>) -> bool {
                let rhs_wide = $Wid::<FracRhs>::from_bits($WidInner::from(rhs.to_bits()));
                *self >= rhs_wide
            }
        }

        // narrow vs wide ordering: widen the left-hand side.
        impl<FracLhs: Unsigned, FracRhs: Unsigned> PartialOrd<$Wid<FracRhs>> for $Nar<FracLhs> {
            #[inline]
            fn partial_cmp(&self, rhs: &$Wid<FracRhs>) -> Option<Ordering> {
                let lhs_wide = $Wid::<FracLhs>::from_bits($WidInner::from(self.to_bits()));
                <$Wid<FracLhs> as PartialOrd<$Wid<FracRhs>>>::partial_cmp(&lhs_wide, rhs)
            }

            #[inline]
            fn lt(&self, rhs: &$Wid<FracRhs>) -> bool {
                let lhs_wide = $Wid::<FracLhs>::from_bits($WidInner::from(self.to_bits()));
                lhs_wide < *rhs
            }

            #[inline]
            fn le(&self, rhs: &$Wid<FracRhs>) -> bool {
                let lhs_wide = $Wid::<FracLhs>::from_bits($WidInner::from(self.to_bits()));
                lhs_wide <= *rhs
            }

            #[inline]
            fn gt(&self, rhs: &$Wid<FracRhs>) -> bool {
                let lhs_wide = $Wid::<FracLhs>::from_bits($WidInner::from(self.to_bits()));
                lhs_wide > *rhs
            }

            #[inline]
            fn ge(&self, rhs: &$Wid<FracRhs>) -> bool {
                let lhs_wide = $Wid::<FracLhs>::from_bits($WidInner::from(self.to_bits()));
                lhs_wide >= *rhs
            }
        }
    };
}
248
// Same-signedness comparisons for every narrow–wide pair of widths.
diff_size! { FixedI8, FixedI16(i16) }
diff_size! { FixedI8, FixedI32(i32) }
diff_size! { FixedI8, FixedI64(i64) }
diff_size! { FixedI8, FixedI128(i128) }
diff_size! { FixedI16, FixedI32(i32) }
diff_size! { FixedI16, FixedI64(i64) }
diff_size! { FixedI16, FixedI128(i128) }
diff_size! { FixedI32, FixedI64(i64) }
diff_size! { FixedI32, FixedI128(i128) }
diff_size! { FixedI64, FixedI128(i128) }
diff_size! { FixedU8, FixedU16(u16) }
diff_size! { FixedU8, FixedU32(u32) }
diff_size! { FixedU8, FixedU64(u64) }
diff_size! { FixedU8, FixedU128(u128) }
diff_size! { FixedU16, FixedU32(u32) }
diff_size! { FixedU16, FixedU64(u64) }
diff_size! { FixedU16, FixedU128(u128) }
diff_size! { FixedU32, FixedU64(u64) }
diff_size! { FixedU32, FixedU128(u128) }
diff_size! { FixedU64, FixedU128(u128) }
269
// Comparisons between two fixed-point numbers with the same underlying integer
// type but possibly different numbers of fractional bits. The number with more
// fractional bits is shifted right to the other's scale. Shifting truncates
// toward negative infinity (`>>` is an arithmetic shift for signed integers),
// so the `*_is_reduced` flags record whether nonzero bits were discarded — in
// which case the true value is strictly greater than its shifted form.
macro_rules! cmp {
    ($Fixed:ident($Inner:ident)) => {
        impl<FracLhs: Unsigned, FracRhs: Unsigned> PartialEq<$Fixed<FracRhs>> for $Fixed<FracLhs> {
            #[inline]
            fn eq(&self, rhs: &$Fixed<FracRhs>) -> bool {
                let lhs = self.to_bits();
                let rhs = rhs.to_bits();

                // lhs_extra_frac and nbits are known exactly at compile time,
                // so with optimizations the branch is selected at compile time
                let lhs_extra_frac = FracLhs::to_i32().saturating_sub(FracRhs::to_i32());
                let nbits = $Inner::BITS as i32;

                if lhs_extra_frac <= -nbits {
                    // rhs has at least nbits more fractional bits. Shift in
                    // two steps, since a single shift by the full bit width
                    // would overflow; the result is 0 (or -1 for a negative
                    // signed rhs).
                    let shifted_rhs = rhs >> (nbits - 1) >> 1;
                    // Every bit of rhs is shifted away, so any nonzero rhs is
                    // inexact after the shift.
                    let rhs_is_reduced = rhs != 0;
                    lhs == shifted_rhs && !rhs_is_reduced
                } else if lhs_extra_frac < 0 {
                    // Bring rhs down to lhs's scale; equal only if no nonzero
                    // fraction bits were discarded.
                    let shifted_rhs = rhs >> -lhs_extra_frac;
                    let rhs_is_reduced = rhs != (shifted_rhs << -lhs_extra_frac);
                    lhs == shifted_rhs && !rhs_is_reduced
                } else if lhs_extra_frac == 0 {
                    // Same scale: the bits compare directly.
                    lhs == rhs
                } else if lhs_extra_frac < nbits {
                    // Mirror image: bring lhs down to rhs's scale.
                    let shifted_lhs = lhs >> lhs_extra_frac;
                    let lhs_is_reduced = lhs != (shifted_lhs << lhs_extra_frac);
                    shifted_lhs == rhs && !lhs_is_reduced
                } else {
                    // lhs has at least nbits more fractional bits.
                    let shifted_lhs = lhs >> (nbits - 1) >> 1;
                    let lhs_is_reduced = lhs != 0;
                    shifted_lhs == rhs && !lhs_is_reduced
                }
            }
        }

        impl<FracLhs: Unsigned, FracRhs: Unsigned> PartialOrd<$Fixed<FracRhs>> for $Fixed<FracLhs> {
            #[inline]
            fn partial_cmp(&self, rhs: &$Fixed<FracRhs>) -> Option<Ordering> {
                let lhs = self.to_bits();
                let rhs = rhs.to_bits();

                // lhs_extra_frac and nbits are known exactly at compile time,
                // so with optimizations the branch is selected at compile time
                let lhs_extra_frac = FracLhs::to_i32().saturating_sub(FracRhs::to_i32());
                let nbits = $Inner::BITS as i32;
                if lhs_extra_frac <= -nbits {
                    let shifted_rhs = rhs >> (nbits - 1) >> 1;
                    let rhs_is_reduced = rhs != 0;
                    // Truncation rounds toward -∞, so a reduced rhs is
                    // strictly greater than shifted_rhs; a tie on the shifted
                    // values then means lhs < rhs.
                    if lhs == shifted_rhs && rhs_is_reduced {
                        Some(Ordering::Less)
                    } else {
                        Some(Ord::cmp(&lhs, &shifted_rhs))
                    }
                } else if lhs_extra_frac < 0 {
                    let shifted_rhs = rhs >> -lhs_extra_frac;
                    let rhs_is_reduced = rhs != (shifted_rhs << -lhs_extra_frac);
                    if lhs == shifted_rhs && rhs_is_reduced {
                        Some(Ordering::Less)
                    } else {
                        Some(Ord::cmp(&lhs, &shifted_rhs))
                    }
                } else if lhs_extra_frac == 0 {
                    Some(Ord::cmp(&lhs, &rhs))
                } else if lhs_extra_frac < nbits {
                    let shifted_lhs = lhs >> lhs_extra_frac;
                    let lhs_is_reduced = lhs != (shifted_lhs << lhs_extra_frac);
                    // A reduced lhs is strictly greater than shifted_lhs, so a
                    // tie on the shifted values means lhs > rhs.
                    if shifted_lhs == rhs && lhs_is_reduced {
                        Some(Ordering::Greater)
                    } else {
                        Some(Ord::cmp(&shifted_lhs, &rhs))
                    }
                } else {
                    let shifted_lhs = lhs >> (nbits - 1) >> 1;
                    let lhs_is_reduced = lhs != 0;
                    if shifted_lhs == rhs && lhs_is_reduced {
                        Some(Ordering::Greater)
                    } else {
                        Some(Ord::cmp(&shifted_lhs, &rhs))
                    }
                }
            }

            #[inline]
            fn lt(&self, rhs: &$Fixed<FracRhs>) -> bool {
                let lhs = self.to_bits();
                let rhs = rhs.to_bits();

                // lhs_extra_frac and nbits are known exactly at compile time,
                // so with optimizations the branch is selected at compile time
                let lhs_extra_frac = FracLhs::to_i32().saturating_sub(FracRhs::to_i32());
                let nbits = $Inner::BITS as i32;
                if lhs_extra_frac <= -nbits {
                    let shifted_rhs = rhs >> (nbits - 1) >> 1;
                    let rhs_is_reduced = rhs != 0;
                    // A tie with a reduced rhs means the true rhs is larger.
                    (lhs == shifted_rhs && rhs_is_reduced) || lhs < shifted_rhs
                } else if lhs_extra_frac < 0 {
                    let shifted_rhs = rhs >> -lhs_extra_frac;
                    let rhs_is_reduced = rhs != (shifted_rhs << -lhs_extra_frac);
                    (lhs == shifted_rhs && rhs_is_reduced) || lhs < shifted_rhs
                } else if lhs_extra_frac == 0 {
                    lhs < rhs
                } else if lhs_extra_frac < nbits {
                    // No reduced flag needed: discarded lhs bits only make the
                    // true lhs larger, which cannot turn < into a false
                    // positive here.
                    let shifted_lhs = lhs >> lhs_extra_frac;
                    shifted_lhs < rhs
                } else {
                    let shifted_lhs = lhs >> (nbits - 1) >> 1;
                    shifted_lhs < rhs
                }
            }

            #[inline]
            fn le(&self, rhs: &$Fixed<FracRhs>) -> bool {
                let lhs = self.to_bits();
                let rhs = rhs.to_bits();

                // lhs_extra_frac and nbits are known exactly at compile time,
                // so with optimizations the branch is selected at compile time
                let lhs_extra_frac = FracLhs::to_i32().saturating_sub(FracRhs::to_i32());
                let nbits = $Inner::BITS as i32;
                if lhs_extra_frac <= -nbits {
                    // No reduced flag needed: discarded rhs bits only make the
                    // true rhs larger, which cannot invalidate <=.
                    let shifted_rhs = rhs >> (nbits - 1) >> 1;
                    lhs <= shifted_rhs
                } else if lhs_extra_frac < 0 {
                    let shifted_rhs = rhs >> -lhs_extra_frac;
                    lhs <= shifted_rhs
                } else if lhs_extra_frac == 0 {
                    lhs <= rhs
                } else if lhs_extra_frac < nbits {
                    let shifted_lhs = lhs >> lhs_extra_frac;
                    let lhs_is_reduced = lhs != (shifted_lhs << lhs_extra_frac);
                    // A tie with a reduced lhs means the true lhs is larger,
                    // so <= does not hold.
                    !(shifted_lhs == rhs && lhs_is_reduced) && shifted_lhs <= rhs
                } else {
                    let shifted_lhs = lhs >> (nbits - 1) >> 1;
                    let lhs_is_reduced = lhs != 0;
                    !(shifted_lhs == rhs && lhs_is_reduced) && shifted_lhs <= rhs
                }
            }

            #[inline]
            fn gt(&self, rhs: &$Fixed<FracRhs>) -> bool {
                let lhs = self.to_bits();
                let rhs = rhs.to_bits();

                // lhs_extra_frac and nbits are known exactly at compile time,
                // so with optimizations the branch is selected at compile time
                let lhs_extra_frac = FracLhs::to_i32().saturating_sub(FracRhs::to_i32());
                let nbits = $Inner::BITS as i32;
                if lhs_extra_frac <= -nbits {
                    // No reduced flag needed: a larger true rhs cannot turn >
                    // into a false positive here.
                    let shifted_rhs = rhs >> (nbits - 1) >> 1;
                    lhs > shifted_rhs
                } else if lhs_extra_frac < 0 {
                    let shifted_rhs = rhs >> -lhs_extra_frac;
                    lhs > shifted_rhs
                } else if lhs_extra_frac == 0 {
                    lhs > rhs
                } else if lhs_extra_frac < nbits {
                    let shifted_lhs = lhs >> lhs_extra_frac;
                    let lhs_is_reduced = lhs != (shifted_lhs << lhs_extra_frac);
                    // A tie with a reduced lhs means the true lhs is larger.
                    (shifted_lhs == rhs && lhs_is_reduced) || shifted_lhs > rhs
                } else {
                    let shifted_lhs = lhs >> (nbits - 1) >> 1;
                    let lhs_is_reduced = lhs != 0;
                    (shifted_lhs == rhs && lhs_is_reduced) || shifted_lhs > rhs
                }
            }

            #[inline]
            fn ge(&self, rhs: &$Fixed<FracRhs>) -> bool {
                let lhs = self.to_bits();
                let rhs = rhs.to_bits();

                // lhs_extra_frac and nbits are known exactly at compile time,
                // so with optimizations the branch is selected at compile time
                let lhs_extra_frac = FracLhs::to_i32().saturating_sub(FracRhs::to_i32());
                let nbits = $Inner::BITS as i32;
                if lhs_extra_frac <= -nbits {
                    let shifted_rhs = rhs >> (nbits - 1) >> 1;
                    let rhs_is_reduced = rhs != 0;
                    // A tie with a reduced rhs means the true rhs is larger,
                    // so >= does not hold.
                    !(lhs == shifted_rhs && rhs_is_reduced) && lhs >= shifted_rhs
                } else if lhs_extra_frac < 0 {
                    let shifted_rhs = rhs >> -lhs_extra_frac;
                    let rhs_is_reduced = rhs != (shifted_rhs << -lhs_extra_frac);
                    !(lhs == shifted_rhs && rhs_is_reduced) && lhs >= shifted_rhs
                } else if lhs_extra_frac == 0 {
                    lhs >= rhs
                } else if lhs_extra_frac < nbits {
                    // No reduced flag needed: a larger true lhs cannot
                    // invalidate >=.
                    let shifted_lhs = lhs >> lhs_extra_frac;
                    shifted_lhs >= rhs
                } else {
                    let shifted_lhs = lhs >> (nbits - 1) >> 1;
                    shifted_lhs >= rhs
                }
            }
        }

        // Comparison of two values with the same Frac is total.
        impl<Frac: Unsigned> Eq for $Fixed<Frac> {}

        impl<Frac: Unsigned> Ord for $Fixed<Frac> {
            #[inline]
            fn cmp(&self, rhs: &$Fixed<Frac>) -> Ordering {
                // Same Frac on both sides, so comparing the underlying bits
                // compares the values directly.
                let lhs = self.to_bits();
                let rhs = rhs.to_bits();
                Ord::cmp(&lhs, &rhs)
            }
        }
    };
}
477
// Same-width comparisons (plus Eq and Ord for identical Frac) for every
// fixed-point type.
cmp! { FixedI8(i8) }
cmp! { FixedI16(i16) }
cmp! { FixedI32(i32) }
cmp! { FixedI64(i64) }
cmp! { FixedI128(i128) }
cmp! { FixedU8(u8) }
cmp! { FixedU16(u16) }
cmp! { FixedU32(u32) }
cmp! { FixedU64(u64) }
cmp! { FixedU128(u128) }
488
#[cfg(test)]
mod tests {
    // Regression test for issue 57: comparing a wide signed fixed-point
    // number against a bare `u64` primitive.
    #[test]
    fn issue_57() {
        use crate::types::I80F48;
        let big = I80F48::from(66000u64);
        let small: u64 = 1000;
        assert!(big > small);
    }
}