1#![no_std]
20#![deny(missing_docs)]
21#![deny(missing_copy_implementations)]
22#![deny(missing_debug_implementations)]
23
// Implements `core::fmt::Display` for a float newtype by forwarding to the
// primitive value returned by the type's `get()` method.
macro_rules! impl_display {
    ($t:ident) => {
        impl core::fmt::Display for $t {
            #[inline]
            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
                write!(f, "{}", self.get())
            }
        }
    };
}
34
35#[cfg(feature = "approx-eq")]
36pub use float_cmp::{ApproxEq, ApproxEqUlps, Ulps};
37
// Implements `float_cmp::ApproxEq` (margin-based comparison) and
// `float_cmp::ApproxEqUlps` (ULPs-based comparison) for an `f32`-backed
// newtype by delegating to the inner float.
// Only available when the `approx-eq` feature is enabled; see the empty
// stub below for the disabled case.
#[cfg(feature = "approx-eq")]
macro_rules! impl_approx_32 {
    ($t:ident) => {
        impl float_cmp::ApproxEq for $t {
            type Margin = float_cmp::F32Margin;

            #[inline]
            fn approx_eq<M: Into<Self::Margin>>(self, other: Self, margin: M) -> bool {
                self.0.approx_eq(other.0, margin)
            }
        }

        impl float_cmp::ApproxEqUlps for $t {
            type Flt = f32;

            #[inline]
            fn approx_eq_ulps(&self, other: &Self, ulps: i32) -> bool {
                self.0.approx_eq_ulps(&other.0, ulps)
            }
        }
    };
}
60
// Stub used when the `approx-eq` feature is disabled: expands to nothing so
// call sites don't need their own `cfg` guards.
#[cfg(not(feature = "approx-eq"))]
macro_rules! impl_approx_32 {
    ($t:ident) => {};
}
65
// Implements `float_cmp::ApproxEq` (margin-based comparison) and
// `float_cmp::ApproxEqUlps` (ULPs-based comparison) for an `f64`-backed
// newtype by delegating to the inner float.
// Only available when the `approx-eq` feature is enabled; see the empty
// stub below for the disabled case.
//
// NOTE: the per-impl `#[cfg(feature = "approx-eq")]` attributes that used to
// appear inside the expansion were redundant (this whole macro is already
// feature-gated, and the disabled stub expands to nothing) and inconsistent
// with `impl_approx_32`, so they have been removed.
#[cfg(feature = "approx-eq")]
macro_rules! impl_approx_64 {
    ($t:ident) => {
        impl float_cmp::ApproxEq for $t {
            type Margin = float_cmp::F64Margin;

            #[inline]
            fn approx_eq<M: Into<Self::Margin>>(self, other: Self, margin: M) -> bool {
                self.0.approx_eq(other.0, margin)
            }
        }

        impl float_cmp::ApproxEqUlps for $t {
            type Flt = f64;

            #[inline]
            fn approx_eq_ulps(&self, other: &Self, ulps: i64) -> bool {
                self.0.approx_eq_ulps(&other.0, ulps)
            }
        }
    };
}
90
// Stub used when the `approx-eq` feature is disabled: expands to nothing so
// call sites don't need their own `cfg` guards.
#[cfg(not(feature = "approx-eq"))]
macro_rules! impl_approx_64 {
    ($t:ident) => {};
}
95
/// An immutable, finite `f32`.
///
/// Unlike a plain `f32`, it is guaranteed to never be NaN or infinity,
/// which makes a total order (`Ord`) and `Eq` sound.
#[derive(Copy, Clone, Default, Debug)]
#[repr(transparent)]
pub struct FiniteF32(f32);

impl FiniteF32 {
    /// Creates a finite `f32`.
    ///
    /// Returns `None` for NaN and infinity.
    #[inline]
    pub fn new(n: f32) -> Option<Self> {
        if n.is_finite() {
            Some(FiniteF32(n))
        } else {
            None
        }
    }

    /// Creates a finite `f32` without checking the value.
    ///
    /// # Safety
    ///
    /// `n` must be finite (neither NaN nor infinity).
    #[inline]
    pub const unsafe fn new_unchecked(n: f32) -> Self {
        FiniteF32(n)
    }

    /// Returns the value as a primitive type.
    #[inline]
    pub const fn get(&self) -> f32 {
        self.0
    }
}

// `Eq` is sound here because the wrapped value is never NaN, so equality is
// reflexive.
impl Eq for FiniteF32 {}

impl PartialEq for FiniteF32 {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl Ord for FiniteF32 {
    #[inline]
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        // Finite values are always comparable: NaN (which compares false to
        // everything) is excluded by construction, so the final `else` can
        // only mean "equal".
        if self.0 < other.0 {
            core::cmp::Ordering::Less
        } else if self.0 > other.0 {
            core::cmp::Ordering::Greater
        } else {
            core::cmp::Ordering::Equal
        }
    }
}

impl PartialOrd for FiniteF32 {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        // Delegate to the total order defined by `Ord`.
        Some(self.cmp(other))
    }
}
161
162impl core::hash::Hash for FiniteF32 {
163 #[inline]
164 fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
165 self.0.to_bits().hash(state);
166 }
167}
168
impl PartialEq<f32> for FiniteF32 {
    // Allows direct comparison against a plain `f32`, e.g. `finite == 1.0`.
    #[inline]
    fn eq(&self, other: &f32) -> bool {
        self.get() == *other
    }
}

impl_display!(FiniteF32);
impl_approx_32!(FiniteF32);
178
/// An immutable, finite `f64`.
///
/// Unlike a plain `f64`, it is guaranteed to never be NaN or infinity,
/// which makes a total order (`Ord`) and `Eq` sound.
#[derive(Copy, Clone, Default, Debug)]
#[repr(transparent)]
pub struct FiniteF64(f64);

impl FiniteF64 {
    /// Creates a finite `f64`.
    ///
    /// Returns `None` for NaN and infinity.
    #[inline]
    pub fn new(n: f64) -> Option<Self> {
        if n.is_finite() {
            Some(FiniteF64(n))
        } else {
            None
        }
    }

    /// Creates a finite `f64` without checking the value.
    ///
    /// # Safety
    ///
    /// `n` must be finite (neither NaN nor infinity).
    #[inline]
    pub const unsafe fn new_unchecked(n: f64) -> Self {
        FiniteF64(n)
    }

    /// Returns the value as a primitive type.
    #[inline]
    pub const fn get(&self) -> f64 {
        self.0
    }
}

// `Eq` is sound here because the wrapped value is never NaN, so equality is
// reflexive.
impl Eq for FiniteF64 {}

impl PartialEq for FiniteF64 {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl Ord for FiniteF64 {
    #[inline]
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        // Finite values are always comparable: NaN (which compares false to
        // everything) is excluded by construction, so the final `else` can
        // only mean "equal".
        if self.0 < other.0 {
            core::cmp::Ordering::Less
        } else if self.0 > other.0 {
            core::cmp::Ordering::Greater
        } else {
            core::cmp::Ordering::Equal
        }
    }
}

impl PartialOrd for FiniteF64 {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        // Delegate to the total order defined by `Ord`.
        Some(self.cmp(other))
    }
}
244
245impl core::hash::Hash for FiniteF64 {
246 #[inline]
247 fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
248 self.0.to_bits().hash(state);
249 }
250}
251
impl PartialEq<f64> for FiniteF64 {
    // Allows direct comparison against a plain `f64`, e.g. `finite == 1.0`.
    #[inline]
    fn eq(&self, other: &f64) -> bool {
        self.get() == *other
    }
}

impl_display!(FiniteF64);
impl_approx_64!(FiniteF64);
261
/// An immutable, finite `f32` that is known to be >= 0.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Default, Debug)]
#[repr(transparent)]
pub struct PositiveF32(FiniteF32);
266
267impl PositiveF32 {
268 pub const ZERO: Self = PositiveF32(FiniteF32(0.0));
270
271 #[inline]
275 pub fn new(n: f32) -> Option<Self> {
276 if n.is_finite() && n >= 0.0 {
277 Some(PositiveF32(FiniteF32(n)))
278 } else {
279 None
280 }
281 }
282
283 #[inline]
289 pub const unsafe fn new_unchecked(n: f32) -> Self {
290 PositiveF32(FiniteF32(n))
291 }
292
293 #[inline]
295 pub const fn get(&self) -> f32 {
296 self.0.get()
297 }
298
299 #[inline]
301 pub const fn get_finite(&self) -> FiniteF32 {
302 self.0
303 }
304}
305
impl PartialEq<f32> for PositiveF32 {
    // Allows direct comparison against a plain `f32`.
    #[inline]
    fn eq(&self, other: &f32) -> bool {
        self.get() == *other
    }
}

impl_display!(PositiveF32);
impl_approx_32!(PositiveF32);
315
/// An immutable, finite `f64` that is known to be >= 0.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Default, Debug)]
#[repr(transparent)]
pub struct PositiveF64(FiniteF64);
320
321impl PositiveF64 {
322 pub const ZERO: Self = PositiveF64(FiniteF64(0.0));
324
325 #[inline]
329 pub fn new(n: f64) -> Option<Self> {
330 if n.is_finite() && n >= 0.0 {
331 Some(PositiveF64(FiniteF64(n)))
332 } else {
333 None
334 }
335 }
336
337 #[inline]
343 pub const unsafe fn new_unchecked(n: f64) -> Self {
344 PositiveF64(FiniteF64(n))
345 }
346
347 #[inline]
349 pub const fn get(&self) -> f64 {
350 self.0.get()
351 }
352
353 #[inline]
355 pub const fn get_finite(&self) -> FiniteF64 {
356 self.0
357 }
358}
359
impl PartialEq<f64> for PositiveF64 {
    // Allows direct comparison against a plain `f64`.
    #[inline]
    fn eq(&self, other: &f64) -> bool {
        self.get() == *other
    }
}

impl_display!(PositiveF64);
impl_approx_64!(PositiveF64);
369
/// An immutable, finite `f32` that is known to be > 0.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
#[repr(transparent)]
pub struct NonZeroPositiveF32(FiniteF32);
374
375impl NonZeroPositiveF32 {
376 #[inline]
380 pub fn new(n: f32) -> Option<Self> {
381 if n.is_finite() && n > 0.0 {
382 Some(NonZeroPositiveF32(FiniteF32(n)))
383 } else {
384 None
385 }
386 }
387
388 #[inline]
394 pub const unsafe fn new_unchecked(n: f32) -> Self {
395 NonZeroPositiveF32(FiniteF32(n))
396 }
397
398 #[inline]
400 pub const fn get(&self) -> f32 {
401 self.0.get()
402 }
403
404 #[inline]
406 pub const fn get_finite(&self) -> FiniteF32 {
407 self.0
408 }
409}
410
impl PartialEq<f32> for NonZeroPositiveF32 {
    // Allows direct comparison against a plain `f32`.
    #[inline]
    fn eq(&self, other: &f32) -> bool {
        self.get() == *other
    }
}

impl_display!(NonZeroPositiveF32);
impl_approx_32!(NonZeroPositiveF32);
420
/// An immutable, finite `f64` that is known to be > 0.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
#[repr(transparent)]
pub struct NonZeroPositiveF64(FiniteF64);
425
426impl NonZeroPositiveF64 {
427 #[inline]
431 pub fn new(n: f64) -> Option<Self> {
432 if n.is_finite() && n > 0.0 {
433 Some(NonZeroPositiveF64(FiniteF64(n)))
434 } else {
435 None
436 }
437 }
438
439 #[inline]
445 pub const unsafe fn new_unchecked(n: f64) -> Self {
446 NonZeroPositiveF64(FiniteF64(n))
447 }
448
449 #[inline]
451 pub const fn get(&self) -> f64 {
452 self.0.get()
453 }
454
455 #[inline]
457 pub const fn get_finite(&self) -> FiniteF64 {
458 self.0
459 }
460}
461
impl PartialEq<f64> for NonZeroPositiveF64 {
    // Allows direct comparison against a plain `f64`.
    #[inline]
    fn eq(&self, other: &f64) -> bool {
        self.get() == *other
    }
}

impl_display!(NonZeroPositiveF64);
impl_approx_64!(NonZeroPositiveF64);
471
/// An immutable, finite `f32` in a 0..=1 range.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
#[repr(transparent)]
pub struct NormalizedF32(FiniteF32);

impl NormalizedF32 {
    /// A `NormalizedF32` set to 0.
    pub const ZERO: Self = NormalizedF32(FiniteF32(0.0));
    /// A `NormalizedF32` set to 1.
    pub const ONE: Self = NormalizedF32(FiniteF32(1.0));

    /// Creates a `NormalizedF32` if the given value is in a 0..=1 range.
    ///
    /// Returns `None` for NaN, infinity and out-of-range values.
    #[inline]
    pub fn new(n: f32) -> Option<Self> {
        if n.is_finite() && n >= 0.0 && n <= 1.0 {
            Some(NormalizedF32(FiniteF32(n)))
        } else {
            None
        }
    }

    /// Creates a `NormalizedF32` without checking the value.
    ///
    /// # Safety
    ///
    /// `n` must be in a 0..=1 range.
    #[inline]
    pub const unsafe fn new_unchecked(n: f32) -> Self {
        NormalizedF32(FiniteF32(n))
    }

    /// Creates a `NormalizedF32`, clamping the given value to a 0..=1 range.
    ///
    /// Non-finite values (NaN and the infinities) fall back to 0.
    #[inline]
    pub fn new_clamped(n: f32) -> Self {
        if n.is_finite() {
            NormalizedF32(FiniteF32(clamp_f32(0.0, n, 1.0)))
        } else {
            Self::ZERO
        }
    }

    /// Creates a `NormalizedF32` from a `u8`, mapping 0..=255 onto 0..=1.
    #[inline]
    pub fn new_u8(n: u8) -> Self {
        NormalizedF32(FiniteF32(f32::from(n) / 255.0))
    }

    /// Creates a `NormalizedF32` from a `u16`, mapping 0..=65535 onto 0..=1.
    #[inline]
    pub fn new_u16(n: u16) -> Self {
        NormalizedF32(FiniteF32(f32::from(n) / 65535.0))
    }

    /// Returns the value as a primitive type.
    #[inline]
    pub const fn get(self) -> f32 {
        self.0.get()
    }

    /// Returns the value as a `FiniteF32`.
    #[inline]
    pub const fn get_finite(&self) -> FiniteF32 {
        self.0
    }

    /// Returns the value scaled to a 0..=255 range.
    #[inline]
    pub fn to_u8(&self) -> u8 {
        // Adding 0.5 before the cast rounds to nearest instead of truncating.
        ((self.0).0 * 255.0 + 0.5) as u8
    }

    /// Returns the value scaled to a 0..=65535 range.
    #[inline]
    pub fn to_u16(&self) -> u16 {
        // Adding 0.5 before the cast rounds to nearest instead of truncating.
        ((self.0).0 * 65535.0 + 0.5) as u16
    }
}

impl core::ops::Mul<NormalizedF32> for NormalizedF32 {
    type Output = Self;

    // The product of two values in 0..=1 stays in 0..=1; clamping also guards
    // against any floating-point rounding drifting above 1.0.
    #[inline]
    fn mul(self, rhs: Self) -> Self::Output {
        Self::new_clamped((self.0).0 * (rhs.0).0)
    }
}

impl PartialEq<f32> for NormalizedF32 {
    // Allows direct comparison against a plain `f32`.
    #[inline]
    fn eq(&self, other: &f32) -> bool {
        self.get() == *other
    }
}

impl_display!(NormalizedF32);
impl_approx_32!(NormalizedF32);
570
/// An immutable, finite `f64` in a 0..=1 range.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
#[repr(transparent)]
pub struct NormalizedF64(FiniteF64);
575
576impl NormalizedF64 {
577 pub const ZERO: Self = NormalizedF64(FiniteF64(0.0));
579 pub const ONE: Self = NormalizedF64(FiniteF64(1.0));
581
582 #[inline]
584 pub fn new(n: f64) -> Option<Self> {
585 if n >= 0.0 && n <= 1.0 {
586 Some(NormalizedF64(FiniteF64(n)))
587 } else {
588 None
589 }
590 }
591
592 #[inline]
598 pub const unsafe fn new_unchecked(n: f64) -> Self {
599 NormalizedF64(FiniteF64(n))
600 }
601
602 #[inline]
606 pub fn new_clamped(n: f64) -> Self {
607 if n.is_finite() {
608 NormalizedF64(FiniteF64(clamp_f64(0.0, n, 1.0)))
609 } else {
610 Self::ZERO
611 }
612 }
613
614 #[inline]
616 pub fn new_u8(n: u8) -> Self {
617 NormalizedF64(FiniteF64(f64::from(n) / 255.0))
618 }
619
620 #[inline]
622 pub fn new_u16(n: u16) -> Self {
623 NormalizedF64(FiniteF64(f64::from(n) / 65535.0))
624 }
625
626 #[inline]
628 pub const fn get(self) -> f64 {
629 self.0.get()
630 }
631
632 #[inline]
634 pub const fn get_finite(&self) -> FiniteF64 {
635 self.0
636 }
637
638 #[inline]
640 pub fn to_u8(&self) -> u8 {
641 ((self.0).0 * 255.0 + 0.5) as u8
642 }
643
644 #[inline]
646 pub fn to_u16(&self) -> u16 {
647 ((self.0).0 * 65535.0 + 0.5) as u16
648 }
649}
650
impl core::ops::Mul<NormalizedF64> for NormalizedF64 {
    type Output = Self;

    // The product of two values in 0..=1 stays in 0..=1; clamping also guards
    // against any floating-point rounding drifting above 1.0.
    #[inline]
    fn mul(self, rhs: Self) -> Self::Output {
        Self::new_clamped((self.0).0 * (rhs.0).0)
    }
}

impl PartialEq<f64> for NormalizedF64 {
    // Allows direct comparison against a plain `f64`.
    #[inline]
    fn eq(&self, other: &f64) -> bool {
        self.get() == *other
    }
}

impl_display!(NormalizedF64);
impl_approx_64!(NormalizedF64);
669
/// Clamps `val` to the `min..=max` range.
///
/// Equivalent to the original `max.min(val).max(min)` form: `min`/`max` are
/// applied via the NaN-ignoring `f32::min`/`f32::max`, which are commutative
/// for the finite bounds used by the callers.
#[inline]
fn clamp_f32(min: f32, val: f32, max: f32) -> f32 {
    min.max(max.min(val))
}

/// Clamps `val` to the `min..=max` range. See `clamp_f32`.
#[inline]
fn clamp_f64(min: f64, val: f64, max: f64) -> f64 {
    min.max(max.min(val))
}
679
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn finite_f32() {
        assert_eq!(FiniteF32::new(0.0).map(|n| n.get()), Some(0.0));
        assert_eq!(FiniteF32::new(core::f32::NAN), None);
        assert_eq!(FiniteF32::new(core::f32::INFINITY), None);
        assert_eq!(FiniteF32::new(core::f32::NEG_INFINITY), None);
    }

    #[test]
    fn positive_f32() {
        assert_eq!(NonZeroPositiveF32::new(-1.0).map(|n| n.get()), None);
        assert_eq!(NonZeroPositiveF32::new(0.0).map(|n| n.get()), None);
        assert_eq!(NonZeroPositiveF32::new(1.0).map(|n| n.get()), Some(1.0));
        assert_eq!(
            NonZeroPositiveF32::new(core::f32::EPSILON).map(|n| n.get()),
            Some(core::f32::EPSILON)
        );
        assert_eq!(
            NonZeroPositiveF32::new(-core::f32::EPSILON).map(|n| n.get()),
            None
        );
        assert_eq!(NonZeroPositiveF32::new(core::f32::NAN), None);
        assert_eq!(NonZeroPositiveF32::new(core::f32::INFINITY), None);
        assert_eq!(NonZeroPositiveF32::new(core::f32::NEG_INFINITY), None);
    }

    #[test]
    fn positive_f64() {
        // Was `NonZeroPositiveF32` — copy-paste typo; this test is about the
        // f64 type.
        assert_eq!(NonZeroPositiveF64::new(-1.0).map(|n| n.get()), None);
        assert_eq!(NonZeroPositiveF64::new(0.0).map(|n| n.get()), None);
        assert_eq!(NonZeroPositiveF64::new(1.0).map(|n| n.get()), Some(1.0));
        assert_eq!(
            NonZeroPositiveF64::new(core::f64::EPSILON).map(|n| n.get()),
            Some(core::f64::EPSILON)
        );
        assert_eq!(
            NonZeroPositiveF64::new(-core::f64::EPSILON).map(|n| n.get()),
            None
        );
        assert_eq!(NonZeroPositiveF64::new(core::f64::NAN), None);
        assert_eq!(NonZeroPositiveF64::new(core::f64::INFINITY), None);
        assert_eq!(NonZeroPositiveF64::new(core::f64::NEG_INFINITY), None);
    }

    #[test]
    fn norm_f32() {
        assert_eq!(NormalizedF32::new(-0.5), None);
        assert_eq!(
            NormalizedF32::new(-core::f32::EPSILON).map(|n| n.get()),
            None
        );
        assert_eq!(NormalizedF32::new(0.0).map(|n| n.get()), Some(0.0));
        assert_eq!(NormalizedF32::new(0.5).map(|n| n.get()), Some(0.5));
        assert_eq!(NormalizedF32::new(1.0).map(|n| n.get()), Some(1.0));
        assert_eq!(NormalizedF32::new(1.5), None);
        assert_eq!(NormalizedF32::new(core::f32::NAN), None);
        assert_eq!(NormalizedF32::new(core::f32::INFINITY), None);
        assert_eq!(NormalizedF32::new(core::f32::NEG_INFINITY), None);
    }

    #[test]
    fn clamped_norm_f32() {
        assert_eq!(NormalizedF32::new_clamped(-0.5).get(), 0.0);
        assert_eq!(NormalizedF32::new_clamped(0.5).get(), 0.5);
        assert_eq!(NormalizedF32::new_clamped(1.5).get(), 1.0);
        assert_eq!(NormalizedF32::new_clamped(core::f32::NAN).get(), 0.0);
        assert_eq!(NormalizedF32::new_clamped(core::f32::INFINITY).get(), 0.0);
        assert_eq!(
            NormalizedF32::new_clamped(core::f32::NEG_INFINITY).get(),
            0.0
        );
    }

    #[test]
    fn norm_f64() {
        assert_eq!(NormalizedF64::new(-0.5), None);
        assert_eq!(
            NormalizedF64::new(-core::f64::EPSILON).map(|n| n.get()),
            None
        );
        assert_eq!(NormalizedF64::new(0.0).map(|n| n.get()), Some(0.0));
        assert_eq!(NormalizedF64::new(0.5).map(|n| n.get()), Some(0.5));
        assert_eq!(NormalizedF64::new(1.0).map(|n| n.get()), Some(1.0));
        assert_eq!(NormalizedF64::new(1.5), None);
        assert_eq!(NormalizedF64::new(core::f64::NAN), None);
        assert_eq!(NormalizedF64::new(core::f64::INFINITY), None);
        assert_eq!(NormalizedF64::new(core::f64::NEG_INFINITY), None);
    }

    #[test]
    fn clamped_norm_f64() {
        assert_eq!(NormalizedF64::new_clamped(-0.5).get(), 0.0);
        assert_eq!(NormalizedF64::new_clamped(0.5).get(), 0.5);
        assert_eq!(NormalizedF64::new_clamped(1.5).get(), 1.0);
        assert_eq!(NormalizedF64::new_clamped(core::f64::NAN).get(), 0.0);
        assert_eq!(NormalizedF64::new_clamped(core::f64::INFINITY).get(), 0.0);
        assert_eq!(
            NormalizedF64::new_clamped(core::f64::NEG_INFINITY).get(),
            0.0
        );
    }
}