const_soft_float/soft_f32/div.rs

use crate::soft_f32::{u32_widen_mul, SoftF32};

type F = SoftF32;

type FInt = u32;

pub(crate) const fn div(a: F, b: F) -> F {
    const NUMBER_OF_HALF_ITERATIONS: usize = 0;
    const NUMBER_OF_FULL_ITERATIONS: usize = 3;
    const USE_NATIVE_FULL_ITERATIONS: bool = true;

    let one = 1;
    let zero = 0;
    let hw = F::BITS / 2;
    let lo_mask = u32::MAX >> hw;

    let significand_bits = F::SIGNIFICAND_BITS;
    let max_exponent = F::EXPONENT_MAX;

    let exponent_bias = F::EXPONENT_BIAS;

    let implicit_bit = F::IMPLICIT_BIT;
    let significand_mask = F::SIGNIFICAND_MASK;
    let sign_bit = F::SIGN_MASK as FInt;
    let abs_mask = sign_bit - one;
    let exponent_mask = F::EXPONENT_MASK;
    let inf_rep = exponent_mask;
    let quiet_bit = implicit_bit >> 1;
    let qnan_rep = exponent_mask | quiet_bit;
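    // For f32 these constants work out to: hw = 16, lo_mask = 0xFFFF,
    // significand_bits = 23, max_exponent = 0xFF, exponent_bias = 127,
    // implicit_bit = 0x0080_0000, significand_mask = 0x007F_FFFF,
    // sign_bit = 0x8000_0000, abs_mask = 0x7FFF_FFFF, exponent_mask =
    // inf_rep = 0x7F80_0000, quiet_bit = 0x0040_0000, qnan_rep = 0x7FC0_0000.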

    #[inline(always)]
    const fn negate_u32(a: u32) -> u32 {
        (<i32>::wrapping_neg(a as i32)) as u32
    }

    let a_rep = a.repr();
    let b_rep = b.repr();

    let a_exponent = (a_rep >> significand_bits) & max_exponent;
    let b_exponent = (b_rep >> significand_bits) & max_exponent;
    let quotient_sign = (a_rep ^ b_rep) & sign_bit;

    let mut a_significand = a_rep & significand_mask;
    let mut b_significand = b_rep & significand_mask;
    let mut scale = 0;

    // Detect if a or b is zero, denormal, infinity, or NaN.
    if a_exponent.wrapping_sub(one) >= (max_exponent - 1)
        || b_exponent.wrapping_sub(one) >= (max_exponent - 1)
    {
        let a_abs = a_rep & abs_mask;
        let b_abs = b_rep & abs_mask;

        // NaN / anything = qNaN
        if a_abs > inf_rep {
            return F::from_repr(a_rep | quiet_bit);
        }
        // anything / NaN = qNaN
        if b_abs > inf_rep {
            return F::from_repr(b_rep | quiet_bit);
        }

        if a_abs == inf_rep {
            if b_abs == inf_rep {
                // infinity / infinity = NaN
                return F::from_repr(qnan_rep);
            } else {
                // infinity / anything else = +/- infinity
                return F::from_repr(a_abs | quotient_sign);
            }
        }

        // anything else / infinity = +/- 0
        if b_abs == inf_rep {
            return F::from_repr(quotient_sign);
        }

        if a_abs == zero {
            if b_abs == zero {
                // zero / zero = NaN
                return F::from_repr(qnan_rep);
            } else {
                // zero / anything else = +/- zero
                return F::from_repr(quotient_sign);
            }
        }

        // anything else / zero = +/- infinity
        if b_abs == zero {
            return F::from_repr(inf_rep | quotient_sign);
        }
        // One or both of a and b is denormal; the other (if applicable) is a
        // normal number. Renormalize one or both of a and b, and set scale to
        // include the necessary exponent adjustment.
        if a_abs < implicit_bit {
            let (exponent, significand) = F::normalize(a_significand);
            scale += exponent;
            a_significand = significand;
        }

        if b_abs < implicit_bit {
            let (exponent, significand) = F::normalize(b_significand);
            scale -= exponent;
            b_significand = significand;
        }
    }

    // Set the implicit significand bit.  If we fell through from the
    // denormal path it was already set by normalize(), but setting it twice
    // won't hurt anything.
    a_significand |= implicit_bit;
    b_significand |= implicit_bit;

    let written_exponent: i32 = (a_exponent
        .wrapping_sub(b_exponent)
        .wrapping_add(scale as u32))
    .wrapping_add(exponent_bias) as i32;
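    // E.g. for 10.0 / 5.0: a_exponent = 130, b_exponent = 129, scale = 0,
    // giving written_exponent = 130 - 129 + 0 + 127 = 128; it may still be
    // decremented below when the quotient significand is renormalized.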
    let b_uq1 = b_significand << (F::BITS - significand_bits - 1);

    // Align the significand of b as a UQ1.(n-1) fixed-point number in the range
    // [1.0, 2.0) and get a UQ0.n approximate reciprocal using a small minimax
    // polynomial approximation: x0 = 3/4 + 1/sqrt(2) - b/2.
    // The max error for this approximation is achieved at endpoints, so
    //   abs(x0(b) - 1/b) <= abs(x0(1) - 1/1) = 3/4 - 1/sqrt(2) = 0.04289...,
    // which is about 4.5 bits.
    // The initial approximation is between x0(1.0) = 0.9571... and x0(2.0) = 0.4571...
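    // For example, x0(1.5) = 1.4571... - 0.75 = 0.7071..., against
    // 1/1.5 = 0.6666...: an error of 0.0404..., within the bound above.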

    // Then, refine the reciprocal estimate using a quadratically converging
    // Newton-Raphson iteration:
    //     x_{n+1} = x_n * (2 - x_n * b)
    //
    // Let b be the original divisor considered "in infinite precision" and
    // obtained from IEEE754 representation of function argument (with the
    // implicit bit set). Corresponds to rep_t-sized b_UQ1 represented in
    // UQ1.(W-1).
    //
    // Let b_hw be an infinitely precise number obtained from the highest (HW-1)
    // bits of divisor significand (with the implicit bit set). Corresponds to
    // half_rep_t-sized b_UQ1_hw represented in UQ1.(HW-1) that is a **truncated**
    // version of b_UQ1.
    //
    // Let e_n := x_n - 1/b_hw
    //     E_n := x_n - 1/b
    // abs(E_n) <= abs(e_n) + (1/b_hw - 1/b)
    //           = abs(e_n) + (b - b_hw) / (b*b_hw)
    //          <= abs(e_n) + 2 * 2^-HW
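    // Ignoring rounding, each iteration squares the error (e_{n+1} = -b * e_n^2,
    // so abs(e_{n+1}) <= 2 * e_n^2): 0.0429 -> 3.7e-3 -> 2.7e-5 -> 1.5e-9,
    // comfortably below f32 precision (2^-23 ~= 1.2e-7) after three iterations.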

    // rep_t-sized iterations may be slower than the corresponding half-width
    // variant depending on the hardware and whether single/double/quad precision
    // is selected.
    // NB: Using half-width iterations increases computation errors due to
    // rounding, so error estimations have to be computed taking the selected
    // mode into account!

    #[allow(clippy::absurd_extreme_comparisons)]
    let mut x_uq0 = if NUMBER_OF_HALF_ITERATIONS > 0 {
        // Starting with (n-1) half-width iterations
        let b_uq1_hw: u16 = (b_significand >> (significand_bits + 1 - hw)) as u16;
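        // For f32 this keeps the top hw = 16 bits of the 24-bit significand
        // (shift by 23 + 1 - 16 = 8), i.e. b_hw as UQ1.15. Note this branch is
        // dead under NUMBER_OF_HALF_ITERATIONS = 0; it is kept for the other
        // iteration configurations listed in the tables below.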

        // C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW
        // with W0 being either 16 or 32 and W0 <= HW.
        // That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from which
        // b/2 is subtracted to obtain x0) wrapped to [0, 1) range.

        // In the width-generic original, HW is at least 32 and C is shifted
        // into the highest bits if needed; for f32 (HW = 16) wrapping_shl
        // masks the shift amount to 0, so this is a no-op.
        let c_hw = (0x7504_u32 as u16).wrapping_shl(hw.wrapping_sub(32));

        // b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572,
        // so x0 fits to UQ0.HW without wrapping.
        let x_uq0_hw: u16 = {
            let mut x_uq0_hw: u16 = c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */);
            // An e_0 error is comprised of errors due to
            // * x0 being an inherently imprecise first approximation of 1/b_hw
            // * C_hw being some (irrational) number **truncated** to W0 bits
            // Please note that e_0 is calculated against the infinitely precise
            // reciprocal of b_hw (that is, **truncated** version of b).
            //
            // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0

            // By construction, 1 <= b < 2
            // f(x)  = x * (2 - b*x) = 2*x - b*x^2
            // f'(x) = 2 * (1 - b*x)
            //
            // On the [0, 1] interval, f(0)   = 0,
            // then it increases until f(1/b) = 1 / b, maximum on (0, 1),
            // then it decreases to    f(1)   = 2 - b
            //
            // Let g(x) = x - f(x) = b*x^2 - x.
            // On (0, 1/b), g(x) < 0 <=> f(x) > x
            // On (1/b, 1], g(x) > 0 <=> f(x) < x
            //
            // For half-width iterations, b_hw is used instead of b.
            #[allow(clippy::reversed_empty_ranges)]
            let mut idx = 0;
            while idx < NUMBER_OF_HALF_ITERATIONS {
                // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp
                // of corr_UQ1_hw.
                // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1).
                // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided
                // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is
                // expected to be strictly positive because b_UQ1_hw has its highest bit set
                // and x_UQ0_hw should be rather large (it converges to 1/2 < 1/b_hw <= 1).
                let corr_uq1_hw: u16 = 0_u32
                    .wrapping_sub((x_uq0_hw as u32).wrapping_mul(b_uq1_hw as u32) >> hw)
                    as u16;

                // Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally
                // obtaining an UQ1.(HW-1) number and proving its highest bit could be
                // considered to be 0 to be able to represent it in UQ0.HW.
                // From the above analysis of f(x), if corr_UQ1_hw would be represented
                // without any intermediate loss of precision (that is, in twice_rep_t)
                // x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0 and strictly
                // less otherwise. On the other hand, to obtain [1.]000..., one has to pass
                // 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due
                // to 1.0 not being representable as UQ0.HW).
                // The fact that corr_UQ1_hw was virtually rounded up (due to the result of
                // the multiplication being **first** truncated, then negated - to improve
                // error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw.
                x_uq0_hw = ((x_uq0_hw as u32).wrapping_mul(corr_uq1_hw as u32) >> (hw - 1)) as u16;
                // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t
                // representation. In the latter case, x_UQ0_hw will be either 0 or 1 after
                // any number of iterations, so just subtract 2 from the reciprocal
                // approximation after last iteration.

                // In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW:
                // corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1
                //             = 1 - e_n * b_hw + 2*eps1
                // x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2
                //          = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2
                //          = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2
                // e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2
                //         = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw
                //                        \------ >0 -------/   \-- >0 ---/
                // abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U)
                idx += 1;
            }
            // For initial half-width iterations, U = 2^-HW
            // Let  abs(e_n)     <= u_n * U,
            // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U)
            // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2)

            // Account for possible overflow (see above). For an overflow to occur for the
            // first time, for "ideal" corr_UQ1_hw (that is, without intermediate
            // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either the
            // maximum value representable in UQ0.HW or less by 1. This means that 1/b_hw
            // has to be not below that value (see g(x) above), so it is safe to decrement
            // just once after the final iteration. On the other hand, the effective value
            // of the divisor changes after this point (from b_hw to b), so adjust here.
            x_uq0_hw.wrapping_sub(1_u16)
        };

        // Error estimations for full-precision iterations are calculated just
        // as above, but with U := 2^-W and taking extra decrementing into account.
        // We need at least one such iteration.

        // Simulating operations on a twice_rep_t to perform a single final full-width
        // iteration. Using ad-hoc multiplication implementations to take advantage
        // of particular structure of operands.

        let blo: u32 = b_uq1 & lo_mask;
        // x_UQ0 = x_UQ0_hw * 2^HW - 1
        // x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1
        //
        //   <--- higher half ---><--- lower half --->
        //   [x_UQ0_hw * b_UQ1_hw]
        // +            [  x_UQ0_hw *  blo  ]
        // -                      [      b_UQ1       ]
        // = [      result       ][.... discarded ...]
        let corr_uq1 = negate_u32(
            (x_uq0_hw as u32) * (b_uq1_hw as u32) + (((x_uq0_hw as u32) * (blo)) >> hw) - 1,
        ); // account for *possible* carry
        let lo_corr = corr_uq1 & lo_mask;
        let hi_corr = corr_uq1 >> hw;
        // x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1
        let mut x_uq0 = (((x_uq0_hw as u32) * hi_corr) << 1)
            .wrapping_add(((x_uq0_hw as u32) * lo_corr) >> (hw - 1))
            .wrapping_sub(2);
        // 1 to account for the highest bit of corr_UQ1 possibly being 1,
        // 1 to account for a possible carry.
        // Just like the case of half-width iterations, but with the possibility
        // of overflowing by one extra Ulp of x_UQ0.
        x_uq0 -= one;
        // ... and then the traditional fixup by 2 should work

        // On error estimation:
        // abs(E_{N-1}) <=   (u_{N-1} + 2 /* due to conversion e_n -> E_n */) * 2^-HW
        //                 + (2^-HW + 2^-W)
        // abs(E_{N-1}) <= (u_{N-1} + 3.01) * 2^-HW

        // Then like for the half-width iterations:
        // With 0 <= eps1, eps2 < 2^-W
        // E_N  = 4 * E_{N-1} * eps1 - (E_{N-1}^2 * b + 4 * eps2) + 4 * eps1 / b
        // abs(E_N) <= 2^-W * [ 4 * abs(E_{N-1}) + max(2 * abs(E_{N-1})^2 * 2^W + 4, 8) ]
        // abs(E_N) <= 2^-W * [ 4 * (u_{N-1} + 3.01) * 2^-HW + max(4 + 2 * (u_{N-1} + 3.01)^2, 8) ]
        x_uq0
    } else {
        // C is (3/4 + 1/sqrt(2)) - 1 truncated to 32 fractional bits as UQ0.n
        let c = 0x7504F333_u32 << (F::BITS - 32);
        let x_uq0 = c.wrapping_sub(b_uq1);
        // E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-32
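        // Worked example: for a divisor with significand 1.25 (e.g. b = 5.0),
        // b_uq1 = 0xA000_0000 and x_uq0 = 0x7504_F333 - 0xA000_0000 (wrapping)
        // = 0xD504_F333, i.e. ~0.8321 as UQ0.32 against 1/1.25 = 0.8: an error
        // of ~0.032, within the 3/4 - 1/sqrt(2) = 0.0429... bound.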
        x_uq0
    };

    let mut x_uq0 = if USE_NATIVE_FULL_ITERATIONS {
        let mut idx = 0;
        while idx < NUMBER_OF_FULL_ITERATIONS {
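            // As in the half-width variant, 0 - (x*b >> W) computes
            // corr = 2 - x*b in UQ1.(W-1), here via a full 64-bit product.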
            let corr_uq1: u32 = 0_u64
                .wrapping_sub(((x_uq0 as u64).wrapping_mul(b_uq1 as u64)).wrapping_shr(F::BITS))
                as u32;
            x_uq0 = (((x_uq0 as u64) * (corr_uq1 as u64)) >> (F::BITS - 1)) as u32;
            idx += 1;
        }
        x_uq0
    } else {
        // not using native full iterations
        x_uq0
    };

    // Finally, account for possible overflow, as explained above.
    x_uq0 = x_uq0.wrapping_sub(2);

    // u_n for different precisions (with N-1 half-width iterations):
    // W0 is the precision of C
    //   u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW

    // Estimated with bc:
    //   define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; }
    //   define half2(un) { return 2.0 * un / 2.0^hw + 2.0; }
    //   define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; }
    //   define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; }

    //             | f32 (0 + 3) | f32 (2 + 1)  | f64 (3 + 1)  | f128 (4 + 1)
    // u_0         | < 184224974 | < 2812.1     | < 184224974  | < 791240234244348797
    // u_1         | < 15804007  | < 242.7      | < 15804007   | < 67877681371350440
    // u_2         | < 116308    | < 2.81       | < 116308     | < 499533100252317
    // u_3         | < 7.31      |              | < 7.31       | < 27054456580
    // u_4         |             |              |              | < 80.4
    // Final (U_N) | same as u_3 | < 72         | < 218        | < 13920

    // Add 2 to U_N due to final decrement.

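    // For this configuration (f32, 0 half-width + 3 full iterations):
    // U_N = u_3 < 7.31, plus 2 for the final decrement, stays below 10.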
    let reciprocal_precision: FInt = 10;

    // Suppose 1/b - P * 2^-W < x < 1/b + P * 2^-W
    let x_uq0 = x_uq0 - reciprocal_precision;
    // Now 1/b - (2*P) * 2^-W < x < 1/b
    // FIXME Is x_UQ0 still >= 0.5?

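    // u32_widen_mul is assumed to return the full 64-bit product split into
    // two u32 halves, with `.1` the high word: quotient_UQ1 as described below.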
    let mut quotient: FInt = u32_widen_mul(x_uq0, a_significand << 1).1;
    // Now, a/b - 4*P * 2^-W < q < a/b for q=<quotient_UQ1:dummy> in UQ1.(SB+1+W).

    // quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1),
    // adjust it to be in [1.0, 2.0) as UQ1.SB.
    let (mut residual, written_exponent) = if quotient < (implicit_bit << 1) {
        // Highest bit is 0, so just reinterpret quotient_UQ1 as UQ1.SB,
        // effectively doubling its value as well as its error estimation.
        let residual_lo = (a_significand << (significand_bits + 1))
            .wrapping_sub(quotient.wrapping_mul(b_significand));
        a_significand <<= 1;
        (residual_lo, written_exponent.wrapping_sub(1))
    } else {
        // Highest bit is 1 (the UQ1.(SB+1) value is in [1, 2)), convert it
        // to UQ1.SB by right shifting by 1. Least significant bit is omitted.
        quotient >>= 1;
        let residual_lo =
            (a_significand << significand_bits).wrapping_sub(quotient.wrapping_mul(b_significand));
        (residual_lo, written_exponent)
    };

    // drop mutability
    let quotient = quotient;

    // NB: residualLo is calculated above for the normal result case.
    //     It is re-computed on the denormal path, which is not expected to be
    //     performance-sensitive.

    // Now, q cannot be greater than a/b and can differ by at most 8*P * 2^-W + 2^-SB
    // Each NextAfter() increments the floating point value by at least 2^-SB
    // (more, if exponent was incremented).
    // Different cases (<---> is of 2^-SB length, * = a/b that is shown as a midpoint):
    //   q
    //   |   | * |   |   |       |       |
    //       <--->      2^t
    //   |   |   |   |   |   *   |       |
    //               q
    // To require at most one NextAfter(), an error should be less than 1.5 * 2^-SB.
    //   (8*P) * 2^-W + 2^-SB < 1.5 * 2^-SB
    //   (8*P) * 2^-W         < 0.5 * 2^-SB
    //   P < 2^(W-4-SB)
    // Generally, for at most R NextAfter() to be enough,
    //   P < (2*R - 1) * 2^(W-4-SB)
    // For f32 (0+3): 10 < 32 (OK)
    // For f32 (2+1): 32 < 74 < 32 * 3, so two NextAfter() are required
    // For f64: 220 < 256 (OK)
    // For f128: 4096 * 3 < 13922 < 4096 * 5 (three NextAfter() are required)

    // If we have overflowed the exponent, return infinity
    if written_exponent >= max_exponent as i32 {
        return F::from_repr(inf_rep | quotient_sign);
    }

    // Now, quotient <= the correctly-rounded result
    // and may need taking NextAfter() up to 3 times (see error estimates above)
    // r = a - b * q
    let abs_result = if written_exponent > 0 {
        let mut ret = quotient & significand_mask;
        ret |= ((written_exponent as u32) << significand_bits) as u32;
        residual <<= 1;
        ret
    } else {
        if (significand_bits as i32 + written_exponent) < 0 {
            return F::from_repr(quotient_sign);
        }
        let ret = quotient.wrapping_shr(negate_u32(written_exponent as u32) + 1);
        residual = (a_significand
            .wrapping_shl(significand_bits.wrapping_add(written_exponent as u32))
            as u32)
            .wrapping_sub((ret.wrapping_mul(b_significand)) << 1);
        ret
    };
    // Round
    let abs_result = {
        // Tie to even: this conditionally turns the below LT comparison into LTE.
        residual += abs_result & one;

        if residual > b_significand {
            abs_result + one
        } else {
            abs_result
        }
    };
    F::from_repr(abs_result | quotient_sign)
}

#[cfg(test)]
mod test {
    use crate::soft_f32::SoftF32;

    #[test]
    fn sanity_check() {
        assert_eq!(SoftF32(10.0).div(SoftF32(5.0)).0, 2.0)
    }
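
    // Additional illustrative checks of the special-case handling above,
    // assuming only the surface already used by `sanity_check` (the
    // `SoftF32` tuple wrapper and its `div` method).
    #[test]
    fn special_cases() {
        // NaN propagates through either operand.
        assert!(SoftF32(f32::NAN).div(SoftF32(1.0)).0.is_nan());
        assert!(SoftF32(1.0).div(SoftF32(f32::NAN)).0.is_nan());
        // infinity / infinity and zero / zero are NaN.
        assert!(SoftF32(f32::INFINITY).div(SoftF32(f32::INFINITY)).0.is_nan());
        assert!(SoftF32(0.0).div(SoftF32(0.0)).0.is_nan());
        // anything / zero is signed infinity; anything / infinity is signed zero.
        assert_eq!(SoftF32(-1.0).div(SoftF32(0.0)).0, f32::NEG_INFINITY);
        assert_eq!(SoftF32(1.0).div(SoftF32(f32::INFINITY)).0, 0.0);
    }

    #[test]
    fn quotient_sign() {
        assert_eq!(SoftF32(-10.0).div(SoftF32(5.0)).0, -2.0);
        assert_eq!(SoftF32(-10.0).div(SoftF32(-5.0)).0, 2.0);
    }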
}