|
#include "cr_sin.h" |
|
|
|
#include <stdbool.h> |
|
#include <stddef.h> |
|
#include <stdint.h> |
|
#include <stdio.h> |
|
#include <stdlib.h> |
|
#include <string.h> |
|
|
|
/*
 * Arbitrary-precision unsigned integer, stored as base-2^32 limbs in
 * little-endian order (v[0] is the least significant limb).
 * len == 0 represents the value zero; code that calls big_normalize()
 * keeps the top limb v[len - 1] nonzero.  cap is the allocated limb
 * count (cap >= len); big_reserve() zero-fills newly exposed limbs.
 */
typedef struct {
    uint32_t *v;   /* limb array, little-endian base 2^32 */
    size_t len;    /* number of significant limbs (0 => zero) */
    size_t cap;    /* allocated capacity of v, in limbs */
} Big;
|
|
|
/* Selects which Taylor series eval_series() sums. */
typedef enum {
    CR_EVAL_SIN,   /* sin z = z - z^3/3! + z^5/5! - ... */
    CR_EVAL_COS    /* cos z = 1 - z^2/2! + z^4/4! - ... */
} EvalKind;
|
|
|
/* Put *a into a valid empty state (value zero, no storage attached). */
static void big_init(Big *a) {
    *a = (Big){ .v = NULL, .len = 0, .cap = 0 };
}
|
|
|
/* Report an unrecoverable internal error on stderr and terminate. */
static void big_die(const char *why) {
    fputs("cr_sin internal error: ", stderr);
    fputs(why, stderr);
    fputc('\n', stderr);
    abort();
}
|
|
|
/*
 * Grow a->v so it holds at least `cap` limbs; never shrinks.  Newly
 * exposed limbs are zero-initialized so callers may rely on them.
 * Aborts via big_die() on allocation failure or if the byte-size
 * computation would overflow size_t (the original code could wrap
 * cap * sizeof(uint32_t) and silently under-allocate).
 */
static void big_reserve(Big *a, size_t cap) {
    if (cap <= a->cap) {
        return;
    }
    /* Guard the multiplication below against size_t wrap-around. */
    if (cap > SIZE_MAX / sizeof(uint32_t)) {
        big_die("allocation size overflow");
    }
    uint32_t *p = realloc(a->v, cap * sizeof(uint32_t));
    if (p == NULL) {
        big_die("allocation failed");
    }
    /* Zero-fill the freshly allocated tail. */
    for (size_t i = a->cap; i < cap; ++i) {
        p[i] = 0;
    }
    a->v = p;
    a->cap = cap;
}
|
|
|
/* Strip high zero limbs so the top limb is nonzero (or len becomes 0). */
static void big_normalize(Big *a) {
    size_t n = a->len;
    while (n > 0 && a->v[n - 1] == 0) {
        n--;
    }
    a->len = n;
}
|
|
|
/* Release the limb buffer and reset *a to the empty state. */
static void big_free(Big *a) {
    free(a->v);
    *a = (Big){ .v = NULL, .len = 0, .cap = 0 };
}
|
|
|
/* Set the value to zero without releasing the limb buffer. */
static void big_set_zero(Big *a) {
    a->len = 0;
}
|
|
|
/* Load a 64-bit value into *a (one or two limbs, normalized). */
static void big_set_u64(Big *a, uint64_t x) {
    if (x == 0) {
        big_set_zero(a);
        return;
    }
    uint32_t hi = (uint32_t)(x >> 32);
    big_reserve(a, 2);
    a->v[0] = (uint32_t)x;
    a->v[1] = hi;
    a->len = (hi != 0) ? 2 : 1;
}
|
|
|
/* Set *a to 2^bit. */
static void big_set_pow2(Big *a, unsigned bit) {
    size_t limb = bit / 32u;
    unsigned off = bit % 32u;
    size_t n = limb + 1u;
    big_reserve(a, n);
    memset(a->v, 0, n * sizeof(uint32_t));
    a->v[limb] = (uint32_t)1u << off;
    a->len = n;
}
|
|
|
/* Overwrite *dst with the value held in *src. */
static void big_copy(Big *dst, const Big *src) {
    size_t n = src->len;
    big_reserve(dst, n);
    if (n > 0) {
        memcpy(dst->v, src->v, n * sizeof(uint32_t));
    }
    dst->len = n;
}
|
|
|
/* True iff *a holds the value zero (len == 0 encodes zero). */
static bool big_is_zero(const Big *a) {
    return a->len == 0;
}
|
|
|
static int big_cmp(const Big *a, const Big *b) { |
|
if (a->len < b->len) { |
|
return -1; |
|
} |
|
if (a->len > b->len) { |
|
return 1; |
|
} |
|
for (size_t i = a->len; i != 0; --i) { |
|
uint32_t av = a->v[i - 1]; |
|
uint32_t bv = b->v[i - 1]; |
|
if (av < bv) { |
|
return -1; |
|
} |
|
if (av > bv) { |
|
return 1; |
|
} |
|
} |
|
return 0; |
|
} |
|
|
|
/* a += b, growing a by at most one limb for the final carry. */
static void big_add_inplace(Big *a, const Big *b) {
    size_t wide = (a->len > b->len) ? a->len : b->len;
    big_reserve(a, wide + 1u);
    uint64_t carry = 0;
    for (size_t i = 0; i < wide; ++i) {
        uint64_t sum = carry;
        if (i < a->len) {
            sum += a->v[i];
        }
        if (i < b->len) {
            sum += b->v[i];
        }
        a->v[i] = (uint32_t)sum;
        carry = sum >> 32;
    }
    a->len = wide;
    if (carry != 0) {
        a->v[a->len] = (uint32_t)carry;
        a->len += 1;
    }
}
|
|
|
/* a -= b; aborts (printing the context string ctx) if a < b. */
static void big_sub_inplace(Big *a, const Big *b, const char *ctx) {
    uint64_t borrow = 0;
    for (size_t i = 0; i < a->len; ++i) {
        uint64_t have = a->v[i];
        uint64_t take = (i < b->len ? (uint64_t)b->v[i] : 0) + borrow;
        borrow = (have < take) ? 1 : 0;
        /* Truncation to 32 bits yields the correct wrapped digit either way. */
        a->v[i] = (uint32_t)(have - take);
    }
    if (borrow != 0) {
        fprintf(stderr, "while subtracting: %s\n", ctx);
        big_die("negative Big subtraction");
    }
    big_normalize(a);
}
|
|
|
/*
 * a *= m, in place.
 *
 * Single pass with a 128-bit intermediate: each step computes
 * cur = v[i] * m + carry.  By induction the carry stays below m
 * (carry' = floor(cur / 2^32) <= ((2^32 - 1) * m + (m - 1)) / 2^32 < m),
 * so it always fits in 64 bits and at most two extra limbs are appended.
 *
 * This replaces the previous two-pass implementation, which cloned the
 * operand into a heap temporary and performed a shifted second multiply
 * for the high 32 bits of m -- an unnecessary O(n) allocation and copy
 * on every call.
 */
static void big_mul_u64_inplace(Big *a, uint64_t m) {
    if (a->len == 0 || m == 1) {
        return;
    }
    if (m == 0) {
        big_set_zero(a);
        return;
    }
    size_t old_len = a->len;
    big_reserve(a, old_len + 2u);
    uint64_t carry = 0;
    for (size_t i = 0; i < old_len; ++i) {
        unsigned __int128 cur = (unsigned __int128)a->v[i] * m + carry;
        a->v[i] = (uint32_t)cur;
        carry = (uint64_t)(cur >> 32);
    }
    a->len = old_len;
    /* Append the remaining carry (up to two limbs; top limb is nonzero). */
    while (carry != 0) {
        a->v[a->len++] = (uint32_t)carry;
        carry >>= 32;
    }
}
|
|
|
/* a /= d (in place), returning the remainder.  Aborts on d == 0. */
static uint64_t big_div_u64_inplace(Big *a, uint64_t d) {
    if (d == 0) {
        big_die("division by zero");
    }
    unsigned __int128 r = 0;
    size_t i = a->len;
    /* Long division from the most significant limb down. */
    while (i-- > 0) {
        unsigned __int128 acc = (r << 32) | a->v[i];
        a->v[i] = (uint32_t)(acc / d);
        r = acc % d;
    }
    big_normalize(a);
    return (uint64_t)r;
}
|
|
|
/* a >>= bits, in place. */
static void big_rshift_bits_inplace(Big *a, unsigned bits) {
    if (a->len == 0 || bits == 0) {
        return;
    }
    size_t drop = bits / 32u;
    unsigned shift = bits % 32u;
    if (drop >= a->len) {
        big_set_zero(a);
        return;
    }
    size_t n = a->len - drop;
    for (size_t i = 0; i < n; ++i) {
        /* Assemble a 64-bit window starting at limb i + drop; truncation
         * after the shift keeps only the 32 bits that land in slot i. */
        uint64_t window = a->v[i + drop];
        if (i + drop + 1u < a->len) {
            window |= (uint64_t)a->v[i + drop + 1u] << 32;
        }
        a->v[i] = (uint32_t)(window >> shift);
    }
    a->len = n;
    big_normalize(a);
}
|
|
|
static unsigned big_bit_length(const Big *a) { |
|
if (a->len == 0) { |
|
return 0; |
|
} |
|
uint32_t top = a->v[a->len - 1u]; |
|
return (unsigned)((a->len - 1u) * 32u + 32u - (unsigned)__builtin_clz(top)); |
|
} |
|
|
|
static bool big_bit_test(const Big *a, unsigned bit) { |
|
size_t limb = (size_t)bit / 32u; |
|
if (limb >= a->len) { |
|
return false; |
|
} |
|
return ((a->v[limb] >> (bit % 32u)) & 1u) != 0; |
|
} |
|
|
|
static bool big_any_low_bits(const Big *a, unsigned bits) { |
|
if (bits == 0 || a->len == 0) { |
|
return false; |
|
} |
|
size_t full_limbs = (size_t)bits / 32u; |
|
unsigned rem_bits = bits % 32u; |
|
size_t n = full_limbs < a->len ? full_limbs : a->len; |
|
for (size_t i = 0; i < n; ++i) { |
|
if (a->v[i] != 0) { |
|
return true; |
|
} |
|
} |
|
if (rem_bits != 0 && full_limbs < a->len) { |
|
uint32_t mask = ((uint32_t)1u << rem_bits) - 1u; |
|
return (a->v[full_limbs] & mask) != 0; |
|
} |
|
return false; |
|
} |
|
|
|
/*
 * Return the low 64 bits of (*a >> shift).
 *
 * Builds the result as two 32-bit output limbs; each is taken from a
 * 64-bit window of source limbs so an unaligned shift can straddle a
 * limb boundary.  Limbs past the top of *a read as zero.
 */
static uint64_t big_low_u64_after_rshift(const Big *a, unsigned shift) {
    size_t limb_shift = (size_t)shift / 32u;
    unsigned bit_shift = shift % 32u;
    uint64_t out = 0;
    for (unsigned out_limb = 0; out_limb < 2; ++out_limb) {
        size_t src = limb_shift + out_limb;
        uint64_t word = 0;
        if (src < a->len) {
            word = a->v[src];
        }
        /* Pull in the next limb's low bits when the shift is unaligned. */
        if (bit_shift != 0 && src + 1u < a->len) {
            word |= (uint64_t)a->v[src + 1u] << 32;
        }
        out |= (uint64_t)(uint32_t)(word >> bit_shift) << (32u * out_limb);
    }
    return out;
}
|
|
|
static uint64_t big_low_u64_lshifted(const Big *a, unsigned shift) { |
|
if (a->len == 0) { |
|
return 0; |
|
} |
|
if (shift >= 64) { |
|
return 0; |
|
} |
|
uint64_t lo = a->len > 0 ? a->v[0] : 0; |
|
uint64_t hi = a->len > 1 ? a->v[1] : 0; |
|
return (lo | (hi << 32)) << shift; |
|
} |
|
|
|
/*
 * Compute round(*a / 2^shift) in 64 bits, round-to-nearest, ties-to-even.
 * A non-positive `shift` means a left shift: exact, so no rounding; only
 * the low 64 bits of the result are kept.
 *
 * `half` is the first bit shifted out; `sticky` is true when any lower
 * bit is set.  Increment on strictly-above-half (half && sticky) or an
 * exact tie with an odd quotient (half && q odd) -- the ties-to-even rule.
 *
 * NOTE(review): assumes callers keep the true quotient below 2^64 so the
 * increment cannot wrap -- confirm against round_positive_scaled_bits.
 */
static uint64_t big_round_pow2_to_u64(const Big *a, long shift) {
    uint64_t q;
    bool half = false;
    bool sticky = false;
    if (shift <= 0) {
        q = big_low_u64_lshifted(a, (unsigned)(-shift));
    } else {
        q = big_low_u64_after_rshift(a, (unsigned)shift);
        half = big_bit_test(a, (unsigned)shift - 1u);
        sticky = big_any_low_bits(a, (unsigned)shift - 1u);
    }
    if (half && (sticky || (q & 1u) != 0)) {
        ++q;
    }
    return q;
}
|
|
|
/*
 * Schoolbook multiplication: *out = *a * *b.  out must not alias a or b
 * (callers route aliasing cases through a temporary; see big_mul_fixed_to).
 *
 * The product of an al-limb and a bl-limb number always fits in al + bl
 * limbs, so the output is sized exactly once up front.  When row i
 * finishes, slot i + b->len has never been written (row i-1 stopped at
 * i - 1 + b->len), so the row's final carry is stored there directly.
 * The previous implementation instead carried an unreachable
 * dynamic-growth propagation loop for this step.
 */
static void big_mul_to(Big *out, const Big *a, const Big *b) {
    if (a->len == 0 || b->len == 0) {
        big_set_zero(out);
        return;
    }
    size_t n = a->len + b->len;
    big_reserve(out, n);
    for (size_t i = 0; i < n; ++i) {
        out->v[i] = 0;
    }
    out->len = n;
    for (size_t i = 0; i < a->len; ++i) {
        uint64_t carry = 0;
        for (size_t j = 0; j < b->len; ++j) {
            uint64_t cur = (uint64_t)out->v[i + j] + (uint64_t)a->v[i] * b->v[j] + carry;
            out->v[i + j] = (uint32_t)cur;
            carry = cur >> 32;
        }
        /* carry < 2^32 here: cur <= (2^32-1) + (2^32-1)^2 + (2^32-1) = 2^64 - 1. */
        out->v[i + b->len] = (uint32_t)carry;
    }
    big_normalize(out);
}
|
|
|
/*
 * Fixed-point multiply: out = (a * b) >> scale.  Uses a temporary so it
 * remains correct when out aliases a or b.
 */
static void big_mul_fixed_to(Big *out, const Big *a, const Big *b, unsigned scale) {
    Big tmp;
    big_init(&tmp);
    big_mul_to(&tmp, a, b);
    big_rshift_bits_inplace(&tmp, scale);
    big_copy(out, &tmp);
    big_free(&tmp);
}
|
|
|
/*
 * out = arctan(1/q) * 2^bits via the Taylor series
 *     arctan(1/q) = 1/q - 1/(3 q^3) + 1/(5 q^5) - ...
 *
 * Each iteration turns term = x^(2n+1)/(2n+1) into x^(2n+3)/(2n+3) by
 * multiplying by (2n+1) and dividing by q^2 * (2n+3).  The loop stops
 * once the truncated term underflows to zero at this scale.
 */
static void arctan_inv_scaled(Big *out, uint32_t q, unsigned bits) {
    Big term;
    big_init(&term);
    big_set_pow2(&term, bits);
    big_div_u64_inplace(&term, q);
    big_copy(out, &term);

    uint64_t q2 = (uint64_t)q * q;
    for (uint64_t n = 0;; ++n) {
        big_mul_u64_inplace(&term, 2u * n + 1u);
        big_div_u64_inplace(&term, q2 * (2u * n + 3u));
        if (big_is_zero(&term)) {
            break;
        }
        /* Alternating signs: n = 0 subtracts the x^3/3 term, and so on. */
        if ((n & 1u) == 0) {
            big_sub_inplace(out, &term, "arctan alternating term");
        } else {
            big_add_inplace(out, &term);
        }
    }
    big_free(&term);
}
|
|
|
/*
 * out = pi * 2^bits, by Machin's formula:
 *     pi = 16*arctan(1/5) - 4*arctan(1/239)
 * computed with PI_GUARD extra fractional bits, then shifted back down.
 */
static void compute_pi_scaled(Big *out, unsigned bits) {
    enum { PI_GUARD = 32 };
    Big t5;
    Big t239;
    big_init(&t5);
    big_init(&t239);
    arctan_inv_scaled(&t5, 5u, bits + PI_GUARD);
    arctan_inv_scaled(&t239, 239u, bits + PI_GUARD);
    big_mul_u64_inplace(&t5, 16u);
    big_mul_u64_inplace(&t239, 4u);
    big_sub_inplace(&t5, &t239, "Machin pi");
    big_rshift_bits_inplace(&t5, PI_GUARD);
    big_copy(out, &t5);
    big_free(&t5);
    big_free(&t239);
}
|
|
|
/* Compute 2^exp mod `mod` by binary exponentiation; `mod` must be nonzero. */
static uint64_t pow2_mod_u64(unsigned exp, uint64_t mod) {
    uint64_t acc = 1u % mod;
    uint64_t sq = 2u % mod;
    for (unsigned e = exp; e != 0; e >>= 1u) {
        if ((e & 1u) != 0) {
            acc = (uint64_t)(((unsigned __int128)acc * sq) % mod);
        }
        sq = (uint64_t)(((unsigned __int128)sq * sq) % mod);
    }
    return acc;
}
|
|
|
/*
 * Round the positive fixed-point value mag / 2^scale to the nearest
 * double and return its IEEE-754 bit pattern (sign bit clear).
 *
 * Normal path: e is the unbiased binary exponent; a 53-bit significand is
 * produced by big_round_pow2_to_u64 (ties-to-even), and a carry out to
 * 2^53 renormalizes by bumping e.  e > 0 after rounding is clamped to
 * exactly 1.0 -- NOTE(review): presumably callers only produce values in
 * [0, 1] plus a small error margin; confirm against eval_series.
 * Subnormal path (e < -1022): round at the fixed 2^-1074 quantum; a
 * result reaching 2^52 has rounded up into the smallest normal.
 */
static uint64_t round_positive_scaled_bits(const Big *mag, unsigned scale) {
    if (big_is_zero(mag)) {
        return 0;
    }
    unsigned bl = big_bit_length(mag);
    int e = (int)bl - 1 - (int)scale;
    if (e >= -1022) {
        /* Keep 53 significant bits: shift out (scale + e - 52) low bits. */
        long shift = (long)scale + e - 52L;
        uint64_t m = big_round_pow2_to_u64(mag, shift);
        if (m == (UINT64_C(1) << 53)) {
            m >>= 1u;
            ++e;
        }
        if (e > 0) {
            return UINT64_C(0x3ff0000000000000);
        }
        uint64_t exp_bits = (uint64_t)(e + 1023);
        /* Drop the implicit leading bit from the stored mantissa. */
        return (exp_bits << 52) | (m - (UINT64_C(1) << 52));
    }

    /* Subnormal: the quantum is 2^-1074 regardless of the value's exponent. */
    long shift = (long)scale - 1074L;
    uint64_t m = big_round_pow2_to_u64(mag, shift);
    if (m >= (UINT64_C(1) << 52)) {
        return UINT64_C(0x0010000000000000);
    }
    return m;
}
|
|
|
/* Reinterpret an IEEE-754 bit pattern as a double (memcpy avoids aliasing UB). */
static double bits_to_double(uint64_t bits) {
    double out;
    memcpy(&out, &bits, sizeof out);
    return out;
}
|
|
|
/* Extract the IEEE-754 bit pattern of a double (memcpy avoids aliasing UB). */
static uint64_t double_to_bits(double x) {
    uint64_t out;
    memcpy(&out, &x, sizeof out);
    return out;
}
|
|
|
/*
 * Rounding test for the Ziv loop: round both endpoints of
 * [center - err, center + err] (lower endpoint clamped at zero) to
 * double.  If both round to the same bit pattern the result is certain:
 * store it in *out with the requested sign and return true.
 */
static bool rounded_interval_decides(const Big *center, uint64_t err, unsigned scale, bool negative, double *out) {
    Big delta;
    Big low;
    Big high;
    big_init(&delta);
    big_init(&low);
    big_init(&high);
    big_set_u64(&delta, err);

    if (big_cmp(center, &delta) > 0) {
        big_copy(&low, center);
        big_sub_inplace(&low, &delta, "interval lower endpoint");
    } else {
        big_set_zero(&low);  /* clamp: values here are non-negative */
    }
    big_copy(&high, center);
    big_add_inplace(&high, &delta);

    uint64_t lo_bits = round_positive_scaled_bits(&low, scale);
    uint64_t hi_bits = round_positive_scaled_bits(&high, scale);
    bool decided = (lo_bits == hi_bits);
    if (decided) {
        uint64_t bits = lo_bits;
        if (negative) {
            bits |= UINT64_C(0x8000000000000000);
        }
        *out = bits_to_double(bits);
    }

    big_free(&delta);
    big_free(&low);
    big_free(&high);
    return decided;
}
|
|
|
/*
 * Convert the angle (numerator / 2^denom_shift) degrees to radians in
 * fixed point: z = pi * numerator / (180 * 2^denom_shift), scaled by
 * 2^scale.  Z_GUARD extra bits absorb truncation in the intermediate steps.
 */
static void scaled_angle(Big *z, uint64_t numerator, unsigned denom_shift, unsigned scale) {
    enum { Z_GUARD = 32 };
    Big acc;
    big_init(&acc);
    compute_pi_scaled(&acc, scale + Z_GUARD);
    big_mul_u64_inplace(&acc, numerator);
    big_div_u64_inplace(&acc, 180u);
    big_rshift_bits_inplace(&acc, denom_shift + Z_GUARD);
    big_copy(z, &acc);
    big_free(&acc);
}
|
|
|
/*
 * Evaluate sin or cos of the angle (numerator / 2^denom_shift) degrees by
 * a fixed-point Taylor series with `scale` fractional bits.
 *
 * *out receives the result scaled by 2^scale; *err receives an upper
 * bound on the accumulated error in units of 2^-scale.
 *
 * NOTE(review): the error bound constants (1048576 == 2^20 base plus 256
 * per summed term) are not derived here -- presumably they cover the
 * pi/angle approximation and per-term truncation given the 32 guard bits
 * in scaled_angle/compute_pi_scaled; confirm before changing any of them.
 */
static void eval_series(Big *out, uint64_t *err, uint64_t numerator, unsigned denom_shift, EvalKind kind, unsigned scale) {
    Big z;
    Big z2;
    Big term;
    Big tmp;
    big_init(&z);
    big_init(&z2);
    big_init(&term);
    big_init(&tmp);

    /* z = angle in radians at 2^scale fixed point; z2 = z^2, same scale. */
    scaled_angle(&z, numerator, denom_shift, scale);
    big_mul_fixed_to(&z2, &z, &z, scale);

    uint64_t terms = 0;
    if (kind == CR_EVAL_SIN) {
        /* sin z = z - z^3/3! + ...; term advances by * z^2 / ((2n)(2n+1)). */
        big_copy(out, &z);
        big_copy(&term, &z);
        for (uint64_t n = 1;; ++n) {
            big_mul_fixed_to(&tmp, &term, &z2, scale);
            big_div_u64_inplace(&tmp, (2u * n) * (2u * n + 1u));
            if (big_is_zero(&tmp)) {
                break;  /* term underflowed at this precision */
            }
            if ((n & 1u) != 0) {
                big_sub_inplace(out, &tmp, "sin Taylor term");
            } else {
                big_add_inplace(out, &tmp);
            }
            big_copy(&term, &tmp);
            ++terms;
        }
    } else {
        /* cos z = 1 - z^2/2! + ...; starts from 1 == 2^scale in fixed point. */
        big_set_pow2(out, scale);
        big_set_pow2(&term, scale);
        for (uint64_t n = 1;; ++n) {
            big_mul_fixed_to(&tmp, &term, &z2, scale);
            big_div_u64_inplace(&tmp, (2u * n - 1u) * (2u * n));
            if (big_is_zero(&tmp)) {
                break;  /* term underflowed at this precision */
            }
            if ((n & 1u) != 0) {
                big_sub_inplace(out, &tmp, "cos Taylor term");
            } else {
                big_add_inplace(out, &tmp);
            }
            big_copy(&term, &tmp);
            ++terms;
        }
    }

    *err = 1048576u + 256u * terms;
    big_free(&z);
    big_free(&z2);
    big_free(&term);
    big_free(&tmp);
}
|
|
|
/* Return +0.0 or -0.0 according to `negative`. */
static double exact_signed_zero(bool negative) {
    uint64_t bits = negative ? UINT64_C(0x8000000000000000) : UINT64_C(0);
    double out;
    memcpy(&out, &bits, sizeof out);
    return out;
}
|
|
|
/*
 * Reduce |x| = mantissa * 2^exponent degrees modulo 360.
 *
 * Returns a numerator n and sets *denom_shift = k such that the reduced
 * angle equals n / 2^k degrees with n / 2^k in [0, 360).
 *
 * exponent >= 0: the angle is an integer count of degrees; reduce as
 * (mantissa mod 360) * (2^exponent mod 360) mod 360 (denominator 1).
 * exponent < 0: denominator is 2^k.  For k <= 44 the modulus 360 << k
 * still fits in 64 bits (360 < 2^9), so reduce directly.  For k > 44,
 * mantissa < 2^53 < 360 * 2^k already, so no reduction is needed.
 */
static uint64_t reduce_abs_degrees(uint64_t mantissa, int exponent, unsigned *denom_shift) {
    if (exponent >= 0) {
        *denom_shift = 0;
        uint64_t m = mantissa % 360u;
        uint64_t p = pow2_mod_u64((unsigned)exponent, 360u);
        /* 128-bit product avoids overflow before the final mod. */
        return (uint64_t)(((unsigned __int128)m * p) % 360u);
    }

    unsigned k = (unsigned)(-exponent);
    *denom_shift = k;
    if (k <= 44u) {
        uint64_t modulus = 360ULL << k;
        return mantissa % modulus;
    }
    return mantissa;
}
|
|
|
/*
 * Correctly rounded sin of the reduced angle reduced_num / 2^denom_shift
 * degrees (in [0, 360)), with the input's sign applied at the end.
 *
 * Strategy: fold into [0, 45] degrees using quadrant and half-quadrant
 * symmetries, handle the exact multiples of 90 degrees without running
 * the series, then run a Ziv loop: evaluate the Taylor series at
 * increasing precision until the error interval rounds unambiguously.
 */
static double eval_reduced(uint64_t reduced_num, unsigned denom_shift, bool input_negative) {
    if (reduced_num == 0) {
        return exact_signed_zero(input_negative);
    }

    /* Split into a quadrant index (multiples of 90 degrees) and remainder. */
    uint64_t quarter;
    unsigned quadrant;
    uint64_t rem;
    if (denom_shift <= 56u) {
        quarter = 90ULL << denom_shift;
        quadrant = (unsigned)(reduced_num / quarter);
        rem = reduced_num - (uint64_t)quadrant * quarter;
    } else {
        /* Tiny angle: 90 << denom_shift would overflow, but the angle is
         * far inside quadrant 0 anyway. */
        quadrant = 0;
        rem = reduced_num;
        quarter = 0;
    }

    /* sin(0 deg) and sin(180 deg) are exact signed zeros. */
    if ((quadrant == 2u && rem == 0) || (quadrant == 0u && rem == 0)) {
        return exact_signed_zero(input_negative);
    }

    bool negative = input_negative;
    EvalKind kind;
    uint64_t t = rem;
    uint64_t half_quarter = denom_shift <= 57u ? 45ULL << denom_shift : UINT64_MAX;

    /* Quadrant identities: sin(90+x) = cos x, sin(180+x) = -sin x,
     * sin(270+x) = -cos x. */
    switch (quadrant & 3u) {
    case 0:
        kind = CR_EVAL_SIN;
        break;
    case 1:
        kind = CR_EVAL_COS;
        break;
    case 2:
        kind = CR_EVAL_SIN;
        negative = !negative;
        break;
    default:
        kind = CR_EVAL_COS;
        negative = !negative;
        break;
    }

    /* rem == 0 with a cos evaluation means sin(90 deg) or sin(270 deg): +/-1. */
    if (rem == 0 && kind == CR_EVAL_COS) {
        return bits_to_double((negative ? UINT64_C(0x8000000000000000) : 0) | UINT64_C(0x3ff0000000000000));
    }

    /* Fold the upper half of the quadrant (sin(90-x) = cos x and vice
     * versa) so the series argument stays at or below 45 degrees. */
    if (denom_shift <= 56u && t > half_quarter) {
        t = quarter - t;
        kind = kind == CR_EVAL_SIN ? CR_EVAL_COS : CR_EVAL_SIN;
    }

    /* Defensive: exact-angle cases were handled above. */
    if (t == 0) {
        if (kind == CR_EVAL_SIN) {
            return exact_signed_zero(negative);
        }
        return bits_to_double((negative ? UINT64_C(0x8000000000000000) : 0) | UINT64_C(0x3ff0000000000000));
    }

    /* Ziv loop: double the working precision until rounding is decided. */
    for (unsigned scale = 192u;; scale *= 2u) {
        Big value;
        uint64_t err;
        double out;
        big_init(&value);
        eval_series(&value, &err, t, denom_shift, kind, scale);
        bool decided = rounded_interval_decides(&value, err, scale, negative, &out);
        big_free(&value);
        if (decided) {
            return out;
        }
        if (scale > UINT32_MAX / 2u) {
            big_die("scale overflow before interval decision");
        }
    }
}
|
|
|
/*
 * Sine of x with x interpreted in degrees (the argument is reduced
 * modulo 360 and converted to radians via pi/180 internally).
 * NaN input yields a quiet NaN (payload bits preserved); +/-infinity
 * yields NaN; signed zero is returned exactly.
 */
double cr_sin(double x) {
    uint64_t ux = double_to_bits(x);
    bool sign = (ux >> 63) != 0;
    uint64_t biased_exp = (ux >> 52) & 0x7ffu;
    uint64_t frac = ux & UINT64_C(0x000fffffffffffff);

    /* NaN or infinity: produce a quiet NaN, keeping any payload bits. */
    if (biased_exp == 0x7ffu) {
        return bits_to_double(UINT64_C(0x7ff8000000000000) | frac);
    }
    /* sin(+/-0) = +/-0 exactly. */
    if ((ux & UINT64_C(0x7fffffffffffffff)) == 0) {
        return x;
    }

    /* Decompose |x| as mantissa * 2^exponent with an integer mantissa. */
    uint64_t mantissa;
    int exponent;
    if (biased_exp == 0) {
        mantissa = frac;        /* subnormal: no implicit bit */
        exponent = -1074;
    } else {
        mantissa = frac | (UINT64_C(1) << 52);
        exponent = (int)biased_exp - 1075;  /* == exp - 1023 - 52 */
    }

    unsigned denom_shift;
    uint64_t reduced = reduce_abs_degrees(mantissa, exponent, &denom_shift);
    return eval_reduced(reduced, denom_shift, sign);
}