diff --git a/acb/approx_dot.c b/acb/approx_dot.c
index ed6cc247..d9f1df72 100644
--- a/acb/approx_dot.c
+++ b/acb/approx_dot.c
@@ -81,12 +81,12 @@ void
 _arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn,
     mp_srcptr xptr, mp_size_t xn,
     mp_srcptr yptr, mp_size_t yn,
-    int negative, mp_bitcnt_t shift);
+    int negative, flint_bitcnt_t shift);

 void
 _arb_dot_add_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn,
     mp_srcptr xptr, mp_size_t xn,
-    int negative, mp_bitcnt_t shift);
+    int negative, flint_bitcnt_t shift);

 static void
 _arb_dot_output(arb_t res, mp_ptr sum, mp_size_t sn, int negative,
@@ -329,7 +329,7 @@ acb_approx_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong
     slong re_prec, im_prec;
     int xnegative, ynegative;
     mp_size_t xn, yn, re_sn, im_sn, alloc;
-    mp_bitcnt_t shift;
+    flint_bitcnt_t shift;
     arb_srcptr xi, yi;
     arf_srcptr xm, ym;
     mp_limb_t re_serr, im_serr; /* Sum over arithmetic errors */
diff --git a/acb/dot.c b/acb/dot.c
index 3f0b8546..777b5e87 100644
--- a/acb/dot.c
+++ b/acb/dot.c
@@ -164,12 +164,12 @@ add_errors(mag_t rad, uint64_t Aerr, slong Aexp, uint64_t Berr, slong Bexp, uint
 void
 _arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn,
     mp_srcptr xptr, mp_size_t xn,
     mp_srcptr yptr, mp_size_t yn,
-    int negative, mp_bitcnt_t shift);
+    int negative, flint_bitcnt_t shift);

 void
 _arb_dot_add_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn,
     mp_srcptr xptr, mp_size_t xn,
-    int negative, mp_bitcnt_t shift);
+    int negative, flint_bitcnt_t shift);

 static void
 _arb_dot_output(arb_t res, mp_ptr sum, mp_size_t sn, int negative,
@@ -358,7 +358,7 @@ acb_dot(acb_t res, const acb_t initial, int subtract, acb_srcptr x, slong xstep,
     slong xrexp, yrexp;
     int xnegative, ynegative;
     mp_size_t xn, yn, re_sn, im_sn, alloc;
-    mp_bitcnt_t shift;
+    flint_bitcnt_t shift;
     arb_srcptr xi, yi;
     arf_srcptr xm, ym;
     mag_srcptr xr, yr;
diff --git a/acb_mat/test/t-exp.c b/acb_mat/test/t-exp.c
index 8227462f..0557e50b 100644
--- a/acb_mat/test/t-exp.c
+++ b/acb_mat/test/t-exp.c
@@ -12,7 +12,7 @@
 #include "acb_mat.h"

 void
-_fmpq_mat_randtest_for_exp(fmpq_mat_t mat, flint_rand_t state, mp_bitcnt_t bits)
+_fmpq_mat_randtest_for_exp(fmpq_mat_t mat, flint_rand_t state, flint_bitcnt_t bits)
 {
     slong i, j;
     slong d, l, u;
diff --git a/arb.h b/arb.h
index 9e88609e..efcc42d5 100644
--- a/arb.h
+++ b/arb.h
@@ -161,7 +161,7 @@ void arb_abs(arb_t y, const arb_t x);
 void arb_sgn(arb_t res, const arb_t x);
 int arb_sgn_nonzero(const arb_t x);

-void _arb_digits_round_inplace(char * s, mp_bitcnt_t * shift, fmpz_t error, slong n, arf_rnd_t rnd);
+void _arb_digits_round_inplace(char * s, flint_bitcnt_t * shift, fmpz_t error, slong n, arf_rnd_t rnd);

 int arb_set_str(arb_t res, const char * inp, slong prec);
@@ -894,11 +894,11 @@ int _arb_get_mpn_fixed_mod_log2(mp_ptr w, fmpz_t q, mp_limb_t * error,
 slong _arb_exp_taylor_bound(slong mag, slong prec);

-void _arb_exp_sum_bs_powtab(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
-    const fmpz_t x, mp_bitcnt_t r, slong N);
+void _arb_exp_sum_bs_powtab(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
+    const fmpz_t x, flint_bitcnt_t r, slong N);

-void _arb_exp_sum_bs_simple(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
-    const fmpz_t x, mp_bitcnt_t r, slong N);
+void _arb_exp_sum_bs_simple(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
+    const fmpz_t x, flint_bitcnt_t r, slong N);

 /* sin/cos implementation */
@@ -943,12 +943,12 @@ void arb_sin_cos_wide(arb_t s, arb_t c, const arb_t x, slong prec);
 void _arb_sin_cos_generic(arb_t s, arb_t c, const arf_t x, const mag_t xrad, slong prec);
 void arb_sin_cos_generic(arb_t s, arb_t c, const arb_t x, slong prec);

-ARB_INLINE mp_bitcnt_t
+ARB_INLINE flint_bitcnt_t
 _arb_mpn_leading_zeros(mp_srcptr d, mp_size_t n)
 {
     mp_limb_t t;
     mp_size_t zero_limbs;
-    mp_bitcnt_t bits;
+    flint_bitcnt_t bits;

     zero_limbs = 0;
@@ -969,11 +969,11 @@ _arb_mpn_leading_zeros(mp_srcptr d, mp_size_t n)
         }
     }

-void _arb_atan_sum_bs_simple(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
-    const fmpz_t x, mp_bitcnt_t r, slong N);
+void _arb_atan_sum_bs_simple(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
+    const fmpz_t x, flint_bitcnt_t r, slong N);

-void _arb_atan_sum_bs_powtab(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
-    const fmpz_t x, mp_bitcnt_t r, slong N);
+void _arb_atan_sum_bs_powtab(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
+    const fmpz_t x, flint_bitcnt_t r, slong N);

 void arb_atan_arf_bb(arb_t z, const arf_t x, slong prec);
diff --git a/arb/approx_dot.c b/arb/approx_dot.c
index 225246d8..422ceddd 100644
--- a/arb/approx_dot.c
+++ b/arb/approx_dot.c
@@ -82,12 +82,12 @@ void
 _arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn,
     mp_srcptr xptr, mp_size_t xn,
     mp_srcptr yptr, mp_size_t yn,
-    int negative, mp_bitcnt_t shift);
+    int negative, flint_bitcnt_t shift);

 void
 _arb_dot_add_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn,
     mp_srcptr xptr, mp_size_t xn,
-    int negative, mp_bitcnt_t shift);
+    int negative, flint_bitcnt_t shift);

 void
 arb_approx_dot_simple(arb_t res, const arb_t initial, int subtract,
@@ -131,7 +131,7 @@ arb_approx_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong
     slong xexp, yexp, exp, max_exp, min_exp, sum_exp;
     int xnegative, ynegative;
     mp_size_t xn, yn, sn, alloc;
-    mp_bitcnt_t shift;
+    flint_bitcnt_t shift;
     arb_srcptr xi, yi;
     arf_srcptr xm, ym;
     mp_limb_t serr; /* Sum over arithmetic errors - not used, but need dummy for calls */
diff --git a/arb/atan_arf_bb.c b/arb/atan_arf_bb.c
index b654d37f..a73776a4 100644
--- a/arb/atan_arf_bb.c
+++ b/arb/atan_arf_bb.c
@@ -153,7 +153,7 @@ arb_atan_arf_bb(arb_t z, const arf_t x, slong prec)
 {
     slong iter, bits, r, mag, q, wp, N;
     slong argred_bits, start_bits;
-    mp_bitcnt_t Qexp[1];
+    flint_bitcnt_t Qexp[1];
     int inverse;
     mag_t inp_err;
     fmpz_t s, t, u, P, Q, err;
diff --git a/arb/atan_sum_bs_powtab.c b/arb/atan_sum_bs_powtab.c
index e1080888..714831ae 100644
--- a/arb/atan_sum_bs_powtab.c
+++ b/arb/atan_sum_bs_powtab.c
@@ -16,9 +16,9 @@ slong _arb_compute_bs_exponents(slong * tab, slong n);
 slong _arb_get_exp_pos(const slong * tab, slong step);

 static void
-bsplit(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
+bsplit(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
     const slong * xexp,
-    const fmpz * xpow, mp_bitcnt_t r, slong a, slong b)
+    const fmpz * xpow, flint_bitcnt_t r, slong a, slong b)
 {
     if (b - a == 1)
     {
@@ -47,7 +47,7 @@ bsplit(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
     else
     {
         slong step, m, i;
-        mp_bitcnt_t Q2exp[1];
+        flint_bitcnt_t Q2exp[1];
         fmpz_t Q2, T2;

         step = (b - a) / 2;
@@ -75,8 +75,8 @@ bsplit(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
 }

 void
-_arb_atan_sum_bs_powtab(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
-    const fmpz_t x, mp_bitcnt_t r, slong N)
+_arb_atan_sum_bs_powtab(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
+    const fmpz_t x, flint_bitcnt_t r, slong N)
 {
     slong * xexp;
     slong length, i;
diff --git a/arb/atan_sum_bs_simple.c b/arb/atan_sum_bs_simple.c
index 2570ff24..fa791078 100644
--- a/arb/atan_sum_bs_simple.c
+++ b/arb/atan_sum_bs_simple.c
@@ -12,8 +12,8 @@
 #include "arb.h"

 static void
-bsplit(fmpz_t P, fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
-    const fmpz_t x, mp_bitcnt_t r, slong a, slong b)
+bsplit(fmpz_t P, fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
+    const fmpz_t x, flint_bitcnt_t r, slong a, slong b)
 {
     if (b - a == 1)
     {
@@ -30,7 +30,7 @@ bsplit(fmpz_t P, fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
     else
     {
         slong step, m;
-        mp_bitcnt_t Q2exp[1];
+        flint_bitcnt_t Q2exp[1];
         fmpz_t P2, Q2, T2;

         step = (b - a) / 2;
@@ -58,8 +58,8 @@ bsplit(fmpz_t P, fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
 }

 void
-_arb_atan_sum_bs_simple(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
-    const fmpz_t x, mp_bitcnt_t r, slong N)
+_arb_atan_sum_bs_simple(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
+    const fmpz_t x, flint_bitcnt_t r, slong N)
 {
     fmpz_t P;
     fmpz_init(P);
diff --git a/arb/dot.c b/arb/dot.c
index af9c6e1e..10f0841e 100644
--- a/arb/dot.c
+++ b/arb/dot.c
@@ -232,7 +232,7 @@ mulhigh(mp_ptr res, mp_srcptr xptr, mp_size_t xn, mp_srcptr yptr, mp_size_t yn,
 void
 _arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn,
     mp_srcptr xptr, mp_size_t xn, mp_srcptr yptr, mp_size_t yn,
-    int negative, mp_bitcnt_t shift)
+    int negative, flint_bitcnt_t shift)
 {
     slong shift_bits, shift_limbs, term_prec;
     mp_limb_t cy;
@@ -358,7 +358,7 @@ _arb_dot_addmul_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn,
 void
 _arb_dot_add_generic(mp_ptr sum, mp_ptr serr, mp_ptr tmp, mp_size_t sn,
     mp_srcptr xptr, mp_size_t xn,
-    int negative, mp_bitcnt_t shift)
+    int negative, flint_bitcnt_t shift)
 {
     slong shift_bits, shift_limbs, term_prec;
     mp_limb_t cy, err;
@@ -450,7 +450,7 @@ arb_dot(arb_t res, const arb_t initial, int subtract, arb_srcptr x, slong xstep,
     slong xrexp, yrexp, srad_exp, max_rad_exp;
     int xnegative, ynegative, inexact;
     mp_size_t xn, yn, sn, alloc;
-    mp_bitcnt_t shift;
+    flint_bitcnt_t shift;
     arb_srcptr xi, yi;
     arf_srcptr xm, ym;
     mag_srcptr xr, yr;
diff --git a/arb/exp_arf_bb.c b/arb/exp_arf_bb.c
index 9f31e951..73aab59c 100644
--- a/arb/exp_arf_bb.c
+++ b/arb/exp_arf_bb.c
@@ -45,7 +45,7 @@ arb_exp_arf_bb(arb_t z, const arf_t x, slong prec, int minus_one)
 {
     slong k, iter, bits, r, mag, q, wp, N;
     slong argred_bits, start_bits;
-    mp_bitcnt_t Qexp[1];
+    flint_bitcnt_t Qexp[1];
     int inexact;
     fmpz_t t, u, T, Q;
     arb_t w;
diff --git a/arb/exp_sum_bs_powtab.c b/arb/exp_sum_bs_powtab.c
index fd117dc4..ec87de94 100644
--- a/arb/exp_sum_bs_powtab.c
+++ b/arb/exp_sum_bs_powtab.c
@@ -124,9 +124,9 @@ _arb_get_exp_pos(const slong * tab, slong step)
 }

 static void
-bsplit(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
+bsplit(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
     const slong * xexp,
-    const fmpz * xpow, mp_bitcnt_t r, slong a, slong b)
+    const fmpz * xpow, flint_bitcnt_t r, slong a, slong b)
 {
     int cc;
@@ -155,7 +155,7 @@ bsplit(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
     else
     {
         slong step, m, i;
-        mp_bitcnt_t Q2exp[1];
+        flint_bitcnt_t Q2exp[1];
         fmpz_t Q2, T2;

         step = (b - a) / 2;
@@ -182,8 +182,8 @@ bsplit(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
 }

 void
-_arb_exp_sum_bs_powtab(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
-    const fmpz_t x, mp_bitcnt_t r, slong N)
+_arb_exp_sum_bs_powtab(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
+    const fmpz_t x, flint_bitcnt_t r, slong N)
 {
     slong * xexp;
     slong length, i;
diff --git a/arb/exp_sum_bs_simple.c b/arb/exp_sum_bs_simple.c
index c70670ae..315c33b7 100644
--- a/arb/exp_sum_bs_simple.c
+++ b/arb/exp_sum_bs_simple.c
@@ -12,7 +12,7 @@
 #include "arb.h"

 static void
-bsplit(fmpz_t P, fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp, const fmpz_t x,
+bsplit(fmpz_t P, fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp, const fmpz_t x,
     slong r, slong a, slong b, int cont)
 {
     if (b - a == 1)
@@ -25,7 +25,7 @@ bsplit(fmpz_t P, fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp, const fmpz_t x,
     else
     {
         slong m;
-        mp_bitcnt_t Q2exp[1];
+        flint_bitcnt_t Q2exp[1];
         fmpz_t P2, Q2, T2;

         m = a + (b - a) / 2;
@@ -54,8 +54,8 @@ bsplit(fmpz_t P, fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp, const fmpz_t x,
 }

 void
-_arb_exp_sum_bs_simple(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
-    const fmpz_t x, mp_bitcnt_t r, slong N)
+_arb_exp_sum_bs_simple(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
+    const fmpz_t x, flint_bitcnt_t r, slong N)
 {
     fmpz_t P;
     fmpz_init(P);
diff --git a/arb/get_interval_fmpz_2exp.c b/arb/get_interval_fmpz_2exp.c
index 8a24b866..c062e51e 100644
--- a/arb/get_interval_fmpz_2exp.c
+++ b/arb/get_interval_fmpz_2exp.c
@@ -36,7 +36,7 @@ arb_get_interval_fmpz_2exp(fmpz_t a, fmpz_t b, fmpz_t exp, const arb_t x)
     arf_t rad;
     fmpz_t tmp;
     slong shift;
-    mp_bitcnt_t aval, bval;
+    flint_bitcnt_t aval, bval;

     fmpz_init(tmp);
diff --git a/arb/get_str.c b/arb/get_str.c
index 1ad203b4..a340d0b5 100644
--- a/arb/get_str.c
+++ b/arb/get_str.c
@@ -173,7 +173,7 @@ _arb_digits_as_float_str(char ** d, fmpz_t e, slong minfix, slong maxfix)
    exactly. */

 void
-_arb_digits_round_inplace(char * s, mp_bitcnt_t * shift, fmpz_t error, slong n, arf_rnd_t rnd)
+_arb_digits_round_inplace(char * s, flint_bitcnt_t * shift, fmpz_t error, slong n, arf_rnd_t rnd)
 {
     slong i, m;
     int up;
@@ -300,7 +300,7 @@ arb_get_str_parts(int * negative, char **mid_digits, fmpz_t mid_exp,
 {
     fmpz_t mid, rad, exp, err;
     slong good;
-    mp_bitcnt_t shift;
+    flint_bitcnt_t shift;

     if (!arb_is_finite(x))
     {
diff --git a/arb/sin_cos_arf_bb.c b/arb/sin_cos_arf_bb.c
index 27b4ef29..4da5db52 100644
--- a/arb/sin_cos_arf_bb.c
+++ b/arb/sin_cos_arf_bb.c
@@ -15,9 +15,9 @@ slong _arb_compute_bs_exponents(slong * tab, slong n);
 slong _arb_get_exp_pos(const slong * tab, slong step);

 static void
-bsplit(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
+bsplit(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
     const slong * xexp,
-    const fmpz * xpow, mp_bitcnt_t r, slong a, slong b)
+    const fmpz * xpow, flint_bitcnt_t r, slong a, slong b)
 {
     int cc;
@@ -50,7 +50,7 @@ bsplit(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
     else
     {
         slong step, m, i;
-        mp_bitcnt_t Q2exp[1];
+        flint_bitcnt_t Q2exp[1];
         fmpz_t Q2, T2;

         step = (b - a) / 2;
@@ -78,8 +78,8 @@ bsplit(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
 /* todo: also allow computing cos, using the same table... */
 void
-_arb_sin_sum_bs_powtab(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp,
-    const fmpz_t x, mp_bitcnt_t r, slong N)
+_arb_sin_sum_bs_powtab(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp,
+    const fmpz_t x, flint_bitcnt_t r, slong N)
 {
     slong * xexp;
     slong length, i;
@@ -155,11 +155,11 @@ bs_num_terms(slong mag, slong prec)
 }

 void
-arb_sin_cos_fmpz_div_2exp_bsplit(arb_t wsin, arb_t wcos, const fmpz_t x, mp_bitcnt_t r, slong prec)
+arb_sin_cos_fmpz_div_2exp_bsplit(arb_t wsin, arb_t wcos, const fmpz_t x, flint_bitcnt_t r, slong prec)
 {
     fmpz_t T, Q;
     slong N, xmag;
-    mp_bitcnt_t Qexp[1];
+    flint_bitcnt_t Qexp[1];

     /* slightly reduce memory usage at very high precision */
     arb_zero(wsin);
diff --git a/arb/sin_cos_pi_fmpq.c b/arb/sin_cos_pi_fmpq.c
index 9cd4d642..dd351f78 100644
--- a/arb/sin_cos_pi_fmpq.c
+++ b/arb/sin_cos_pi_fmpq.c
@@ -93,7 +93,7 @@ reduce_octant(fmpz_t v, fmpz_t w, const fmpq_t x)
     const fmpz * p = fmpq_numref(x);
     const fmpz * q = fmpq_denref(x);
     unsigned int octant;
-    mp_bitcnt_t vval, wval;
+    flint_bitcnt_t vval, wval;

     if (*p > COEFF_MIN / 8 && *p < COEFF_MAX / 8 &&
diff --git a/arb/test/t-atan_sum_bs_powtab.c b/arb/test/t-atan_sum_bs_powtab.c
index 8d0a38a3..93bc949c 100644
--- a/arb/test/t-atan_sum_bs_powtab.c
+++ b/arb/test/t-atan_sum_bs_powtab.c
@@ -26,7 +26,7 @@ int main()
         slong N;
         fmpz_t x, T, Q;
         fmpq_t S, V;
-        mp_bitcnt_t Qexp, r;
+        flint_bitcnt_t Qexp, r;

         fmpz_init(x);
         fmpz_init(T);
diff --git a/arb/test/t-digits_round_inplace.c b/arb/test/t-digits_round_inplace.c
index d63b02c9..235fc820 100644
--- a/arb/test/t-digits_round_inplace.c
+++ b/arb/test/t-digits_round_inplace.c
@@ -23,7 +23,7 @@ int main()
     {
         char s[30];
         slong i, j, len, n;
-        mp_bitcnt_t shift;
+        flint_bitcnt_t shift;
         fmpz_t inp, out, err, t;
         arf_rnd_t rnd;
diff --git a/arb/test/t-exp_sum_bs_powtab.c b/arb/test/t-exp_sum_bs_powtab.c
index 5d471616..dc1e535d 100644
--- a/arb/test/t-exp_sum_bs_powtab.c
+++ b/arb/test/t-exp_sum_bs_powtab.c
@@ -26,7 +26,7 @@ int main()
         slong N;
         fmpz_t x, T, Q;
         fmpq_t S, V;
-        mp_bitcnt_t Qexp, r;
+        flint_bitcnt_t Qexp, r;

         fmpz_init(x);
         fmpz_init(T);
diff --git a/arb/test/t-sin_cos_arf_bb.c b/arb/test/t-sin_cos_arf_bb.c
index 492176e1..a40328dc 100644
--- a/arb/test/t-sin_cos_arf_bb.c
+++ b/arb/test/t-sin_cos_arf_bb.c
@@ -12,7 +12,7 @@
 #include "arb.h"

 void arb_sin_cos_fmpz_div_2exp_bsplit(arb_t wsin, arb_t wcos,
-    const fmpz_t x, mp_bitcnt_t r, slong prec);
+    const fmpz_t x, flint_bitcnt_t r, slong prec);

 int main()
 {
diff --git a/arb_mat/test/t-cho.c b/arb_mat/test/t-cho.c
index 67572d18..c286a569 100644
--- a/arb_mat/test/t-cho.c
+++ b/arb_mat/test/t-cho.c
@@ -12,7 +12,7 @@
 #include "arb_mat.h"

 void
-_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, mp_bitcnt_t bits)
+_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, flint_bitcnt_t bits)
 {
     slong n;
     fmpq_mat_t R, RT;
diff --git a/arb_mat/test/t-exp.c b/arb_mat/test/t-exp.c
index ffa6cd09..8695fb6b 100644
--- a/arb_mat/test/t-exp.c
+++ b/arb_mat/test/t-exp.c
@@ -12,7 +12,7 @@
 #include "arb_mat.h"

 void
-_fmpq_mat_randtest_for_exp(fmpq_mat_t mat, flint_rand_t state, mp_bitcnt_t bits)
+_fmpq_mat_randtest_for_exp(fmpq_mat_t mat, flint_rand_t state, flint_bitcnt_t bits)
 {
     slong i, j;
     slong d, l, u;
diff --git a/arb_mat/test/t-inv_cho_precomp.c b/arb_mat/test/t-inv_cho_precomp.c
index 497385aa..188cb323 100644
--- a/arb_mat/test/t-inv_cho_precomp.c
+++ b/arb_mat/test/t-inv_cho_precomp.c
@@ -12,7 +12,7 @@
 #include "arb_mat.h"

 void
-_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, mp_bitcnt_t bits)
+_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, flint_bitcnt_t bits)
 {
     slong n;
     fmpq_mat_t R, RT;
diff --git a/arb_mat/test/t-inv_ldl_precomp.c b/arb_mat/test/t-inv_ldl_precomp.c
index 38daf8cf..f13a8f19 100644
--- a/arb_mat/test/t-inv_ldl_precomp.c
+++ b/arb_mat/test/t-inv_ldl_precomp.c
@@ -12,7 +12,7 @@
 #include "arb_mat.h"

 void
-_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, mp_bitcnt_t bits)
+_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, flint_bitcnt_t bits)
 {
     slong n;
     fmpq_mat_t R, RT;
diff --git a/arb_mat/test/t-ldl.c b/arb_mat/test/t-ldl.c
index 67622f35..5f849053 100644
--- a/arb_mat/test/t-ldl.c
+++ b/arb_mat/test/t-ldl.c
@@ -12,7 +12,7 @@
 #include "arb_mat.h"

 void
-_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, mp_bitcnt_t bits)
+_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, flint_bitcnt_t bits)
 {
     slong n;
     fmpq_mat_t R, RT;
diff --git a/arb_mat/test/t-solve_cho_precomp.c b/arb_mat/test/t-solve_cho_precomp.c
index b1188114..189000d1 100644
--- a/arb_mat/test/t-solve_cho_precomp.c
+++ b/arb_mat/test/t-solve_cho_precomp.c
@@ -12,7 +12,7 @@
 #include "arb_mat.h"

 void
-_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, mp_bitcnt_t bits)
+_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, flint_bitcnt_t bits)
 {
     slong n;
     fmpq_mat_t R, RT;
diff --git a/arb_mat/test/t-solve_ldl_precomp.c b/arb_mat/test/t-solve_ldl_precomp.c
index 9e682ba2..1372e1ea 100644
--- a/arb_mat/test/t-solve_ldl_precomp.c
+++ b/arb_mat/test/t-solve_ldl_precomp.c
@@ -12,7 +12,7 @@
 #include "arb_mat.h"

 void
-_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, mp_bitcnt_t bits)
+_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, flint_bitcnt_t bits)
 {
     slong n;
     fmpq_mat_t R, RT;
diff --git a/arb_mat/test/t-spd_inv.c b/arb_mat/test/t-spd_inv.c
index 325edee8..5ba58418 100644
--- a/arb_mat/test/t-spd_inv.c
+++ b/arb_mat/test/t-spd_inv.c
@@ -12,7 +12,7 @@
 #include "arb_mat.h"

 void
-_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, mp_bitcnt_t bits)
+_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, flint_bitcnt_t bits)
 {
     slong n;
     fmpq_mat_t R, RT;
diff --git a/arb_mat/test/t-spd_solve.c b/arb_mat/test/t-spd_solve.c
index 9edb2d66..656fca43 100644
--- a/arb_mat/test/t-spd_solve.c
+++ b/arb_mat/test/t-spd_solve.c
@@ -12,7 +12,7 @@
 #include "arb_mat.h"

 void
-_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, mp_bitcnt_t bits)
+_fmpq_mat_randtest_positive_semidefinite(fmpq_mat_t mat, flint_rand_t state, flint_bitcnt_t bits)
 {
     slong n;
     fmpq_mat_t R, RT;
diff --git a/arf.h b/arf.h
index fe083a35..045c7296 100644
--- a/arf.h
+++ b/arf.h
@@ -997,7 +997,7 @@ ARB_DLL extern void _arf_add_tmp_cleanup(void);
 int _arf_add_mpn(arf_t z, mp_srcptr xp, mp_size_t xn, int xsgnbit, const fmpz_t xexp,
     mp_srcptr yp, mp_size_t yn, int ysgnbit,
-    mp_bitcnt_t shift, slong prec, arf_rnd_t rnd);
+    flint_bitcnt_t shift, slong prec, arf_rnd_t rnd);

 int arf_add(arf_ptr z, arf_srcptr x, arf_srcptr y, slong prec, arf_rnd_t rnd);
 int arf_add_si(arf_ptr z, arf_srcptr x, slong y, slong prec, arf_rnd_t rnd);
diff --git a/arf/add_mpn.c b/arf/add_mpn.c
index c4aa3470..b163e6ba 100644
--- a/arf/add_mpn.c
+++ b/arf/add_mpn.c
@@ -24,12 +24,12 @@ void _arf_add_tmp_cleanup(void)
 /* Assumptions: top limbs of x and y nonzero. */
 int
 _arf_add_mpn(arf_t z, mp_srcptr xp, mp_size_t xn, int xsgnbit, const fmpz_t xexp,
-    mp_srcptr yp, mp_size_t yn, int ysgnbit, mp_bitcnt_t shift,
+    mp_srcptr yp, mp_size_t yn, int ysgnbit, flint_bitcnt_t shift,
     slong prec, arf_rnd_t rnd)
 {
     mp_size_t wn, zn, zn_original, alloc, xbase, wbase;
     mp_size_t shift_limbs;
-    mp_bitcnt_t shift_bits;
+    flint_bitcnt_t shift_bits;
     int inexact;
     slong fix;
     mp_limb_t cy;
diff --git a/arf/get_integer_mpn.c b/arf/get_integer_mpn.c
index 138a13b2..7b1cbcd4 100644
--- a/arf/get_integer_mpn.c
+++ b/arf/get_integer_mpn.c
@@ -19,7 +19,7 @@ _arf_get_integer_mpn(mp_ptr y, mp_srcptr x, mp_size_t xn, slong exp)
     if (bot_exp >= 0)
     {
         mp_size_t bot_limbs;
-        mp_bitcnt_t bot_bits;
+        flint_bitcnt_t bot_bits;

         bot_limbs = bot_exp / FLINT_BITS;
         bot_bits = bot_exp % FLINT_BITS;
@@ -42,7 +42,7 @@ _arf_get_integer_mpn(mp_ptr y, mp_srcptr x, mp_size_t xn, slong exp)
     else
     {
         mp_size_t top_limbs;
-        mp_bitcnt_t top_bits;
+        flint_bitcnt_t top_bits;
         mp_limb_t cy;

         top_limbs = exp / FLINT_BITS;
diff --git a/arf/set_round_mpn.c b/arf/set_round_mpn.c
index 05f84c04..8da835ad 100644
--- a/arf/set_round_mpn.c
+++ b/arf/set_round_mpn.c
@@ -16,7 +16,7 @@ _arf_set_round_mpn(arf_t y, slong * exp_shift, mp_srcptr x, mp_size_t xn,
     int sgnbit, slong prec, arf_rnd_t rnd)
 {
     unsigned int leading;
-    mp_bitcnt_t exp, bc, val, val_bits;
+    flint_bitcnt_t exp, bc, val, val_bits;
     mp_size_t yn, val_limbs;
     mp_ptr yptr;
     mp_limb_t t;
@@ -67,7 +67,7 @@ _arf_set_round_mpn(arf_t y, slong * exp_shift, mp_srcptr x, mp_size_t xn,
         /* The bit to the right of the truncation point determines the rounding
            direction. */
         mp_size_t exc_limbs = (exp - prec - 1) / FLINT_BITS;
-        mp_bitcnt_t exc_bits = (exp - prec - 1) % FLINT_BITS;
+        flint_bitcnt_t exc_bits = (exp - prec - 1) % FLINT_BITS;

         increment = (x[exc_limbs] >> exc_bits) & 1;
     }
diff --git a/arf/set_round_uiui.c b/arf/set_round_uiui.c
index 5486feb0..78ac9c72 100644
--- a/arf/set_round_uiui.c
+++ b/arf/set_round_uiui.c
@@ -119,7 +119,7 @@ _arf_set_round_uiui(arf_t z, slong * fix, mp_limb_t hi, mp_limb_t lo, int sgnbit
     else
     {
         /* two or more excess bits; test the first excess bit */
-        mp_bitcnt_t pos = 2 * FLINT_BITS - leading - prec - 1;
+        flint_bitcnt_t pos = 2 * FLINT_BITS - leading - prec - 1;

         if (pos < FLINT_BITS)
             up = (lo >> pos) & 1;
diff --git a/doc/source/arb.rst b/doc/source/arb.rst
index aa0f5869..feea1222 100644
--- a/doc/source/arb.rst
+++ b/doc/source/arb.rst
@@ -1675,9 +1675,9 @@ Internals for computing elementary functions
     could be improved by using `\log(2)` based reduction at precision low
     enough that the value can be assumed to be cached.

-.. function:: void _arb_exp_sum_bs_simple(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp, const fmpz_t x, mp_bitcnt_t r, slong N)
+.. function:: void _arb_exp_sum_bs_simple(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp, const fmpz_t x, flint_bitcnt_t r, slong N)

-.. function:: void _arb_exp_sum_bs_powtab(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp, const fmpz_t x, mp_bitcnt_t r, slong N)
+.. function:: void _arb_exp_sum_bs_powtab(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp, const fmpz_t x, flint_bitcnt_t r, slong N)

     Computes *T*, *Q* and *Qexp* such that
     `T / (Q 2^{\text{Qexp}}) = \sum_{k=1}^N (x/2^r)^k/k!` using binary splitting.
@@ -1692,9 +1692,9 @@ Internals for computing elementary functions
     Computes the exponential function using a generic version of the
     rectangular splitting strategy, intended for intermediate precision.

-.. function:: void _arb_atan_sum_bs_simple(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp, const fmpz_t x, mp_bitcnt_t r, slong N)
+.. function:: void _arb_atan_sum_bs_simple(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp, const fmpz_t x, flint_bitcnt_t r, slong N)

-.. function:: void _arb_atan_sum_bs_powtab(fmpz_t T, fmpz_t Q, mp_bitcnt_t * Qexp, const fmpz_t x, mp_bitcnt_t r, slong N)
+.. function:: void _arb_atan_sum_bs_powtab(fmpz_t T, fmpz_t Q, flint_bitcnt_t * Qexp, const fmpz_t x, flint_bitcnt_t r, slong N)

     Computes *T*, *Q* and *Qexp* such that
     `T / (Q 2^{\text{Qexp}}) = \sum_{k=1}^N (-1)^k (x/2^r)^{2k} / (2k+1)`
diff --git a/doc/source/fmpz_extras.rst b/doc/source/fmpz_extras.rst
index c199404c..572cbc8e 100644
--- a/doc/source/fmpz_extras.rst
+++ b/doc/source/fmpz_extras.rst
@@ -24,7 +24,7 @@ Convenience methods

     Sets *z* to the sum (respectively difference) of *x* and *y*.

-.. function:: void fmpz_adiv_q_2exp(fmpz_t z, const fmpz_t x, mp_bitcnt_t exp)
+.. function:: void fmpz_adiv_q_2exp(fmpz_t z, const fmpz_t x, flint_bitcnt_t exp)

     Sets *z* to `x / 2^{exp}`, rounded away from zero.

@@ -109,7 +109,7 @@ Low-level conversions
     and *zptr* is set to point to *ztmp*. The case where *zv* is zero is not
     handled specially, and *zn* is set to 1.

-.. function:: void fmpz_lshift_mpn(fmpz_t z, mp_srcptr src, mp_size_t n, int negative, mp_bitcnt_t shift)
+.. function:: void fmpz_lshift_mpn(fmpz_t z, mp_srcptr src, mp_size_t n, int negative, flint_bitcnt_t shift)

     Sets *z* to the integer represented by the *n* limbs in the array *src*,
     or minus this value if *negative* is 1, shifted left by *shift* bits.
diff --git a/doc/source/issues.rst b/doc/source/issues.rst
index 66126199..b473aeea 100644
--- a/doc/source/issues.rst
+++ b/doc/source/issues.rst
@@ -64,7 +64,7 @@ internal representation of numbers (using limb arrays).

     A limb count (always nonnegative).

-.. type:: mp_bitcnt_t
+.. type:: flint_bitcnt_t

     A bit offset within an array of limbs (always nonnegative).
diff --git a/fmpr/set_round.c b/fmpr/set_round.c
index 36f4f4f7..148a8ece 100644
--- a/fmpr/set_round.c
+++ b/fmpr/set_round.c
@@ -12,8 +12,8 @@
 #include "fmpr.h"

 /* like mpn_scan0b, but takes an upper size */
-static __inline__ mp_bitcnt_t
-mpn_scan0b(mp_srcptr up, mp_size_t size, mp_bitcnt_t from_bit)
+static __inline__ flint_bitcnt_t
+mpn_scan0b(mp_srcptr up, mp_size_t size, flint_bitcnt_t from_bit)
 {
     mp_limb_t t;
     slong i, c;
diff --git a/fmpr/set_round_mpn.c b/fmpr/set_round_mpn.c
index 907ef8cc..9e192bd3 100644
--- a/fmpr/set_round_mpn.c
+++ b/fmpr/set_round_mpn.c
@@ -12,8 +12,8 @@
 #include "fmpr.h"

 /* like mpn_scan0, but takes an upper size */
-static __inline__ mp_bitcnt_t
-mpn_scan0b(mp_srcptr up, mp_size_t size, mp_bitcnt_t from_bit)
+static __inline__ flint_bitcnt_t
+mpn_scan0b(mp_srcptr up, mp_size_t size, flint_bitcnt_t from_bit)
 {
     mp_limb_t t;
     slong i, c;
diff --git a/fmpz_extras.h b/fmpz_extras.h
index 486e28c8..85a06b3e 100644
--- a/fmpz_extras.h
+++ b/fmpz_extras.h
@@ -140,7 +140,7 @@ fmpz_set_mpn_large(fmpz_t z, mp_srcptr src, mp_size_t n, int negative)
 }

 static __inline__ void
-fmpz_adiv_q_2exp(fmpz_t z, const fmpz_t x, mp_bitcnt_t exp)
+fmpz_adiv_q_2exp(fmpz_t z, const fmpz_t x, flint_bitcnt_t exp)
 {
     int sign = fmpz_sgn(x);
@@ -246,7 +246,7 @@ fmpz_min(fmpz_t z, const fmpz_t x, const fmpz_t y)
         (zn) = FLINT_ABS(zn); \
     }

-void fmpz_lshift_mpn(fmpz_t z, mp_srcptr d, mp_size_t dn, int sgnbit, mp_bitcnt_t shift);
+void fmpz_lshift_mpn(fmpz_t z, mp_srcptr d, mp_size_t dn, int sgnbit, flint_bitcnt_t shift);

 static __inline__ slong
 fmpz_allocated_bytes(const fmpz_t x)
diff --git a/fmpz_extras/lshift_mpn.c b/fmpz_extras/lshift_mpn.c
index 1f7a7ba2..3bc0ecb9 100644
--- a/fmpz_extras/lshift_mpn.c
+++ b/fmpz_extras/lshift_mpn.c
@@ -12,12 +12,12 @@
 #include "fmpz_extras.h"

 void
-fmpz_lshift_mpn(fmpz_t z, mp_srcptr d, mp_size_t dn, int sgnbit, mp_bitcnt_t shift)
+fmpz_lshift_mpn(fmpz_t z, mp_srcptr d, mp_size_t dn, int sgnbit, flint_bitcnt_t shift)
 {
     __mpz_struct * zmpz;
     mp_ptr zp;
     mp_size_t zn, shift_limbs;
-    mp_bitcnt_t shift_bits;
+    flint_bitcnt_t shift_bits;

     zmpz = _fmpz_promote(z);
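Note on the change: the patch is a mechanical type rename. Every use of GMP's
mp_bitcnt_t becomes flint_bitcnt_t, the equivalent unsigned bit-count type
exported by newer FLINT releases and documented above in doc/source/issues.rst.
The sketch below is not part of the patch; the __FLINT_RELEASE cutoff of 20600
is an assumption used only for illustration of how the two names relate.

    #include "flint/flint.h"  /* defines __FLINT_RELEASE; newer releases define flint_bitcnt_t */

    /* Hypothetical compatibility shim (assumed, not from the patch): when
       building against a FLINT release that predates flint_bitcnt_t, alias
       it to the old GMP type so that sources using the new name compile. */
    #if __FLINT_RELEASE < 20600
    typedef mp_bitcnt_t flint_bitcnt_t;
    #endif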
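The doc/source/arb.rst hunk above documents the contract
T / (Q 2^Qexp) = sum_{k=1}^N (x/2^r)^k / k! for the exponential sum routines
whose Qexp and r parameters now have type flint_bitcnt_t. The sketch below
mirrors how the updated test files call these functions; the concrete values
of x, r and N are arbitrary illustrations, not taken from the patch.

    #include "arb.h"

    int main(void)
    {
        fmpz_t x, T, Q;
        flint_bitcnt_t Qexp, r;
        slong N;

        fmpz_init(x);
        fmpz_init(T);
        fmpz_init(Q);

        fmpz_set_ui(x, 3);   /* arbitrary example input */
        r = 10;              /* the series is evaluated at x / 2^r = 3/1024 */
        N = 20;              /* number of terms */

        /* After the call, T / (Q * 2^Qexp) equals sum_{k=1}^{N} (x/2^r)^k / k!,
           as documented in the arb.rst hunk above. */
        _arb_exp_sum_bs_simple(T, Q, &Qexp, x, r, N);

        fmpz_clear(x);
        fmpz_clear(T);
        fmpz_clear(Q);

        return 0;
    }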