/* mpfr_round_raw_generic, mpfr_round_raw2, mpfr_round_raw, mpfr_prec_round,
   mpfr_can_round, mpfr_can_round_raw -- various rounding functions

Copyright 1999-2023 Free Software Foundation, Inc.
Contributed by the AriC and Caramba projects, INRIA.

This file is part of the GNU MPFR Library.

The GNU MPFR Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MPFR Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
https://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */

#include "mpfr-impl.h"

#define mpfr_round_raw_generic mpfr_round_raw
#define flag 0
#define use_inexp 1
#include "round_raw_generic.c"

/* mpfr_round_raw_2 is called from mpfr_round_raw2 */
#define mpfr_round_raw_generic mpfr_round_raw_2
#define flag 1
#define use_inexp 0
#include "round_raw_generic.c"

/* Seems to be unused. Remove comment to implement it.
#define mpfr_round_raw_generic mpfr_round_raw_3
#define flag 1
#define use_inexp 1
#include "round_raw_generic.c"
*/

#define mpfr_round_raw_generic mpfr_round_raw_4
#define flag 0
#define use_inexp 0
#include "round_raw_generic.c"
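/* Illustrative usage sketch (assumed example, not MPFR code): a typical
   call sequence for mpfr_prec_round defined below.  The program, its
   variable names and the chosen precisions are only an assumption about
   common usage.

     #include <stdio.h>
     #include <mpfr.h>

     int main (void)
     {
       mpfr_t x;
       int inex;

       mpfr_init2 (x, 100);               // working precision: 100 bits
       mpfr_set_ui (x, 1, MPFR_RNDN);
       mpfr_div_ui (x, x, 3, MPFR_RNDN);  // x ~ 1/3, correctly rounded to 100 bits
       inex = mpfr_prec_round (x, 53, MPFR_RNDN);  // reduce x to 53 bits
       // The ternary value inex is negative, zero or positive according to
       // whether the 53-bit result is below, equal to or above the 100-bit value.
       mpfr_printf ("%.17Rg (ternary %d)\n", x, inex);
       mpfr_clear (x);
       return 0;
     }
*/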
/* Note: if the new prec is lower than the current one, a reallocation
   must not be done (see exp_2.c). */

int
mpfr_prec_round (mpfr_ptr x, mpfr_prec_t prec, mpfr_rnd_t rnd_mode)
{
  mp_limb_t *tmp, *xp;
  int carry, inexact;
  mpfr_prec_t nw, ow;
  MPFR_TMP_DECL(marker);

  MPFR_ASSERTN (MPFR_PREC_COND (prec));

  nw = MPFR_PREC2LIMBS (prec); /* needed allocated limbs */

  /* check if x has enough allocated space for the significand */
  /* Get the number of limbs from the precision.
     (Compatible with all allocation methods) */
  ow = MPFR_LIMB_SIZE (x);
  if (MPFR_UNLIKELY (nw > ow))
    {
      /* FIXME: Variable can't be created using custom allocation,
         MPFR_DECL_INIT or GROUP_ALLOC: How to detect? */
      ow = MPFR_GET_ALLOC_SIZE(x);
      if (nw > ow)
        {
          mpfr_size_limb_t *tmpx;

          /* Realloc significand */
          tmpx = (mpfr_size_limb_t *) mpfr_reallocate_func
            (MPFR_GET_REAL_PTR(x), MPFR_MALLOC_SIZE(ow), MPFR_MALLOC_SIZE(nw));
          MPFR_SET_MANT_PTR(x, tmpx); /* mant ptr must be set
                                         before alloc size */
          MPFR_SET_ALLOC_SIZE(x, nw); /* new number of allocated limbs */
        }
    }

  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(x) ))
    {
      MPFR_PREC(x) = prec; /* Special value: need to set prec */
      if (MPFR_IS_NAN(x))
        MPFR_RET_NAN;
      MPFR_ASSERTD(MPFR_IS_INF(x) || MPFR_IS_ZERO(x));
      return 0; /* infinity and zero are exact */
    }

  /* x is a non-zero real number */

  MPFR_TMP_MARK(marker);
  tmp = MPFR_TMP_LIMBS_ALLOC (nw);
  xp = MPFR_MANT(x);
  carry = mpfr_round_raw (tmp, xp, MPFR_PREC(x), MPFR_IS_NEG(x),
                          prec, rnd_mode, &inexact);
  MPFR_PREC(x) = prec;

  if (MPFR_UNLIKELY(carry))
    {
      mpfr_exp_t exp = MPFR_EXP (x);

      if (MPFR_UNLIKELY(exp == __gmpfr_emax))
        (void) mpfr_overflow(x, rnd_mode, MPFR_SIGN(x));
      else
        {
          MPFR_ASSERTD (exp < __gmpfr_emax);
          MPFR_SET_EXP (x, exp + 1);
          xp[nw - 1] = MPFR_LIMB_HIGHBIT;
          if (nw - 1 > 0)
            MPN_ZERO(xp, nw - 1);
        }
    }
  else
    MPN_COPY(xp, tmp, nw);

  MPFR_TMP_FREE(marker);
  return inexact;
}

/* assumption: GMP_NUMB_BITS is a power of 2 */

/* assuming b is an approximation to x in direction rnd1 with error at
   most 2^(MPFR_EXP(b)-err), returns 1 if one is able to round exactly
   x to precision prec with direction rnd2, and 0 otherwise.
   Side effects: none.

   rnd1 = RNDN and RNDF are similar: the sign of the error is unknown.

   rnd2 = RNDF: assume that the user will round the approximation b
   toward the direction of x, i.e. the opposite of rnd1 in directed
   rounding modes, otherwise RNDN. Some details:

                   u  xinf         v xsup          w
              -----|----+----------|--+------------|-----
                        [----- x -----]
   rnd1 = RNDD          b             |
   rnd1 = RNDU                        b

   where u, v and w are consecutive machine numbers.

   * If [xinf,xsup] contains no machine numbers, then return 1.

   * If [xinf,xsup] contains 2 machine numbers, then return 0.

   * If [xinf,xsup] contains a single machine number, then return 1 iff
     the rounding of b is this machine number.
     With the above choice for the rounding of b, this will always be
     the case if rnd1 is a directed rounding mode; said otherwise, for
     rnd2 = RNDF and rnd1 being a directed rounding mode, return 1 iff
     [xinf,xsup] contains at most 1 machine number.
*/

int
mpfr_can_round (mpfr_srcptr b, mpfr_exp_t err, mpfr_rnd_t rnd1,
                mpfr_rnd_t rnd2, mpfr_prec_t prec)
{
  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(b)))
    return 0; /* We cannot round if Zero, Nan or Inf */
  else
    return mpfr_can_round_raw (MPFR_MANT(b), MPFR_LIMB_SIZE(b),
                               MPFR_SIGN(b), err, rnd1, rnd2, prec);
}

/* TODO: mpfr_can_round_raw currently does a memory allocation and some
   mpn operations. A bit inspection like for mpfr_round_p (round_p.c) may
   be sufficient, though this would be more complex than the one done in
   mpfr_round_p, and in particular, for some rnd1/rnd2 combinations, one
   needs to take care of changes of binade when the value is close to a
   power of 2. */
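/* Illustrative usage sketch (assumed example, not MPFR code): callers of
   mpfr_can_round above typically sit in a Ziv-type loop -- compute an
   approximation at some working precision, bound its error, and retry with
   more precision until the approximation can be rounded to the target
   precision.  Here compute_approx and the error bound err are placeholders
   for the caller's own computation and error analysis.

     mpfr_prec_t prec = mpfr_get_prec (result);   // target precision
     mpfr_prec_t wprec = prec + 20;                // working precision (arbitrary guard)
     mpfr_t approx;

     mpfr_init2 (approx, wprec);
     for (;;)
       {
         compute_approx (approx, wprec);           // placeholder computation
         mpfr_exp_t err = wprec - 2;               // assumed bound: |error| <= 2^(EXP(approx)-err)
         if (mpfr_can_round (approx, err, MPFR_RNDN, MPFR_RNDN, prec))
           break;                                  // safe to round to prec bits
         wprec += GMP_NUMB_BITS;                   // otherwise retry with more precision
         mpfr_set_prec (approx, wprec);
       }
     mpfr_set (result, approx, MPFR_RNDN);
     mpfr_clear (approx);
*/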
int
mpfr_can_round_raw (const mp_limb_t *bp, mp_size_t bn, int neg, mpfr_exp_t err,
                    mpfr_rnd_t rnd1, mpfr_rnd_t rnd2, mpfr_prec_t prec)
{
  mpfr_prec_t prec2;
  mp_size_t k, k1, tn;
  int s, s1;
  mp_limb_t cc, cc2;
  mp_limb_t *tmp;
  mp_limb_t cy = 0, tmp_hi;
  int res;
  MPFR_TMP_DECL(marker);

  /* Since mpfr_can_round is a function in the API, use MPFR_ASSERTN.
     The specification makes sense only for prec >= 1. */
  MPFR_ASSERTN (prec >= 1);

  MPFR_ASSERTD(bp[bn - 1] & MPFR_LIMB_HIGHBIT);

  MPFR_ASSERT_SIGN(neg);
  neg = MPFR_IS_NEG_SIGN(neg);
  MPFR_ASSERTD (neg == 0 || neg == 1);

  /* For rnd1 and rnd2, transform RNDF / RNDD / RNDU to RNDN / RNDZ / RNDA
     (with a special case for rnd1 directed rounding, rnd2 = RNDF). */

  if (rnd1 == MPFR_RNDF)
    rnd1 = MPFR_RNDN; /* transform RNDF to RNDN */
  else if (rnd1 != MPFR_RNDN)
    rnd1 = MPFR_IS_LIKE_RNDZ(rnd1, neg) ? MPFR_RNDZ : MPFR_RNDA;

  MPFR_ASSERTD (rnd1 == MPFR_RNDN ||
                rnd1 == MPFR_RNDZ ||
                rnd1 == MPFR_RNDA);

  if (rnd2 == MPFR_RNDF)
    {
      if (rnd1 == MPFR_RNDN)
        rnd2 = MPFR_RNDN;
      else
        {
          rnd2 = MPFR_IS_LIKE_RNDZ(rnd1, neg) ? MPFR_RNDA : MPFR_RNDZ;
          /* Warning: in this case (rnd1 directed rounding, rnd2 = RNDF),
             the specification of mpfr_can_round says that we should
             return non-zero (i.e., we can round) when {bp, bn} is
             exactly representable in precision prec. */
          if (mpfr_round_raw2 (bp, bn, neg, MPFR_RNDA, prec) == 0)
            return 1;
        }
    }
  else if (rnd2 != MPFR_RNDN)
    rnd2 = MPFR_IS_LIKE_RNDZ(rnd2, neg) ? MPFR_RNDZ : MPFR_RNDA;

  MPFR_ASSERTD (rnd2 == MPFR_RNDN ||
                rnd2 == MPFR_RNDZ ||
                rnd2 == MPFR_RNDA);

  /* For err < prec (+1 for rnd1=RNDN), we can never round correctly, since
     the error is at least 2*ulp(b) >= ulp(round(b)).
     However, for err = prec (+1 for rnd1=RNDN), we can round correctly in
     some rare cases where ulp(b) = 1/2*ulp(U) [see below for the definition
     of U], which implies rnd1 = RNDZ or RNDN, and rnd2 = RNDA or RNDN. */

  if (MPFR_UNLIKELY (err < prec + (rnd1 == MPFR_RNDN) ||
                     (err == prec + (rnd1 == MPFR_RNDN) &&
                      (rnd1 == MPFR_RNDA ||
                       rnd2 == MPFR_RNDZ))))
    return 0;  /* can't round */

  /* As a consequence... */
  MPFR_ASSERTD (err >= prec);

  /* The bound c on the error |x-b| is: c = 2^(MPFR_EXP(b)-err) <= b/2.
   * So, we now know that x and b have the same sign. By symmetry,
   * assume x > 0 and b > 0. We have: L <= x <= U, where, depending
   * on rnd1:
   *   MPFR_RNDN: L = b-c, U = b+c
   *   MPFR_RNDZ: L = b,   U = b+c
   *   MPFR_RNDA: L = b-c, U = b
   *
   * We can round x iff round(L,prec,rnd2) = round(U,prec,rnd2).
   */

  if (MPFR_UNLIKELY (prec > (mpfr_prec_t) bn * GMP_NUMB_BITS))
    { /* Then prec > PREC(b): we can round:
         (i) in rounding to the nearest as long as err >= prec + 2.
             When err = prec + 1 and b is not a power of two (so that a
             change of binade cannot occur), then one can round to nearest
             thanks to the even rounding rule (in the target precision prec,
             the significand of b ends with a 0).
             When err = prec + 1 and b is a power of two, one can also round
             when rnd1 = RNDZ.
         (ii) in directed rounding mode iff rnd1 is compatible with rnd2
             and err >= prec + 1, unless b = 2^k and rnd1 = RNDA or RNDN, in
             which case we need err >= prec + 2.
      */
      if ((rnd1 == rnd2 || rnd2 == MPFR_RNDN) && err >= prec + 1)
        {
          if (rnd1 != MPFR_RNDZ &&
              err == prec + 1 &&
              mpfr_powerof2_raw2 (bp, bn))
            return 0;
          else
            return 1;
        }
      return 0;
    }

  /* now prec <= bn * GMP_NUMB_BITS */

  if (MPFR_UNLIKELY (err > (mpfr_prec_t) bn * GMP_NUMB_BITS))
    {
      /* we distinguish the case where b is a power of two:
         rnd1  rnd2  can round?
         RNDZ  RNDZ  ok
         RNDZ  RNDA  no
         RNDZ  RNDN  ok
         RNDA  RNDZ  no
         RNDA  RNDA  ok except when err = prec + 1
         RNDA  RNDN  ok except when err = prec + 1
         RNDN  RNDZ  no
         RNDN  RNDA  no
         RNDN  RNDN  ok except when err = prec + 1 */
      if (mpfr_powerof2_raw2 (bp, bn))
        {
          if ((rnd2 == MPFR_RNDZ || rnd2 == MPFR_RNDA) && rnd1 != rnd2)
            return 0;
          else if (rnd1 == MPFR_RNDZ)
            return 1; /* RNDZ RNDZ and RNDZ RNDN */
          else
            return err > prec + 1;
        }

      /* now the general case where b is not a power of two:
         rnd1  rnd2  can round?
         RNDZ  RNDZ  ok
         RNDZ  RNDA  except when b is representable in precision 'prec'
         RNDZ  RNDN  except when b is the middle of two representable numbers
                     in precision 'prec' and b ends with 'xxx0[1]',
                     or b is representable in precision 'prec'
                     and err = prec + 1 and b ends with '1'.
         RNDA  RNDZ  except when b is representable in precision 'prec'
         RNDA  RNDA  ok
         RNDA  RNDN  except when b is the middle of two representable numbers
                     in precision 'prec' and b ends with 'xxx1[1]',
                     or b is representable in precision 'prec'
                     and err = prec + 1 and b ends with '1'.
         RNDN  RNDZ  except when b is representable in precision 'prec'
         RNDN  RNDA  except when b is representable in precision 'prec'
         RNDN  RNDN  except when b is the middle of two representable numbers
                     in precision 'prec', or b is representable in precision
                     'prec' and err = prec + 1 and b ends with '1'. */
      if (rnd2 == MPFR_RNDN)
        {
          if (err == prec + 1 && (bp[0] & 1))
            return 0; /* err == prec + 1 implies prec = bn * GMP_NUMB_BITS */
          if (prec < (mpfr_prec_t) bn * GMP_NUMB_BITS)
            {
              k1 = MPFR_PREC2LIMBS (prec + 1);
              MPFR_UNSIGNED_MINUS_MODULO(s1, prec + 1);
              if (((bp[bn - k1] >> s1) & 1) &&
                  mpfr_round_raw2 (bp, bn, neg, MPFR_RNDA, prec + 1) == 0)
                { /* b is the middle of two representable numbers */
                  if (rnd1 == MPFR_RNDN)
                    return 0;
                  k1 = MPFR_PREC2LIMBS (prec);
                  MPFR_UNSIGNED_MINUS_MODULO(s1, prec);
                  return (rnd1 == MPFR_RNDZ) ^
                    (((bp[bn - k1] >> s1) & 1) == 0);
                }
            }
          return 1;
        }
      else if (rnd1 == rnd2) /* cases RNDZ RNDZ or RNDA RNDA: ok */
        return 1;
      else
        return mpfr_round_raw2 (bp, bn, neg, MPFR_RNDA, prec) != 0;
    }

  /* now err <= bn * GMP_NUMB_BITS */

  /* warning: if k = m*GMP_NUMB_BITS, consider limb m-1 and not m */
  k = (err - 1) / GMP_NUMB_BITS;
  MPFR_UNSIGNED_MINUS_MODULO(s, err);
  /* the error corresponds to bit s in limb k, the most significant limb
     being limb 0; in memory, limb k is bp[bn-1-k]. */

  k1 = (prec - 1) / GMP_NUMB_BITS;
  MPFR_UNSIGNED_MINUS_MODULO(s1, prec);
  /* the least significant bit is bit s1 in limb k1 */

  /* We don't need to consider the k1 most significant limbs.
     They will be considered later only to detect when subtracting
     the error bound yields a change of binade.
     Warning! The number with updated bn may no longer be normalized. */
  k -= k1;
  bn -= k1;
  prec2 = prec - (mpfr_prec_t) k1 * GMP_NUMB_BITS;

  /* We can decide on the correct rounding if rnd2(b-eps) and rnd2(b+eps)
     give the same result to the target precision 'prec', i.e., if when
     adding or subtracting (1 << s) in bp[bn-1-k], it does not change the
     rounding in direction 'rnd2' at ulp-position bp[bn-1] >> s1, taking also
     into account the possible change of binade. */
  MPFR_TMP_MARK(marker);
  tn = bn;
  k++; /* since we work with k+1 everywhere */
  tmp = MPFR_TMP_LIMBS_ALLOC (tn);
  if (bn > k)
    MPN_COPY (tmp, bp, bn - k); /* copy low bn-k limbs of b into tmp */

  MPFR_ASSERTD (k > 0);

  switch (rnd1)
    {
    case MPFR_RNDZ:
      /* rnd1 = Round to Zero */
      cc = (bp[bn - 1] >> s1) & 1; /* cc is the least significant bit of b */
      /* mpfr_round_raw2 returns 1 if one should add 1 at ulp(b,prec),
         and 0 otherwise */
      cc ^= mpfr_round_raw2 (bp, bn, neg, rnd2, prec2);
      /* cc is the new value of bit s1 in bp[bn-1] after rounding 'rnd2' */

      /* now round b + 2^(MPFR_EXP(b)-err) */
      cy = mpn_add_1 (tmp + bn - k, bp + bn - k, k, MPFR_LIMB_ONE << s);
      /* propagate carry up to most significant limb */
      for (tn = 0; tn + 1 < k1 && cy != 0; tn ++)
        cy = bp[bn + tn] == MPFR_LIMB_MAX;
      if (cy == 0 && err == prec)
        {
          res = 0;
          goto end;
        }
      if (MPFR_UNLIKELY(cy))
        {
          /* when a carry occurs, we have b < 2^h <= b+c, we can round iff:
             rnd2 = RNDZ: never, since b and b+c round to different values;
             rnd2 = RNDA: when b+c is an exact power of two, and err > prec
                          (since for err = prec, b = 2^h - 1/2*ulp(2^h) is
                          exactly representable and thus rounds to itself);
             rnd2 = RNDN: whenever cc = 0, since err >= prec implies
                          c <= ulp(b) = 1/2*ulp(2^h), thus b+c rounds to 2^h,
                          and b+c >= 2^h implies that bit 'prec' of b is 1,
                          thus cc = 0 means that b is rounded to 2^h too. */
          res = (rnd2 == MPFR_RNDZ) ? 0
            : (rnd2 == MPFR_RNDA) ? (err > prec && k == bn && tmp[0] == 0)
            : cc == 0;
          goto end;
        }
      break;
    case MPFR_RNDN:
      /* rnd1 = Round to nearest */

      /* first round b+2^(MPFR_EXP(b)-err) */
      cy = mpn_add_1 (tmp + bn - k, bp + bn - k, k, MPFR_LIMB_ONE << s);
      /* propagate carry up to most significant limb */
      for (tn = 0; tn + 1 < k1 && cy != 0; tn ++)
        cy = bp[bn + tn] == MPFR_LIMB_MAX;
      cc = (tmp[bn - 1] >> s1) & 1; /* gives 0 when cc=1 */
      cc ^= mpfr_round_raw2 (tmp, bn, neg, rnd2, prec2);
      /* cc is the new value of bit s1 in bp[bn-1]+eps after rounding 'rnd2' */
      if (MPFR_UNLIKELY (cy != 0))
        {
          /* when a carry occurs, we have b-c < b < 2^h <= b+c, we can round
             iff:
             rnd2 = RNDZ: never, since b-c and b+c round to different values;
             rnd2 = RNDA: when b+c is an exact power of two, and
                          err > prec + 1 (since for err <= prec + 1,
                          b-c <= 2^h - 1/2*ulp(2^h) is exactly representable
                          and thus rounds to itself);
             rnd2 = RNDN: whenever err > prec + 1, since for err = prec + 1,
                          b+c rounds to 2^h, and b-c rounds to nextbelow(2^h).
                          For err > prec + 1, c <= 1/4*ulp(b) <= 1/8*ulp(2^h),
                          thus
                          2^h - 1/4*ulp(b) <= b-c < b+c <= 2^h + 1/8*ulp(2^h),
                          therefore both b-c and b+c round to 2^h. */
          res = (rnd2 == MPFR_RNDZ) ? 0
            : (rnd2 == MPFR_RNDA) ?
              (err > prec + 1 && k == bn && tmp[0] == 0)
            : err > prec + 1;
          goto end;
        }
    subtract_eps:
      /* now round b-2^(MPFR_EXP(b)-err), this happens for
         rnd1 = RNDN or RNDA */
      MPFR_ASSERTD(rnd1 == MPFR_RNDN || rnd1 == MPFR_RNDA);
      cy = mpn_sub_1 (tmp + bn - k, bp + bn - k, k, MPFR_LIMB_ONE << s);
      /* propagate the potential borrow up to the most significant limb
         (it cannot propagate further since the most significant limb is
         at least MPFR_LIMB_HIGHBIT).
         Note: we use the same limb tmp[bn-1] to subtract. */
      tmp_hi = tmp[bn - 1];
      for (tn = 0; tn < k1 && cy != 0; tn ++)
        cy = mpn_sub_1 (&tmp_hi, bp + bn + tn, 1, cy);
      /* We have an exponent decrease when tn = k1 and
         tmp[bn-1] < MPFR_LIMB_HIGHBIT:
         b-c < 2^h <= b (for RNDA) or b+c (for RNDN).
         Then we surely cannot round when rnd2 = RNDZ, since b or b+c round to
         a value >= 2^h, and b-c rounds to a value < 2^h.
         We also surely cannot round when (rnd1,rnd2) = (RNDN,RNDA), since
         b-c rounds to a value <= 2^h, and b+c > 2^h rounds to a value > 2^h.
         It thus remains:
         (rnd1,rnd2) = (RNDA,RNDA), (RNDA,RNDN) and (RNDN,RNDN).
         For (RNDA,RNDA) we can round only when b-c and b round to 2^h, which
         implies b = 2^h and err > prec (which is true in that case):
         a necessary condition is that cc = 0.
         For (RNDA,RNDN) we can round only when b-c and b round to 2^h, which
         implies b-c >= 2^h - 1/4*ulp(2^h), and b <= 2^h + 1/2*ulp(2^h);
         since ulp(2^h) = ulp(b), this implies c <= 3/4*ulp(b), thus
         err > prec.
         For (RNDN,RNDN) we can round only when b-c and b+c round to 2^h,
         which implies b-c >= 2^h - 1/4*ulp(2^h), and
         b+c <= 2^h + 1/2*ulp(2^h);
         since ulp(2^h) = ulp(b), this implies 2*c <= 3/4*ulp(b), thus
         err > prec+1.
      */
      if (tn == k1 && tmp_hi < MPFR_LIMB_HIGHBIT) /* exponent decrease */
        {
          if (rnd2 == MPFR_RNDZ || (rnd1 == MPFR_RNDN && rnd2 == MPFR_RNDA) ||
              cc != 0 /* b or b+c does not round to 2^h */)
            {
              res = 0;
              goto end;
            }
          /* in that case since the most significant bit of tmp is 0, we
             should consider one more bit; res = 0 when b-c does not round
             to 2^h. */
          res = mpfr_round_raw2 (tmp, bn, neg, rnd2, prec2 + 1) != 0;
          goto end;
        }
      if (err == prec + (rnd1 == MPFR_RNDN))
        {
          /* No exponent increase nor decrease, thus we have |U-L| = ulp(b).
             For rnd2 = RNDZ or RNDA, either [L,U] contains one representable
             number in the target precision, and then L and U round
             differently; or both L and U are representable: they round
             differently too; thus in all cases we cannot round.
             For rnd2 = RNDN, the only case where we can round is when the
             middle of [L,U] (i.e. b) is representable, and ends with a 0. */
          res = (rnd2 == MPFR_RNDN && (((bp[bn - 1] >> s1) & 1) == 0) &&
                 mpfr_round_raw2 (bp, bn, neg, MPFR_RNDZ, prec2) ==
                 mpfr_round_raw2 (bp, bn, neg, MPFR_RNDA, prec2));
          goto end;
        }
      break;
    default:
      /* rnd1 = Round away */
      MPFR_ASSERTD (rnd1 == MPFR_RNDA);
      cc = (bp[bn - 1] >> s1) & 1;
      /* the mpfr_round_raw2() call below returns whether one should add 1 or
         not for rounding */
      cc ^= mpfr_round_raw2 (bp, bn, neg, rnd2, prec2);
      /* cc is the new value of bit s1 in bp[bn-1]+eps after rounding 'rnd2' */

      goto subtract_eps;
    }

  cc2 = (tmp[bn - 1] >> s1) & 1;
  res = cc == (cc2 ^ mpfr_round_raw2 (tmp, bn, neg, rnd2, prec2));

 end:
  MPFR_TMP_FREE(marker);
  return res;
}
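/* Illustrative worked case (assumed example, not MPFR code), following the
   error-interval description of mpfr_can_round above: let b = 1.40625 =
   1.01101b be stored with 6 bits, so MPFR_EXP(b) = 1, and take a target
   precision of 3 bits with rnd1 = rnd2 = MPFR_RNDN.  With err = 5 the error
   bound is c = 2^(1-5) = 1/16, so L = 1.34375 and U = 1.46875 lie on both
   sides of 1.375, the midpoint between the 3-bit numbers 1.25 and 1.5; one
   would thus expect a zero return.  With err = 7, c = 1/64 and both L and U
   round to 1.5, so one would expect a non-zero return.

     mpfr_t b;
     mpfr_init2 (b, 6);
     mpfr_set_d (b, 1.40625, MPFR_RNDN);              // exact: 1.01101b
     mpfr_can_round (b, 5, MPFR_RNDN, MPFR_RNDN, 3);  // expected: 0
     mpfr_can_round (b, 7, MPFR_RNDN, MPFR_RNDN, 3);  // expected: non-zero
     mpfr_clear (b);
*/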