1 /* pp_sort.c 2 * 3 * Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 4 * 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others 5 * 6 * You may distribute under the terms of either the GNU General Public 7 * License or the Artistic License, as specified in the README file. 8 * 9 */ 10 11 /* 12 * ...they shuffled back towards the rear of the line. 'No, not at the 13 * rear!' the slave-driver shouted. 'Three files up. And stay there... 14 * 15 * [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"] 16 */ 17 18 /* This file contains pp ("push/pop") functions that 19 * execute the opcodes that make up a perl program. A typical pp function 20 * expects to find its arguments on the stack, and usually pushes its 21 * results onto the stack, hence the 'pp' terminology. Each OP structure 22 * contains a pointer to the relevant pp_foo() function. 23 * 24 * This particular file just contains pp_sort(), which is complex 25 * enough to merit its own file! See the other pp*.c files for the rest of 26 * the pp_ functions. 27 */ 28 29 #include "EXTERN.h" 30 #define PERL_IN_PP_SORT_C 31 #include "perl.h" 32 33 #ifndef SMALLSORT 34 #define SMALLSORT (200) 35 #endif 36 37 /* 38 * The mergesort implementation is by Peter M. Mcilroy <pmcilroy@lucent.com>. 39 * 40 * The original code was written in conjunction with BSD Computer Software 41 * Research Group at University of California, Berkeley. 42 * 43 * See also: "Optimistic Sorting and Information Theoretic Complexity" 44 * Peter McIlroy 45 * SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms), 46 * pp 467-474, Austin, Texas, 25-27 January 1993. 47 * 48 * The integration to Perl is by John P. Linderman <jpl.jpl@gmail.com>. 49 * 50 * The code can be distributed under the same terms as Perl itself. 51 * 52 */ 53 54 55 typedef char * aptr; /* pointer for arithmetic on sizes */ 56 typedef SV * gptr; /* pointers in our lists */ 57 58 /* Binary merge internal sort, with a few special mods 59 ** for the special perl environment it now finds itself in. 60 ** 61 ** Things that were once options have been hotwired 62 ** to values suitable for this use. In particular, we'll always 63 ** initialize looking for natural runs, we'll always produce stable 64 ** output, and we'll always do Peter McIlroy's binary merge. 65 */ 66 67 /* Pointer types for arithmetic and storage and convenience casts */ 68 69 #define APTR(P) ((aptr)(P)) 70 #define GPTP(P) ((gptr *)(P)) 71 #define GPPP(P) ((gptr **)(P)) 72 73 74 /* byte offset from pointer P to (larger) pointer Q */ 75 #define BYTEOFF(P, Q) (APTR(Q) - APTR(P)) 76 77 #define PSIZE sizeof(gptr) 78 79 /* If PSIZE is power of 2, make PSHIFT that power, if that helps */ 80 81 #ifdef PSHIFT 82 #define PNELEM(P, Q) (BYTEOFF(P,Q) >> (PSHIFT)) 83 #define PNBYTE(N) ((N) << (PSHIFT)) 84 #define PINDEX(P, N) (GPTP(APTR(P) + PNBYTE(N))) 85 #else 86 /* Leave optimization to compiler */ 87 #define PNELEM(P, Q) (GPTP(Q) - GPTP(P)) 88 #define PNBYTE(N) ((N) * (PSIZE)) 89 #define PINDEX(P, N) (GPTP(P) + (N)) 90 #endif 91 92 /* Pointer into other corresponding to pointer into this */ 93 #define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P)) 94 95 #define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim) 96 97 98 /* Runs are identified by a pointer in the auxiliary list. 99 ** The pointer is at the start of the list, 100 ** and it points to the start of the next list. 101 ** NEXT is used as an lvalue, too. 102 */ 103 104 #define NEXT(P) (*GPPP(P)) 105 106 107 /* PTHRESH is the minimum number of pairs with the same sense to justify 108 ** checking for a run and extending it. Note that PTHRESH counts PAIRS, 109 ** not just elements, so PTHRESH == 8 means a run of 16. 110 */ 111 112 #define PTHRESH (8) 113 114 /* RTHRESH is the number of elements in a run that must compare low 115 ** to the low element from the opposing run before we justify 116 ** doing a binary rampup instead of single stepping. 117 ** In random input, N in a row low should only happen with 118 ** probability 2^(1-N), so we can risk that we are dealing 119 ** with orderly input without paying much when we aren't. 120 */ 121 122 #define RTHRESH (6) 123 124 125 /* 126 ** Overview of algorithm and variables. 127 ** The array of elements at list1 will be organized into runs of length 2, 128 ** or runs of length >= 2 * PTHRESH. We only try to form long runs when 129 ** PTHRESH adjacent pairs compare in the same way, suggesting overall order. 130 ** 131 ** Unless otherwise specified, pair pointers address the first of two elements. 132 ** 133 ** b and b+1 are a pair that compare with sense "sense". 134 ** b is the "bottom" of adjacent pairs that might form a longer run. 135 ** 136 ** p2 parallels b in the list2 array, where runs are defined by 137 ** a pointer chain. 138 ** 139 ** t represents the "top" of the adjacent pairs that might extend 140 ** the run beginning at b. Usually, t addresses a pair 141 ** that compares with opposite sense from (b,b+1). 142 ** However, it may also address a singleton element at the end of list1, 143 ** or it may be equal to "last", the first element beyond list1. 144 ** 145 ** r addresses the Nth pair following b. If this would be beyond t, 146 ** we back it off to t. Only when r is less than t do we consider the 147 ** run long enough to consider checking. 148 ** 149 ** q addresses a pair such that the pairs at b through q already form a run. 150 ** Often, q will equal b, indicating we only are sure of the pair itself. 151 ** However, a search on the previous cycle may have revealed a longer run, 152 ** so q may be greater than b. 153 ** 154 ** p is used to work back from a candidate r, trying to reach q, 155 ** which would mean b through r would be a run. If we discover such a run, 156 ** we start q at r and try to push it further towards t. 157 ** If b through r is NOT a run, we detect the wrong order at (p-1,p). 158 ** In any event, after the check (if any), we have two main cases. 159 ** 160 ** 1) Short run. b <= q < p <= r <= t. 161 ** b through q is a run (perhaps trivial) 162 ** q through p are uninteresting pairs 163 ** p through r is a run 164 ** 165 ** 2) Long run. b < r <= q < t. 166 ** b through q is a run (of length >= 2 * PTHRESH) 167 ** 168 ** Note that degenerate cases are not only possible, but likely. 169 ** For example, if the pair following b compares with opposite sense, 170 ** then b == q < p == r == t. 171 */ 172 173 174 PERL_STATIC_FORCE_INLINE IV __attribute__always_inline__ 175 dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp) 176 { 177 I32 sense; 178 gptr *b, *p, *q, *t, *p2; 179 gptr *last, *r; 180 IV runs = 0; 181 182 b = list1; 183 last = PINDEX(b, nmemb); 184 sense = (cmp(aTHX_ *b, *(b+1)) > 0); 185 for (p2 = list2; b < last; ) { 186 /* We just started, or just reversed sense. 187 ** Set t at end of pairs with the prevailing sense. 188 */ 189 for (p = b+2, t = p; ++p < last; t = ++p) { 190 if ((cmp(aTHX_ *t, *p) > 0) != sense) break; 191 } 192 q = b; 193 /* Having laid out the playing field, look for long runs */ 194 do { 195 p = r = b + (2 * PTHRESH); 196 if (r >= t) p = r = t; /* too short to care about */ 197 else { 198 while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) && 199 ((p -= 2) > q)) {} 200 if (p <= q) { 201 /* b through r is a (long) run. 202 ** Extend it as far as possible. 203 */ 204 p = q = r; 205 while (((p += 2) < t) && 206 ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p; 207 r = p = q + 2; /* no simple pairs, no after-run */ 208 } 209 } 210 if (q > b) { /* run of greater than 2 at b */ 211 gptr *savep = p; 212 213 p = q += 2; 214 /* pick up singleton, if possible */ 215 if ((p == t) && 216 ((t + 1) == last) && 217 ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) 218 savep = r = p = q = last; 219 p2 = NEXT(p2) = p2 + (p - b); ++runs; 220 if (sense) 221 while (b < --p) { 222 const gptr c = *b; 223 *b++ = *p; 224 *p = c; 225 } 226 p = savep; 227 } 228 while (q < p) { /* simple pairs */ 229 p2 = NEXT(p2) = p2 + 2; ++runs; 230 if (sense) { 231 const gptr c = *q++; 232 *(q-1) = *q; 233 *q++ = c; 234 } else q += 2; 235 } 236 if (((b = p) == t) && ((t+1) == last)) { 237 NEXT(p2) = p2 + 1; ++runs; 238 b++; 239 } 240 q = r; 241 } while (b < t); 242 sense = !sense; 243 } 244 return runs; 245 } 246 247 248 /* The original merge sort, in use since 5.7, was as fast as, or faster than, 249 * qsort on many platforms, but slower than qsort, conspicuously so, 250 * on others. The most likely explanation was platform-specific 251 * differences in cache sizes and relative speeds. 252 * 253 * The quicksort divide-and-conquer algorithm guarantees that, as the 254 * problem is subdivided into smaller and smaller parts, the parts 255 * fit into smaller (and faster) caches. So it doesn't matter how 256 * many levels of cache exist, quicksort will "find" them, and, 257 * as long as smaller is faster, take advantage of them. 258 * 259 * By contrast, consider how the original mergesort algorithm worked. 260 * Suppose we have five runs (each typically of length 2 after dynprep). 261 * 262 * pass base aux 263 * 0 1 2 3 4 5 264 * 1 12 34 5 265 * 2 1234 5 266 * 3 12345 267 * 4 12345 268 * 269 * Adjacent pairs are merged in "grand sweeps" through the input. 270 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until 271 * runs 3 and 4 are merged and the runs from run 5 have been copied. 272 * The only cache that matters is one large enough to hold *all* the input. 273 * On some platforms, this may be many times slower than smaller caches. 274 * 275 * The following pseudo-code uses the same basic merge algorithm, 276 * but in a divide-and-conquer way. 277 * 278 * # merge $runs runs at offset $offset of list $list1 into $list2. 279 * # all unmerged runs ($runs == 1) originate in list $base. 280 * sub mgsort2 { 281 * my ($offset, $runs, $base, $list1, $list2) = @_; 282 * 283 * if ($runs == 1) { 284 * if ($list1 is $base) copy run to $list2 285 * return offset of end of list (or copy) 286 * } else { 287 * $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1) 288 * mgsort2($off2, $runs/2, $base, $list2, $list1) 289 * merge the adjacent runs at $offset of $list1 into $list2 290 * return the offset of the end of the merged runs 291 * } 292 * } 293 * mgsort2(0, $runs, $base, $aux, $base); 294 * 295 * For our 5 runs, the tree of calls looks like 296 * 297 * 5 298 * 3 2 299 * 2 1 1 1 300 * 1 1 301 * 302 * 1 2 3 4 5 303 * 304 * and the corresponding activity looks like 305 * 306 * copy runs 1 and 2 from base to aux 307 * merge runs 1 and 2 from aux to base 308 * (run 3 is where it belongs, no copy needed) 309 * merge runs 12 and 3 from base to aux 310 * (runs 4 and 5 are where they belong, no copy needed) 311 * merge runs 4 and 5 from base to aux 312 * merge runs 123 and 45 from aux to base 313 * 314 * Note that we merge runs 1 and 2 immediately after copying them, 315 * while they are still likely to be in fast cache. Similarly, 316 * run 3 is merged with run 12 while it still may be lingering in cache. 317 * This implementation should therefore enjoy much of the cache-friendly 318 * behavior that quicksort does. In addition, it does less copying 319 * than the original mergesort implementation (only runs 1 and 2 are copied) 320 * and the "balancing" of merges is better (merged runs comprise more nearly 321 * equal numbers of original runs). 322 * 323 * The actual cache-friendly implementation will use a pseudo-stack 324 * to avoid recursion, and will unroll processing of runs of length 2, 325 * but it is otherwise similar to the recursive implementation. 326 */ 327 328 typedef struct { 329 IV offset; /* offset of 1st of 2 runs at this level */ 330 IV runs; /* how many runs must be combined into 1 */ 331 } off_runs; /* pseudo-stack element */ 332 333 PERL_STATIC_FORCE_INLINE void 334 S_sortsv_flags_impl(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags) 335 { 336 IV i, run, offset; 337 I32 sense, level; 338 gptr *f1, *f2, *t, *b, *p; 339 int iwhich; 340 gptr *aux; 341 gptr *p1; 342 gptr small[SMALLSORT]; 343 gptr *which[3]; 344 off_runs stack[60], *stackp; 345 346 PERL_UNUSED_ARG(flags); 347 PERL_ARGS_ASSERT_SORTSV_FLAGS_IMPL; 348 if (nmemb <= 1) return; /* sorted trivially */ 349 350 if (nmemb <= SMALLSORT) aux = small; /* use stack for aux array */ 351 else { Newx(aux,nmemb,gptr); } /* allocate auxiliary array */ 352 level = 0; 353 stackp = stack; 354 stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp); 355 stackp->offset = offset = 0; 356 which[0] = which[2] = base; 357 which[1] = aux; 358 for (;;) { 359 /* On levels where both runs have be constructed (stackp->runs == 0), 360 * merge them, and note the offset of their end, in case the offset 361 * is needed at the next level up. Hop up a level, and, 362 * as long as stackp->runs is 0, keep merging. 363 */ 364 IV runs = stackp->runs; 365 if (runs == 0) { 366 gptr *list1, *list2; 367 iwhich = level & 1; 368 list1 = which[iwhich]; /* area where runs are now */ 369 list2 = which[++iwhich]; /* area for merged runs */ 370 do { 371 gptr *l1, *l2, *tp2; 372 offset = stackp->offset; 373 f1 = p1 = list1 + offset; /* start of first run */ 374 p = tp2 = list2 + offset; /* where merged run will go */ 375 t = NEXT(p); /* where first run ends */ 376 f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */ 377 t = NEXT(t); /* where second runs ends */ 378 l2 = POTHER(t, list2, list1); /* ... on the other side */ 379 offset = PNELEM(list2, t); 380 while (f1 < l1 && f2 < l2) { 381 /* If head 1 is larger than head 2, find ALL the elements 382 ** in list 2 strictly less than head1, write them all, 383 ** then head 1. Then compare the new heads, and repeat, 384 ** until one or both lists are exhausted. 385 ** 386 ** In all comparisons (after establishing 387 ** which head to merge) the item to merge 388 ** (at pointer q) is the first operand of 389 ** the comparison. When we want to know 390 ** if "q is strictly less than the other", 391 ** we can't just do 392 ** cmp(q, other) < 0 393 ** because stability demands that we treat equality 394 ** as high when q comes from l2, and as low when 395 ** q was from l1. So we ask the question by doing 396 ** cmp(q, other) <= sense 397 ** and make sense == 0 when equality should look low, 398 ** and -1 when equality should look high. 399 */ 400 401 gptr *q; 402 if (cmp(aTHX_ *f1, *f2) <= 0) { 403 q = f2; b = f1; t = l1; 404 sense = -1; 405 } else { 406 q = f1; b = f2; t = l2; 407 sense = 0; 408 } 409 410 411 /* ramp up 412 ** 413 ** Leave t at something strictly 414 ** greater than q (or at the end of the list), 415 ** and b at something strictly less than q. 416 */ 417 for (i = 1, run = 0 ;;) { 418 if ((p = PINDEX(b, i)) >= t) { 419 /* off the end */ 420 if (((p = PINDEX(t, -1)) > b) && 421 (cmp(aTHX_ *q, *p) <= sense)) 422 t = p; 423 else b = p; 424 break; 425 } else if (cmp(aTHX_ *q, *p) <= sense) { 426 t = p; 427 break; 428 } else b = p; 429 if (++run >= RTHRESH) i += i; 430 } 431 432 433 /* q is known to follow b and must be inserted before t. 434 ** Increment b, so the range of possibilities is [b,t). 435 ** Round binary split down, to favor early appearance. 436 ** Adjust b and t until q belongs just before t. 437 */ 438 439 b++; 440 while (b < t) { 441 p = PINDEX(b, (PNELEM(b, t) - 1) / 2); 442 if (cmp(aTHX_ *q, *p) <= sense) { 443 t = p; 444 } else b = p + 1; 445 } 446 447 448 /* Copy all the strictly low elements */ 449 450 if (q == f1) { 451 FROMTOUPTO(f2, tp2, t); 452 *tp2++ = *f1++; 453 } else { 454 FROMTOUPTO(f1, tp2, t); 455 *tp2++ = *f2++; 456 } 457 } 458 459 460 /* Run out remaining list */ 461 if (f1 == l1) { 462 if (f2 < l2) FROMTOUPTO(f2, tp2, l2); 463 } else FROMTOUPTO(f1, tp2, l1); 464 p1 = NEXT(p1) = POTHER(tp2, list2, list1); 465 466 if (--level == 0) goto done; 467 --stackp; 468 t = list1; list1 = list2; list2 = t; /* swap lists */ 469 } while ((runs = stackp->runs) == 0); 470 } 471 472 473 stackp->runs = 0; /* current run will finish level */ 474 /* While there are more than 2 runs remaining, 475 * turn them into exactly 2 runs (at the "other" level), 476 * each made up of approximately half the runs. 477 * Stack the second half for later processing, 478 * and set about producing the first half now. 479 */ 480 while (runs > 2) { 481 ++level; 482 ++stackp; 483 stackp->offset = offset; 484 runs -= stackp->runs = runs / 2; 485 } 486 /* We must construct a single run from 1 or 2 runs. 487 * All the original runs are in which[0] == base. 488 * The run we construct must end up in which[level&1]. 489 */ 490 iwhich = level & 1; 491 if (runs == 1) { 492 /* Constructing a single run from a single run. 493 * If it's where it belongs already, there's nothing to do. 494 * Otherwise, copy it to where it belongs. 495 * A run of 1 is either a singleton at level 0, 496 * or the second half of a split 3. In neither event 497 * is it necessary to set offset. It will be set by the merge 498 * that immediately follows. 499 */ 500 if (iwhich) { /* Belongs in aux, currently in base */ 501 f1 = b = PINDEX(base, offset); /* where list starts */ 502 f2 = PINDEX(aux, offset); /* where list goes */ 503 t = NEXT(f2); /* where list will end */ 504 offset = PNELEM(aux, t); /* offset thereof */ 505 t = PINDEX(base, offset); /* where it currently ends */ 506 FROMTOUPTO(f1, f2, t); /* copy */ 507 NEXT(b) = t; /* set up parallel pointer */ 508 } else if (level == 0) goto done; /* single run at level 0 */ 509 } else { 510 /* Constructing a single run from two runs. 511 * The merge code at the top will do that. 512 * We need only make sure the two runs are in the "other" array, 513 * so they'll end up in the correct array after the merge. 514 */ 515 ++level; 516 ++stackp; 517 stackp->offset = offset; 518 stackp->runs = 0; /* take care of both runs, trigger merge */ 519 if (!iwhich) { /* Merged runs belong in aux, copy 1st */ 520 f1 = b = PINDEX(base, offset); /* where first run starts */ 521 f2 = PINDEX(aux, offset); /* where it will be copied */ 522 t = NEXT(f2); /* where first run will end */ 523 offset = PNELEM(aux, t); /* offset thereof */ 524 p = PINDEX(base, offset); /* end of first run */ 525 t = NEXT(t); /* where second run will end */ 526 t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */ 527 FROMTOUPTO(f1, f2, t); /* copy both runs */ 528 NEXT(b) = p; /* paralleled pointer for 1st */ 529 NEXT(p) = t; /* ... and for second */ 530 } 531 } 532 } 533 done: 534 if (aux != small) Safefree(aux); /* free iff allocated */ 535 536 return; 537 } 538 539 /* 540 =for apidoc sortsv_flags 541 542 In-place sort an array of SV pointers with the given comparison routine, 543 with various SORTf_* flag options. 544 545 =cut 546 */ 547 void 548 Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags) 549 { 550 PERL_ARGS_ASSERT_SORTSV_FLAGS; 551 552 sortsv_flags_impl(base, nmemb, cmp, flags); 553 } 554 555 /* 556 * Each of sortsv_* functions contains an inlined copy of 557 * sortsv_flags_impl() with an inlined comparator. Basically, we are 558 * emulating C++ templates by using __attribute__((always_inline)). 559 * 560 * The purpose of that is to avoid the function call overhead inside 561 * the sorting routine, which calls the comparison function multiple 562 * times per sorted item. 563 */ 564 565 static void 566 sortsv_amagic_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags) 567 { 568 sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp, flags); 569 } 570 571 static void 572 sortsv_amagic_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags) 573 { 574 sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp_desc, flags); 575 } 576 577 static void 578 sortsv_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags) 579 { 580 sortsv_flags_impl(base, nmemb, S_sv_i_ncmp, flags); 581 } 582 583 static void 584 sortsv_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags) 585 { 586 sortsv_flags_impl(base, nmemb, S_sv_i_ncmp_desc, flags); 587 } 588 589 static void 590 sortsv_amagic_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags) 591 { 592 sortsv_flags_impl(base, nmemb, S_amagic_ncmp, flags); 593 } 594 595 static void 596 sortsv_amagic_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags) 597 { 598 sortsv_flags_impl(base, nmemb, S_amagic_ncmp_desc, flags); 599 } 600 601 static void 602 sortsv_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags) 603 { 604 sortsv_flags_impl(base, nmemb, S_sv_ncmp, flags); 605 } 606 607 static void 608 sortsv_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags) 609 { 610 sortsv_flags_impl(base, nmemb, S_sv_ncmp_desc, flags); 611 } 612 613 static void 614 sortsv_amagic_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags) 615 { 616 sortsv_flags_impl(base, nmemb, S_amagic_cmp, flags); 617 } 618 619 static void 620 sortsv_amagic_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags) 621 { 622 sortsv_flags_impl(base, nmemb, S_amagic_cmp_desc, flags); 623 } 624 625 static void 626 sortsv_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags) 627 { 628 sortsv_flags_impl(base, nmemb, Perl_sv_cmp, flags); 629 } 630 631 static void 632 sortsv_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags) 633 { 634 sortsv_flags_impl(base, nmemb, S_cmp_desc, flags); 635 } 636 637 #ifdef USE_LOCALE_COLLATE 638 639 static void 640 sortsv_amagic_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags) 641 { 642 sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale, flags); 643 } 644 645 static void 646 sortsv_amagic_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags) 647 { 648 sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale_desc, flags); 649 } 650 651 static void 652 sortsv_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags) 653 { 654 sortsv_flags_impl(base, nmemb, Perl_sv_cmp_locale, flags); 655 } 656 657 static void 658 sortsv_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags) 659 { 660 sortsv_flags_impl(base, nmemb, S_cmp_locale_desc, flags); 661 } 662 663 #endif 664 665 /* 666 667 =for apidoc sortsv 668 669 In-place sort an array of SV pointers with the given comparison routine. 670 671 Currently this always uses mergesort. See C<L</sortsv_flags>> for a more 672 flexible routine. 673 674 =cut 675 */ 676 677 void 678 Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp) 679 { 680 PERL_ARGS_ASSERT_SORTSV; 681 682 sortsv_flags(array, nmemb, cmp, 0); 683 } 684 685 #define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)) 686 #define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK) 687 #define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) ) 688 689 PP(pp_sort) 690 { 691 dMARK; dORIGMARK; 692 SV **p1 = ORIGMARK+1, **p2; 693 SSize_t max, i; 694 AV* av = NULL; 695 GV *gv; 696 CV *cv = NULL; 697 U8 gimme = GIMME_V; 698 OP* const nextop = PL_op->op_next; 699 I32 overloading = 0; 700 bool hasargs = FALSE; /* the sort sub has proto($$)? */ 701 bool copytmps; 702 I32 is_xsub = 0; 703 const U8 priv = PL_op->op_private; 704 const U8 flags = PL_op->op_flags; 705 U32 sort_flags = 0; 706 I32 all_SIVs = 1, descending = 0; 707 708 if ((priv & OPpSORT_DESCEND) != 0) 709 descending = 1; 710 711 if (gimme != G_LIST) { 712 rpp_popfree_to_NN(mark); 713 rpp_xpush_IMM(&PL_sv_undef); 714 return NORMAL; 715 } 716 717 ENTER; 718 SAVEVPTR(PL_sortcop); 719 720 /* Important flag meanings: 721 * 722 * OPf_STACKED sort <function_name> args 723 * 724 * (OPf_STACKED 725 * |OPf_SPECIAL) sort { <block> } args 726 * 727 * ---- standard block; e.g. sort { $a <=> $b } args 728 * 729 * 730 * OPpSORT_NUMERIC { $a <=> $b } (as opposed to $a cmp $b) 731 * OPpSORT_INTEGER ditto in scope of 'use integer' 732 * OPpSORT_DESCEND { $b <=> $a } 733 * OPpSORT_REVERSE @a= reverse sort ....; 734 * OPpSORT_INPLACE @a = sort @a; 735 */ 736 737 if (flags & OPf_STACKED) { 738 if (flags & OPf_SPECIAL) { 739 OP *nullop = OpSIBLING(cLISTOP->op_first); /* pass pushmark */ 740 assert(nullop->op_type == OP_NULL); 741 PL_sortcop = nullop->op_next; 742 } 743 else { 744 /* sort <function_name> list */ 745 GV *autogv = NULL; 746 HV *stash; 747 SV *fn = *++MARK; 748 cv = sv_2cv(fn, &stash, &gv, GV_ADD); 749 750 /* want to remove the function name from the stack, 751 * but mustn't trigger cv being freed at the same time. 752 * Normally the name is a PV while cv is CV (duh!) but 753 * for lexical subs, fn can already be the CV (but is kept 754 * alive by a reference from the pad */ 755 #ifdef PERL_RC_STACK 756 assert(fn != (SV*)cv || SvREFCNT(fn) > 1); 757 SvREFCNT_dec(fn); 758 #endif 759 *MARK = NULL; 760 761 check_cv: 762 if (cv && SvPOK(cv)) { 763 const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv)); 764 if (proto && strEQ(proto, "$$")) { 765 hasargs = TRUE; 766 } 767 } 768 if (cv && CvISXSUB(cv) && CvXSUB(cv)) { 769 is_xsub = 1; 770 } 771 else if (!(cv && CvROOT(cv))) { 772 if (gv) { 773 goto autoload; 774 } 775 else if (!CvANON(cv) && (gv = CvGV(cv))) { 776 if (cv != GvCV(gv)) cv = GvCV(gv); 777 autoload: 778 if (!autogv && ( 779 autogv = gv_autoload_pvn( 780 GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv), 781 GvNAMEUTF8(gv) ? SVf_UTF8 : 0 782 ) 783 )) { 784 cv = GvCVu(autogv); 785 goto check_cv; 786 } 787 else { 788 SV *tmpstr = sv_newmortal(); 789 gv_efullname3(tmpstr, gv, NULL); 790 DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called", 791 SVfARG(tmpstr)); 792 } 793 } 794 else { 795 DIE(aTHX_ "Undefined subroutine in sort"); 796 } 797 } 798 799 if (is_xsub) 800 PL_sortcop = (OP*)cv; 801 else 802 PL_sortcop = CvSTART(cv); 803 } 804 } 805 else { 806 PL_sortcop = NULL; 807 } 808 809 /* optimiser converts "@a = sort @a" to "sort \@a". In this case, 810 * push (@a) onto stack, then assign result back to @a at the end of 811 * this function */ 812 if (priv & OPpSORT_INPLACE) { 813 assert( MARK+1 == PL_stack_sp 814 && *PL_stack_sp 815 && SvTYPE(*PL_stack_sp) == SVt_PVAV); 816 (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */ 817 av = MUTABLE_AV((*PL_stack_sp)); 818 if (SvREADONLY(av)) 819 Perl_croak_no_modify(); 820 max = AvFILL(av) + 1; 821 822 I32 oldmark = MARK - PL_stack_base; 823 rpp_extend(max); 824 MARK = PL_stack_base + oldmark; 825 826 if (SvMAGICAL(av)) { 827 for (i=0; i < max; i++) { 828 SV **svp = av_fetch(av, i, FALSE); 829 SV *sv; 830 if (svp) { 831 sv = *svp; 832 #ifdef PERL_RC_STACK 833 SvREFCNT_inc_simple_void_NN(sv); 834 #endif 835 } 836 else 837 sv = NULL; 838 *++PL_stack_sp = sv; 839 } 840 } 841 else { 842 SV **svp = AvARRAY(av); 843 assert(svp || max == 0); 844 for (i = 0; i < max; i++) { 845 SV *sv = *svp++; 846 #ifdef PERL_RC_STACK 847 SvREFCNT_inc_simple_void(sv); 848 #endif 849 *++PL_stack_sp = sv; 850 } 851 } 852 p1 = p2 = PL_stack_sp - (max-1); 853 /* we've kept av on the stacck (just below the pushed contents) so 854 * that a reference-counted stack keeps a reference to it for now 855 */ 856 assert((SV*)av == p1[-1]); 857 } 858 else { 859 p2 = MARK+1; 860 max = PL_stack_sp - MARK; 861 } 862 863 /* shuffle stack down, removing optional initial cv (p1!=p2), plus 864 * any nulls; also stringify or converting to integer or number as 865 * required any args */ 866 867 /* no ref-counted SVs at base to be overwritten */ 868 assert(p1 == p2 || (p1+1 == p2 && !*p1)); 869 870 copytmps = cBOOL(PL_sortcop); 871 for (i=max; i > 0 ; i--) { 872 SV *sv = *p2++; 873 if (sv) { /* Weed out nulls. */ 874 if (copytmps && SvPADTMP(sv)) { 875 SV *nsv = sv_mortalcopy(sv); 876 #ifdef PERL_RC_STACK 877 SvREFCNT_dec_NN(sv); 878 SvREFCNT_inc_simple_void_NN(nsv); 879 #endif 880 sv = nsv; 881 } 882 SvTEMP_off(sv); 883 if (!PL_sortcop) { 884 if (priv & OPpSORT_NUMERIC) { 885 if (priv & OPpSORT_INTEGER) { 886 if (!SvIOK(sv)) 887 (void)sv_2iv_flags(sv, SV_GMAGIC|SV_SKIP_OVERLOAD); 888 } 889 else { 890 if (!SvNSIOK(sv)) 891 (void)sv_2nv_flags(sv, SV_GMAGIC|SV_SKIP_OVERLOAD); 892 if (all_SIVs && !SvSIOK(sv)) 893 all_SIVs = 0; 894 } 895 } 896 else { 897 if (!SvPOK(sv)) 898 (void)sv_2pv_flags(sv, 0, 899 SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD); 900 } 901 if (SvAMAGIC(sv)) 902 overloading = 1; 903 } 904 *p1++ = sv; 905 } 906 else 907 max--; 908 } 909 910 if (max > 1) { 911 SV **start; 912 if (PL_sortcop) { 913 PERL_CONTEXT *cx; 914 const bool oldcatch = CATCH_GET; 915 I32 old_savestack_ix = PL_savestack_ix; 916 917 SAVEOP(); 918 919 CATCH_SET(TRUE); 920 push_stackinfo(PERLSI_SORT, 1); 921 922 if (!hasargs && !is_xsub) { 923 /* standard perl sub with values passed as $a and $b */ 924 SAVEGENERICSV(PL_firstgv); 925 SAVEGENERICSV(PL_secondgv); 926 PL_firstgv = MUTABLE_GV(SvREFCNT_inc( 927 gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV) 928 )); 929 PL_secondgv = MUTABLE_GV(SvREFCNT_inc( 930 gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV) 931 )); 932 /* make sure the GP isn't removed out from under us for 933 * the SAVESPTR() */ 934 save_gp(PL_firstgv, 0); 935 save_gp(PL_secondgv, 0); 936 /* we don't want modifications localized */ 937 GvINTRO_off(PL_firstgv); 938 GvINTRO_off(PL_secondgv); 939 SAVEGENERICSV(GvSV(PL_firstgv)); 940 SvREFCNT_inc(GvSV(PL_firstgv)); 941 SAVEGENERICSV(GvSV(PL_secondgv)); 942 SvREFCNT_inc(GvSV(PL_secondgv)); 943 } 944 945 gimme = G_SCALAR; 946 cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix); 947 if (!(flags & OPf_SPECIAL)) { 948 cx->cx_type = CXt_SUB|CXp_MULTICALL; 949 cx_pushsub(cx, cv, NULL, hasargs); 950 if (!is_xsub) { 951 PADLIST * const padlist = CvPADLIST(cv); 952 953 if (++CvDEPTH(cv) >= 2) 954 pad_push(padlist, CvDEPTH(cv)); 955 PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv)); 956 957 if (hasargs) { 958 /* This is mostly copied from pp_entersub */ 959 AV * const av0 = MUTABLE_AV(PAD_SVl(0)); 960 961 cx->blk_sub.savearray = GvAV(PL_defgv); 962 GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av0)); 963 } 964 965 } 966 } 967 968 start = p1 - max; 969 Perl_sortsv_flags(aTHX_ start, max, 970 (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv), 971 sort_flags); 972 973 /* Reset cx, in case the context stack has been reallocated. */ 974 cx = CX_CUR(); 975 976 /* the code used to think this could be > 0 */ 977 assert(cx->blk_oldsp == 0); 978 979 rpp_popfree_to_NN(PL_stack_base); 980 981 CX_LEAVE_SCOPE(cx); 982 if (!(flags & OPf_SPECIAL)) { 983 assert(CxTYPE(cx) == CXt_SUB); 984 cx_popsub(cx); 985 } 986 else 987 assert(CxTYPE(cx) == CXt_NULL); 988 /* there isn't a POPNULL ! */ 989 990 cx_popblock(cx); 991 CX_POP(cx); 992 pop_stackinfo(); 993 CATCH_SET(oldcatch); 994 } 995 else { 996 /* call one of the built-in sort functions */ 997 998 /* XXX this extend has been here since perl5.000. With safe 999 * signals, I don't think it's needed any more - DAPM. 1000 MEXTEND(SP, 20); Can't afford stack realloc on signal. 1001 */ 1002 start = p1 - max; 1003 if (priv & OPpSORT_NUMERIC) { 1004 if ((priv & OPpSORT_INTEGER) || all_SIVs) { 1005 if (overloading) 1006 if (descending) 1007 sortsv_amagic_i_ncmp_desc(aTHX_ start, max, sort_flags); 1008 else 1009 sortsv_amagic_i_ncmp(aTHX_ start, max, sort_flags); 1010 else 1011 if (descending) 1012 sortsv_i_ncmp_desc(aTHX_ start, max, sort_flags); 1013 else 1014 sortsv_i_ncmp(aTHX_ start, max, sort_flags); 1015 } 1016 else { 1017 if (overloading) 1018 if (descending) 1019 sortsv_amagic_ncmp_desc(aTHX_ start, max, sort_flags); 1020 else 1021 sortsv_amagic_ncmp(aTHX_ start, max, sort_flags); 1022 else 1023 if (descending) 1024 sortsv_ncmp_desc(aTHX_ start, max, sort_flags); 1025 else 1026 sortsv_ncmp(aTHX_ start, max, sort_flags); 1027 } 1028 } 1029 #ifdef USE_LOCALE_COLLATE 1030 else if(IN_LC_RUNTIME(LC_COLLATE)) { 1031 if (overloading) 1032 if (descending) 1033 sortsv_amagic_cmp_locale_desc(aTHX_ start, max, sort_flags); 1034 else 1035 sortsv_amagic_cmp_locale(aTHX_ start, max, sort_flags); 1036 else 1037 if (descending) 1038 sortsv_cmp_locale_desc(aTHX_ start, max, sort_flags); 1039 else 1040 sortsv_cmp_locale(aTHX_ start, max, sort_flags); 1041 } 1042 #endif 1043 else { 1044 if (overloading) 1045 if (descending) 1046 sortsv_amagic_cmp_desc(aTHX_ start, max, sort_flags); 1047 else 1048 sortsv_amagic_cmp(aTHX_ start, max, sort_flags); 1049 else 1050 if (descending) 1051 sortsv_cmp_desc(aTHX_ start, max, sort_flags); 1052 else 1053 sortsv_cmp(aTHX_ start, max, sort_flags); 1054 } 1055 } 1056 if ((priv & OPpSORT_REVERSE) != 0) { 1057 SV **q = start+max-1; 1058 while (start < q) { 1059 SV * const tmp = *start; 1060 *start++ = *q; 1061 *q-- = tmp; 1062 } 1063 } 1064 } 1065 1066 if (!av) { 1067 LEAVE; 1068 PL_stack_sp = ORIGMARK + max; 1069 return nextop; 1070 } 1071 1072 /* OPpSORT_INPLACE: copy back result to the array */ 1073 { 1074 SV** const base = MARK+2; 1075 SSize_t max_minus_one = max - 1; /* attempt to work around mingw bug */ 1076 1077 /* we left the AV there so on a refcounted stack it wouldn't be 1078 * prematurely freed */ 1079 assert(base[-1] == (SV*)av); 1080 1081 if (SvMAGICAL(av)) { 1082 for (i = 0; i <= max_minus_one; i++) { 1083 SV *sv = base[i]; 1084 base[i] = newSVsv(sv); 1085 #ifdef PERL_RC_STACK 1086 SvREFCNT_dec_NN(sv); 1087 #endif 1088 } 1089 av_clear(av); 1090 if (max_minus_one >= 0) 1091 av_extend(av, max_minus_one); 1092 for (i=0; i <= max_minus_one; i++) { 1093 SV * const sv = base[i]; 1094 SV ** const didstore = av_store(av, i, sv); 1095 if (SvSMAGICAL(sv)) 1096 mg_set(sv); 1097 #ifdef PERL_RC_STACK 1098 if (didstore) 1099 SvREFCNT_inc_simple_void_NN(sv); 1100 #else 1101 if (!didstore) 1102 sv_2mortal(sv); 1103 #endif 1104 } 1105 } 1106 else { 1107 /* the elements of av are likely to be the same as the 1108 * (non-refcounted) elements on the stack, just in a different 1109 * order. However, its possible that someone's messed with av 1110 * in the meantime. 1111 * So to avoid freeing most/all the stack elements when 1112 * doing av_clear(), first bump the count on each element. 1113 * In addition, normally a *copy* of each sv should be 1114 * assigned to each array element; but if the only reference 1115 * to that sv was from the array, then we can skip the copy. 1116 * 1117 * For a refcounted stack, it's not necessary to bump the 1118 * refcounts initially, as the stack itself keeps the 1119 * elements alive during av_clear(). 1120 * 1121 */ 1122 for (i = 0; i <= max_minus_one; i++) { 1123 SV *sv = base[i]; 1124 assert(sv); 1125 #ifdef PERL_RC_STACK 1126 if (SvREFCNT(sv) > 2) { 1127 base[i] = newSVsv(sv); 1128 SvREFCNT_dec_NN(sv); 1129 } 1130 #else 1131 if (SvREFCNT(sv) > 1) 1132 base[i] = newSVsv(sv); 1133 else 1134 SvREFCNT_inc_simple_void_NN(sv); 1135 #endif 1136 } 1137 av_clear(av); 1138 if (max_minus_one >= 0) { 1139 av_extend(av, max_minus_one); 1140 Copy(base, AvARRAY(av), max, SV*); 1141 } 1142 AvFILLp(av) = max_minus_one; 1143 AvREIFY_off(av); 1144 AvREAL_on(av); 1145 } 1146 /* sort is only ever optimised with OPpSORT_INPLACE when the 1147 * (@a = sort @a) is in void context. (As an aside: the context 1148 * flag aught to be copied to the sort op: then we could assert 1149 * here that it's void). 1150 * Thus we can simply discard the stack elements now: their 1151 * reference counts have already claimed by av - hence not using 1152 * rpp_popfree_to() here. 1153 */ 1154 PL_stack_sp = ORIGMARK; 1155 #ifdef PERL_RC_STACK 1156 SvREFCNT_dec_NN(av); 1157 #endif 1158 LEAVE; 1159 return nextop; 1160 } 1161 } 1162 1163 1164 /* call a traditional perl compare function, setting $a and $b */ 1165 1166 static I32 1167 S_sortcv(pTHX_ SV *const a, SV *const b) 1168 { 1169 const I32 oldsaveix = PL_savestack_ix; 1170 I32 result; 1171 PMOP * const pm = PL_curpm; 1172 COP * const cop = PL_curcop; 1173 SV *olda, *oldb; 1174 1175 PERL_ARGS_ASSERT_SORTCV; 1176 1177 #ifdef PERL_RC_STACK 1178 assert(rpp_stack_is_rc()); 1179 #endif 1180 1181 olda = GvSV(PL_firstgv); 1182 GvSV(PL_firstgv) = SvREFCNT_inc_simple_NN(a); 1183 SvREFCNT_dec(olda); 1184 oldb = GvSV(PL_secondgv); 1185 GvSV(PL_secondgv) = SvREFCNT_inc_simple_NN(b); 1186 SvREFCNT_dec(oldb); 1187 assert(PL_stack_sp == PL_stack_base); 1188 PL_op = PL_sortcop; 1189 CALLRUNOPS(aTHX); 1190 PL_curcop = cop; 1191 /* entry zero of a stack is always PL_sv_undef, which 1192 * simplifies converting a '()' return into undef in scalar context */ 1193 assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef); 1194 result = SvIV(*PL_stack_sp); 1195 rpp_popfree_to_NN(PL_stack_base); 1196 1197 LEAVE_SCOPE(oldsaveix); 1198 PL_curpm = pm; 1199 return result; 1200 } 1201 1202 1203 /* call a perl compare function that has a ($$) prototype, setting @_ */ 1204 1205 static I32 1206 S_sortcv_stacked(pTHX_ SV *const a, SV *const b) 1207 { 1208 const I32 oldsaveix = PL_savestack_ix; 1209 I32 result; 1210 AV * const av = GvAV(PL_defgv); 1211 PMOP * const pm = PL_curpm; 1212 COP * const cop = PL_curcop; 1213 1214 PERL_ARGS_ASSERT_SORTCV_STACKED; 1215 1216 #ifdef PERL_RC_STACK 1217 assert(rpp_stack_is_rc()); 1218 #endif 1219 1220 #ifdef PERL_RC_STACK 1221 assert(AvREAL(av)); 1222 av_clear(av); 1223 #else 1224 if (AvREAL(av)) { 1225 av_clear(av); 1226 AvREAL_off(av); 1227 AvREIFY_on(av); 1228 } 1229 #endif 1230 1231 if (AvMAX(av) < 1) { 1232 SV **ary = AvALLOC(av); 1233 if (AvARRAY(av) != ary) { 1234 AvMAX(av) += AvARRAY(av) - AvALLOC(av); 1235 AvARRAY(av) = ary; 1236 } 1237 if (AvMAX(av) < 1) { 1238 Renew(ary,2,SV*); 1239 AvMAX(av) = 1; 1240 AvARRAY(av) = ary; 1241 AvALLOC(av) = ary; 1242 } 1243 } 1244 AvFILLp(av) = 1; 1245 1246 AvARRAY(av)[0] = a; 1247 AvARRAY(av)[1] = b; 1248 #ifdef PERL_RC_STACK 1249 SvREFCNT_inc_simple_void_NN(a); 1250 SvREFCNT_inc_simple_void_NN(b); 1251 #endif 1252 assert(PL_stack_sp == PL_stack_base); 1253 PL_op = PL_sortcop; 1254 CALLRUNOPS(aTHX); 1255 PL_curcop = cop; 1256 /* entry zero of a stack is always PL_sv_undef, which 1257 * simplifies converting a '()' return into undef in scalar context */ 1258 assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef); 1259 result = SvIV(*PL_stack_sp); 1260 rpp_popfree_to_NN(PL_stack_base); 1261 1262 LEAVE_SCOPE(oldsaveix); 1263 PL_curpm = pm; 1264 return result; 1265 } 1266 1267 1268 /* call an XS compare function. (The two args are always passed on the 1269 * stack, regardless of whether it has a ($$) prototype or not.) */ 1270 1271 static I32 1272 S_sortcv_xsub(pTHX_ SV *const a, SV *const b) 1273 { 1274 const I32 oldsaveix = PL_savestack_ix; 1275 CV * const cv=MUTABLE_CV(PL_sortcop); 1276 I32 result; 1277 PMOP * const pm = PL_curpm; 1278 1279 PERL_ARGS_ASSERT_SORTCV_XSUB; 1280 1281 #ifdef PERL_RC_STACK 1282 assert(rpp_stack_is_rc()); 1283 #endif 1284 1285 assert(PL_stack_sp == PL_stack_base); 1286 PUSHMARK(PL_stack_sp); 1287 rpp_xpush_2(a, b); 1288 1289 rpp_invoke_xs(cv); 1290 1291 /* entry zero of a stack is always PL_sv_undef, which 1292 * simplifies converting a '()' return into undef in scalar context */ 1293 assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef); 1294 result = SvIV(*PL_stack_sp); 1295 rpp_popfree_to_NN(PL_stack_base); 1296 1297 LEAVE_SCOPE(oldsaveix); 1298 PL_curpm = pm; 1299 return result; 1300 } 1301 1302 1303 PERL_STATIC_FORCE_INLINE I32 1304 S_sv_ncmp(pTHX_ SV *const a, SV *const b) 1305 { 1306 I32 cmp = do_ncmp(a, b); 1307 1308 PERL_ARGS_ASSERT_SV_NCMP; 1309 1310 if (cmp == 2) { 1311 if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL); 1312 return 0; 1313 } 1314 1315 return cmp; 1316 } 1317 1318 PERL_STATIC_FORCE_INLINE I32 1319 S_sv_ncmp_desc(pTHX_ SV *const a, SV *const b) 1320 { 1321 PERL_ARGS_ASSERT_SV_NCMP_DESC; 1322 1323 return -S_sv_ncmp(aTHX_ a, b); 1324 } 1325 1326 PERL_STATIC_FORCE_INLINE I32 1327 S_sv_i_ncmp(pTHX_ SV *const a, SV *const b) 1328 { 1329 const IV iv1 = SvIV(a); 1330 const IV iv2 = SvIV(b); 1331 1332 PERL_ARGS_ASSERT_SV_I_NCMP; 1333 1334 return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0; 1335 } 1336 1337 PERL_STATIC_FORCE_INLINE I32 1338 S_sv_i_ncmp_desc(pTHX_ SV *const a, SV *const b) 1339 { 1340 PERL_ARGS_ASSERT_SV_I_NCMP_DESC; 1341 1342 return -S_sv_i_ncmp(aTHX_ a, b); 1343 } 1344 1345 #define tryCALL_AMAGICbin(left,right,meth) \ 1346 (SvAMAGIC(left)||SvAMAGIC(right)) \ 1347 ? amagic_call(left, right, meth, 0) \ 1348 : NULL; 1349 1350 #define SORT_NORMAL_RETURN_VALUE(val) (((val) > 0) ? 1 : ((val) ? -1 : 0)) 1351 1352 PERL_STATIC_FORCE_INLINE I32 1353 S_amagic_ncmp(pTHX_ SV *const a, SV *const b) 1354 { 1355 SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg); 1356 1357 PERL_ARGS_ASSERT_AMAGIC_NCMP; 1358 1359 if (tmpsv) { 1360 if (SvIOK(tmpsv)) { 1361 const I32 i = SvIVX(tmpsv); 1362 return SORT_NORMAL_RETURN_VALUE(i); 1363 } 1364 else { 1365 const NV d = SvNV(tmpsv); 1366 return SORT_NORMAL_RETURN_VALUE(d); 1367 } 1368 } 1369 return S_sv_ncmp(aTHX_ a, b); 1370 } 1371 1372 PERL_STATIC_FORCE_INLINE I32 1373 S_amagic_ncmp_desc(pTHX_ SV *const a, SV *const b) 1374 { 1375 PERL_ARGS_ASSERT_AMAGIC_NCMP_DESC; 1376 1377 return -S_amagic_ncmp(aTHX_ a, b); 1378 } 1379 1380 PERL_STATIC_FORCE_INLINE I32 1381 S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b) 1382 { 1383 SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg); 1384 1385 PERL_ARGS_ASSERT_AMAGIC_I_NCMP; 1386 1387 if (tmpsv) { 1388 if (SvIOK(tmpsv)) { 1389 const I32 i = SvIVX(tmpsv); 1390 return SORT_NORMAL_RETURN_VALUE(i); 1391 } 1392 else { 1393 const NV d = SvNV(tmpsv); 1394 return SORT_NORMAL_RETURN_VALUE(d); 1395 } 1396 } 1397 return S_sv_i_ncmp(aTHX_ a, b); 1398 } 1399 1400 PERL_STATIC_FORCE_INLINE I32 1401 S_amagic_i_ncmp_desc(pTHX_ SV *const a, SV *const b) 1402 { 1403 PERL_ARGS_ASSERT_AMAGIC_I_NCMP_DESC; 1404 1405 return -S_amagic_i_ncmp(aTHX_ a, b); 1406 } 1407 1408 PERL_STATIC_FORCE_INLINE I32 1409 S_amagic_cmp(pTHX_ SV *const str1, SV *const str2) 1410 { 1411 SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg); 1412 1413 PERL_ARGS_ASSERT_AMAGIC_CMP; 1414 1415 if (tmpsv) { 1416 if (SvIOK(tmpsv)) { 1417 const I32 i = SvIVX(tmpsv); 1418 return SORT_NORMAL_RETURN_VALUE(i); 1419 } 1420 else { 1421 const NV d = SvNV(tmpsv); 1422 return SORT_NORMAL_RETURN_VALUE(d); 1423 } 1424 } 1425 return sv_cmp(str1, str2); 1426 } 1427 1428 PERL_STATIC_FORCE_INLINE I32 1429 S_amagic_cmp_desc(pTHX_ SV *const str1, SV *const str2) 1430 { 1431 PERL_ARGS_ASSERT_AMAGIC_CMP_DESC; 1432 1433 return -S_amagic_cmp(aTHX_ str1, str2); 1434 } 1435 1436 PERL_STATIC_FORCE_INLINE I32 1437 S_cmp_desc(pTHX_ SV *const str1, SV *const str2) 1438 { 1439 PERL_ARGS_ASSERT_CMP_DESC; 1440 1441 return -sv_cmp(str1, str2); 1442 } 1443 1444 #ifdef USE_LOCALE_COLLATE 1445 1446 PERL_STATIC_FORCE_INLINE I32 1447 S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2) 1448 { 1449 SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg); 1450 1451 PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE; 1452 1453 if (tmpsv) { 1454 if (SvIOK(tmpsv)) { 1455 const I32 i = SvIVX(tmpsv); 1456 return SORT_NORMAL_RETURN_VALUE(i); 1457 } 1458 else { 1459 const NV d = SvNV(tmpsv); 1460 return SORT_NORMAL_RETURN_VALUE(d); 1461 } 1462 } 1463 return sv_cmp_locale(str1, str2); 1464 } 1465 1466 PERL_STATIC_FORCE_INLINE I32 1467 S_amagic_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2) 1468 { 1469 PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE_DESC; 1470 1471 return -S_amagic_cmp_locale(aTHX_ str1, str2); 1472 } 1473 1474 PERL_STATIC_FORCE_INLINE I32 1475 S_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2) 1476 { 1477 PERL_ARGS_ASSERT_CMP_LOCALE_DESC; 1478 1479 return -sv_cmp_locale(str1, str2); 1480 } 1481 1482 #endif 1483 1484 /* 1485 * ex: set ts=8 sts=4 sw=4 et: 1486 */ 1487