/*    pp_sort.c
 *
 *    Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
 *    2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others
 *
 *    You may distribute under the terms of either the GNU General Public
 *    License or the Artistic License, as specified in the README file.
 *
 */

/*
 * ...they shuffled back towards the rear of the line.  'No, not at the
 * rear!'  the slave-driver shouted.  'Three files up. And stay there...
 *
 *     [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"]
 */

/* This file contains pp ("push/pop") functions that
 * execute the opcodes that make up a perl program.  A typical pp function
 * expects to find its arguments on the stack, and usually pushes its
 * results onto the stack, hence the 'pp' terminology.  Each OP structure
 * contains a pointer to the relevant pp_foo() function.
 *
 * This particular file just contains pp_sort(), which is complex
 * enough to merit its own file!  See the other pp*.c files for the rest of
 * the pp_ functions.
 */

#include "EXTERN.h"
#define PERL_IN_PP_SORT_C
#include "perl.h"

#ifndef SMALLSORT
#define SMALLSORT (200)
#endif

/*
 * The mergesort implementation is by Peter M. Mcilroy <pmcilroy@lucent.com>.
 *
 * The original code was written in conjunction with BSD Computer Software
 * Research Group at University of California, Berkeley.
 *
 * See also: "Optimistic Sorting and Information Theoretic Complexity"
 *           Peter McIlroy
 *           SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
 *           pp 467-474, Austin, Texas, 25-27 January 1993.
 *
 * The integration to Perl is by John P. Linderman <jpl.jpl@gmail.com>.
 *
 * The code can be distributed under the same terms as Perl itself.
 *
 */


typedef char * aptr;            /* pointer for arithmetic on sizes */
typedef SV * gptr;              /* pointers in our lists */

/* Binary merge internal sort, with a few special mods
** for the special perl environment it now finds itself in.
**
** Things that were once options have been hotwired
** to values suitable for this use.  In particular, we'll always
** initialize looking for natural runs, we'll always produce stable
** output, and we'll always do Peter McIlroy's binary merge.
*/

/* Pointer types for arithmetic and storage and convenience casts */

#define APTR(P) ((aptr)(P))
#define GPTP(P) ((gptr *)(P))
#define GPPP(P) ((gptr **)(P))


/* byte offset from pointer P to (larger) pointer Q */
#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))

#define PSIZE sizeof(gptr)

/* If PSIZE is power of 2, make PSHIFT that power, if that helps */

#ifdef PSHIFT
#define PNELEM(P, Q)    (BYTEOFF(P,Q) >> (PSHIFT))
#define PNBYTE(N)       ((N) << (PSHIFT))
#define PINDEX(P, N)    (GPTP(APTR(P) + PNBYTE(N)))
#else
/* Leave optimization to compiler */
#define PNELEM(P, Q)    (GPTP(Q) - GPTP(P))
#define PNBYTE(N)       ((N) * (PSIZE))
#define PINDEX(P, N)    (GPTP(P) + (N))
#endif

/* Pointer into other corresponding to pointer into this */
#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))

#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
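
/* Note that FROMTOUPTO uses a do/while: it copies *src .. *(lim-1) into dst,
 * advancing both pointers, but it always copies at least one element, so
 * callers must ensure src < lim before invoking it.
 */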

/* Runs are identified by a pointer in the auxiliary list.
** The pointer is at the start of the list,
** and it points to the start of the next list.
** NEXT is used as an lvalue, too.
*/

#define NEXT(P)         (*GPPP(P))


/* PTHRESH is the minimum number of pairs with the same sense to justify
** checking for a run and extending it.  Note that PTHRESH counts PAIRS,
** not just elements, so PTHRESH == 8 means a run of 16.
*/

#define PTHRESH (8)

/* RTHRESH is the number of elements in a run that must compare low
** to the low element from the opposing run before we justify
** doing a binary rampup instead of single stepping.
** In random input, N in a row low should only happen with
** probability 2^(1-N), so we can risk that we are dealing
** with orderly input without paying much when we aren't.
*/

#define RTHRESH (6)


/*
** Overview of algorithm and variables.
** The array of elements at list1 will be organized into runs of length 2,
** or runs of length >= 2 * PTHRESH.  We only try to form long runs when
** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
**
** Unless otherwise specified, pair pointers address the first of two elements.
**
** b and b+1 are a pair that compare with sense "sense".
** b is the "bottom" of adjacent pairs that might form a longer run.
**
** p2 parallels b in the list2 array, where runs are defined by
** a pointer chain.
**
** t represents the "top" of the adjacent pairs that might extend
** the run beginning at b.  Usually, t addresses a pair
** that compares with opposite sense from (b,b+1).
** However, it may also address a singleton element at the end of list1,
** or it may be equal to "last", the first element beyond list1.
**
** r addresses the Nth pair following b.  If this would be beyond t,
** we back it off to t.  Only when r is less than t do we consider the
** run long enough to consider checking.
**
** q addresses a pair such that the pairs at b through q already form a run.
** Often, q will equal b, indicating we are only sure of the pair itself.
** However, a search on the previous cycle may have revealed a longer run,
** so q may be greater than b.
**
** p is used to work back from a candidate r, trying to reach q,
** which would mean b through r would be a run.  If we discover such a run,
** we start q at r and try to push it further towards t.
** If b through r is NOT a run, we detect the wrong order at (p-1,p).
** In any event, after the check (if any), we have two main cases.
**
** 1) Short run.  b <= q < p <= r <= t.
**      b through q is a run (perhaps trivial)
**      q through p are uninteresting pairs
**      p through r is a run
**
** 2) Long run.  b < r <= q < t.
**      b through q is a run (of length >= 2 * PTHRESH)
**
** Note that degenerate cases are not only possible, but likely.
** For example, if the pair following b compares with opposite sense,
** then b == q < p == r == t.
*/
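
/* A small illustration (not exhaustive): with list1 = (1 3 2 4 6 5), the
 * first two pairs compare ascending and the third descending.  Fewer than
 * PTHRESH pairs share a sense, so no long run is attempted: dynprep below
 * leaves (1 3 2 4 5 6), swapping only the descending pair in place, records
 * three runs of length 2 through the NEXT chain in list2, and returns 3.
 */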

PERL_STATIC_FORCE_INLINE IV __attribute__always_inline__
dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
{
    I32 sense;
    gptr *b, *p, *q, *t, *p2;
    gptr *last, *r;
    IV runs = 0;

    b = list1;
    last = PINDEX(b, nmemb);
    sense = (cmp(aTHX_ *b, *(b+1)) > 0);
    for (p2 = list2; b < last; ) {
        /* We just started, or just reversed sense.
        ** Set t at end of pairs with the prevailing sense.
        */
        for (p = b+2, t = p; ++p < last; t = ++p) {
            if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
        }
        q = b;
        /* Having laid out the playing field, look for long runs */
        do {
            p = r = b + (2 * PTHRESH);
            if (r >= t) p = r = t;      /* too short to care about */
            else {
                while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
                       ((p -= 2) > q)) {}
                if (p <= q) {
                    /* b through r is a (long) run.
                    ** Extend it as far as possible.
                    */
                    p = q = r;
                    while (((p += 2) < t) &&
                           ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
                    r = p = q + 2;      /* no simple pairs, no after-run */
                }
            }
            if (q > b) {                /* run of greater than 2 at b */
                gptr *savep = p;

                p = q += 2;
                /* pick up singleton, if possible */
                if ((p == t) &&
                    ((t + 1) == last) &&
                    ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
                    savep = r = p = q = last;
                p2 = NEXT(p2) = p2 + (p - b); ++runs;
                if (sense)
                    while (b < --p) {
                        const gptr c = *b;
                        *b++ = *p;
                        *p = c;
                    }
                p = savep;
            }
            while (q < p) {             /* simple pairs */
                p2 = NEXT(p2) = p2 + 2; ++runs;
                if (sense) {
                    const gptr c = *q++;
                    *(q-1) = *q;
                    *q++ = c;
                } else q += 2;
            }
            if (((b = p) == t) && ((t+1) == last)) {
                NEXT(p2) = p2 + 1; ++runs;
                b++;
            }
            q = r;
        } while (b < t);
        sense = !sense;
    }
    return runs;
}


/* The original merge sort, in use since 5.7, was as fast as, or faster than,
 * qsort on many platforms, but slower than qsort, conspicuously so,
 * on others.  The most likely explanation was platform-specific
 * differences in cache sizes and relative speeds.
 *
 * The quicksort divide-and-conquer algorithm guarantees that, as the
 * problem is subdivided into smaller and smaller parts, the parts
 * fit into smaller (and faster) caches.  So it doesn't matter how
 * many levels of cache exist, quicksort will "find" them, and,
 * as long as smaller is faster, take advantage of them.
 *
 * By contrast, consider how the original mergesort algorithm worked.
 * Suppose we have five runs (each typically of length 2 after dynprep).
 *
 *        pass               base                        aux
 *         0              1 2 3 4 5
 *         1                                           12 34 5
 *         2              1234 5
 *         3                                           12345
 *         4              12345
 *
 * Adjacent pairs are merged in "grand sweeps" through the input.
 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
 * runs 3 and 4 are merged and the records from run 5 have been copied.
 * The only cache that matters is one large enough to hold *all* the input.
 * On some platforms, this may be many times slower than smaller caches.
 *
 * The following pseudo-code uses the same basic merge algorithm,
 * but in a divide-and-conquer way.
 *
 * # merge $runs runs at offset $offset of list $list1 into $list2.
 * # all unmerged runs ($runs == 1) originate in list $base.
 * sub mgsort2 {
 *     my ($offset, $runs, $base, $list1, $list2) = @_;
 *
 *     if ($runs == 1) {
 *         if ($list1 is $base) copy run to $list2
 *         return offset of end of list (or copy)
 *     } else {
 *         $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
 *         mgsort2($off2, $runs/2, $base, $list2, $list1)
 *         merge the adjacent runs at $offset of $list1 into $list2
 *         return the offset of the end of the merged runs
 *     }
 * }
 * mgsort2(0, $runs, $base, $aux, $base);
 *
 * For our 5 runs, the tree of calls looks like
 *
 *                      5
 *                 3         2
 *              2     1    1   1
 *             1 1
 *
 *             1 2    3    4   5
 *
 * and the corresponding activity looks like
 *
 *      copy runs 1 and 2 from base to aux
 *      merge runs 1 and 2 from aux to base
 *      (run 3 is where it belongs, no copy needed)
 *      merge runs 12 and 3 from base to aux
 *      (runs 4 and 5 are where they belong, no copy needed)
 *      merge runs 4 and 5 from base to aux
 *      merge runs 123 and 45 from aux to base
 *
 * Note that we merge runs 1 and 2 immediately after copying them,
 * while they are still likely to be in fast cache.  Similarly,
 * run 3 is merged with run 12 while it still may be lingering in cache.
 * This implementation should therefore enjoy much of the cache-friendly
 * behavior that quicksort does.  In addition, it does less copying
 * than the original mergesort implementation (only runs 1 and 2 are copied)
 * and the "balancing" of merges is better (merged runs comprise more nearly
 * equal numbers of original runs).
 *
 * The actual cache-friendly implementation will use a pseudo-stack
 * to avoid recursion, and will unroll processing of runs of length 2,
 * but it is otherwise similar to the recursive implementation.
 */

typedef struct {
    IV  offset;         /* offset of 1st of 2 runs at this level */
    IV  runs;           /* how many runs must be combined into 1 */
} off_runs;             /* pseudo-stack element */
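
/* Each stacked off_runs entry records the offset of a deferred second half
 * and how many of the original runs it still contains; runs == 0 marks a
 * level whose two halves have both been built, which is what triggers the
 * merge in the main loop below.  Since the run count at least halves at
 * every level, the fixed 60-entry stack is comfortably deep enough in
 * practice.
 */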

PERL_STATIC_FORCE_INLINE void
S_sortsv_flags_impl(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    IV i, run, offset;
    I32 sense, level;
    gptr *f1, *f2, *t, *b, *p;
    int iwhich;
    gptr *aux;
    gptr *p1;
    gptr small[SMALLSORT];
    gptr *which[3];
    off_runs stack[60], *stackp;

    PERL_UNUSED_ARG(flags);
    PERL_ARGS_ASSERT_SORTSV_FLAGS_IMPL;
    if (nmemb <= 1) return;                     /* sorted trivially */

    if (nmemb <= SMALLSORT) aux = small;        /* use stack for aux array */
    else { Newx(aux,nmemb,gptr); }              /* allocate auxiliary array */
    level = 0;
    stackp = stack;
    stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
    stackp->offset = offset = 0;
    which[0] = which[2] = base;
    which[1] = aux;
    for (;;) {
        /* On levels where both runs have been constructed (stackp->runs == 0),
         * merge them, and note the offset of their end, in case the offset
         * is needed at the next level up.  Hop up a level, and,
         * as long as stackp->runs is 0, keep merging.
         */
        IV runs = stackp->runs;
        if (runs == 0) {
            gptr *list1, *list2;
            iwhich = level & 1;
            list1 = which[iwhich];              /* area where runs are now */
            list2 = which[++iwhich];            /* area for merged runs */
            do {
                gptr *l1, *l2, *tp2;
                offset = stackp->offset;
                f1 = p1 = list1 + offset;       /* start of first run */
                p = tp2 = list2 + offset;       /* where merged run will go */
                t = NEXT(p);                    /* where first run ends */
                f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
                t = NEXT(t);                    /* where second run ends */
                l2 = POTHER(t, list2, list1);   /* ... on the other side */
                offset = PNELEM(list2, t);
                while (f1 < l1 && f2 < l2) {
                    /* If head 1 is larger than head 2, find ALL the elements
                    ** in list 2 strictly less than head1, write them all,
                    ** then head 1.  Then compare the new heads, and repeat,
                    ** until one or both lists are exhausted.
                    **
                    ** In all comparisons (after establishing
                    ** which head to merge) the item to merge
                    ** (at pointer q) is the first operand of
                    ** the comparison.  When we want to know
                    ** if "q is strictly less than the other",
                    ** we can't just do
                    **    cmp(q, other) < 0
                    ** because stability demands that we treat equality
                    ** as high when q comes from l2, and as low when
                    ** q was from l1.  So we ask the question by doing
                    **    cmp(q, other) <= sense
                    ** and make sense == 0 when equality should look low,
                    ** and -1 when equality should look high.
                    */

                    gptr *q;
                    if (cmp(aTHX_ *f1, *f2) <= 0) {
                        q = f2; b = f1; t = l1;
                        sense = -1;
                    } else {
                        q = f1; b = f2; t = l2;
                        sense = 0;
                    }


                    /* ramp up
                    **
                    ** Leave t at something strictly
                    ** greater than q (or at the end of the list),
                    ** and b at something strictly less than q.
                    */
                    for (i = 1, run = 0 ;;) {
                        if ((p = PINDEX(b, i)) >= t) {
                            /* off the end */
                            if (((p = PINDEX(t, -1)) > b) &&
                                (cmp(aTHX_ *q, *p) <= sense))
                                t = p;
                            else b = p;
                            break;
                        } else if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                            break;
                        } else b = p;
                        if (++run >= RTHRESH) i += i;
                    }
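
                    /* With RTHRESH == 6 the probe offsets from the advancing
                     * b grow as 1,1,1,1,1,1,2,4,8,...: six single steps, then
                     * doubling, so a long stretch of low elements is skipped
                     * in a logarithmic number of probes while random input
                     * rarely pays for the ramp up.
                     */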

                    /* q is known to follow b and must be inserted before t.
                    ** Increment b, so the range of possibilities is [b,t).
                    ** Round binary split down, to favor early appearance.
                    ** Adjust b and t until q belongs just before t.
                    */

                    b++;
                    while (b < t) {
                        p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
                        if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                        } else b = p + 1;
                    }


                    /* Copy all the strictly low elements */

                    if (q == f1) {
                        FROMTOUPTO(f2, tp2, t);
                        *tp2++ = *f1++;
                    } else {
                        FROMTOUPTO(f1, tp2, t);
                        *tp2++ = *f2++;
                    }
                }


                /* Run out remaining list */
                if (f1 == l1) {
                    if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
                } else FROMTOUPTO(f1, tp2, l1);
                p1 = NEXT(p1) = POTHER(tp2, list2, list1);

                if (--level == 0) goto done;
                --stackp;
                t = list1; list1 = list2; list2 = t;    /* swap lists */
            } while ((runs = stackp->runs) == 0);
        }


        stackp->runs = 0;               /* current run will finish level */
        /* While there are more than 2 runs remaining,
         * turn them into exactly 2 runs (at the "other" level),
         * each made up of approximately half the runs.
         * Stack the second half for later processing,
         * and set about producing the first half now.
         */
        while (runs > 2) {
            ++level;
            ++stackp;
            stackp->offset = offset;
            runs -= stackp->runs = runs / 2;
        }
        /* We must construct a single run from 1 or 2 runs.
         * All the original runs are in which[0] == base.
         * The run we construct must end up in which[level&1].
         */
        iwhich = level & 1;
        if (runs == 1) {
            /* Constructing a single run from a single run.
             * If it's where it belongs already, there's nothing to do.
             * Otherwise, copy it to where it belongs.
             * A run of 1 is either a singleton at level 0,
             * or the second half of a split 3.  In neither event
             * is it necessary to set offset.  It will be set by the merge
             * that immediately follows.
             */
            if (iwhich) {       /* Belongs in aux, currently in base */
                f1 = b = PINDEX(base, offset);  /* where list starts */
                f2 = PINDEX(aux, offset);       /* where list goes */
                t = NEXT(f2);                   /* where list will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                t = PINDEX(base, offset);       /* where it currently ends */
                FROMTOUPTO(f1, f2, t);          /* copy */
                NEXT(b) = t;                    /* set up parallel pointer */
            } else if (level == 0) goto done;   /* single run at level 0 */
        } else {
            /* Constructing a single run from two runs.
             * The merge code at the top will do that.
             * We need only make sure the two runs are in the "other" array,
             * so they'll end up in the correct array after the merge.
             */
            ++level;
            ++stackp;
            stackp->offset = offset;
            stackp->runs = 0;   /* take care of both runs, trigger merge */
            if (!iwhich) {      /* Merged runs belong in aux, copy 1st */
                f1 = b = PINDEX(base, offset);  /* where first run starts */
                f2 = PINDEX(aux, offset);       /* where it will be copied */
                t = NEXT(f2);                   /* where first run will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                p = PINDEX(base, offset);       /* end of first run */
                t = NEXT(t);                    /* where second run will end */
                t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
                FROMTOUPTO(f1, f2, t);          /* copy both runs */
                NEXT(b) = p;                    /* paralleled pointer for 1st */
                NEXT(p) = t;                    /* ... and for second */
            }
        }
    }
  done:
    if (aux != small) Safefree(aux);    /* free iff allocated */

    return;
}

/*
=for apidoc sortsv_flags

In-place sort an array of SV pointers with the given comparison routine,
with various SORTf_* flag options.

=cut
*/
void
Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    PERL_ARGS_ASSERT_SORTSV_FLAGS;

    sortsv_flags_impl(base, nmemb, cmp, flags);
}
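
/* A usage sketch (illustrative only; assumes a core/XS context and a plain,
 * non-magical AV):
 *
 *     AV *av = ...;
 *     SSize_t n = AvFILL(av) + 1;
 *     if (n > 1)
 *         sortsv_flags(AvARRAY(av), (size_t)n, Perl_sv_cmp, 0);
 *
 * The comparison routine must match SVCOMPARE_t, i.e. take (pTHX_ SV *, SV *)
 * and return a negative, zero or positive I32.
 */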

/*
 * Each of the sortsv_* functions contains an inlined copy of
 * sortsv_flags_impl() with an inlined comparator.  Basically, we are
 * emulating C++ templates by using __attribute__((always_inline)).
 *
 * The purpose of that is to avoid the function call overhead inside
 * the sorting routine, which calls the comparison function multiple
 * times per sorted item.
 */

static void
sortsv_amagic_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp, flags);
}

static void
sortsv_amagic_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp_desc, flags);
}

static void
sortsv_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_i_ncmp, flags);
}

static void
sortsv_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_i_ncmp_desc, flags);
}

static void
sortsv_amagic_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_ncmp, flags);
}

static void
sortsv_amagic_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_ncmp_desc, flags);
}

static void
sortsv_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_ncmp, flags);
}

static void
sortsv_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_ncmp_desc, flags);
}

static void
sortsv_amagic_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp, flags);
}

static void
sortsv_amagic_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_desc, flags);
}

static void
sortsv_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, Perl_sv_cmp, flags);
}

static void
sortsv_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_cmp_desc, flags);
}

#ifdef USE_LOCALE_COLLATE

static void
sortsv_amagic_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale, flags);
}

static void
sortsv_amagic_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale_desc, flags);
}

static void
sortsv_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, Perl_sv_cmp_locale, flags);
}

static void
sortsv_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_cmp_locale_desc, flags);
}

#endif

/*

=for apidoc sortsv

In-place sort an array of SV pointers with the given comparison routine.

Currently this always uses mergesort.  See C<L</sortsv_flags>> for a more
flexible routine.

=cut
*/

void
Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
{
    PERL_ARGS_ASSERT_SORTSV;

    sortsv_flags(array, nmemb, cmp, 0);
}

#define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
#define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
#define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )
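
/* SvSIOK: the SV already holds a plain signed integer (IOK set, not a UV).
 * SvNSIOK: a numeric value is already available without conversion, either
 * an NV or a plain signed IV.
 * SvNSIV: fetch that numeric value, calling sv_2nv() to convert if needed.
 */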

PP(pp_sort)
{
    dSP; dMARK; dORIGMARK;
    SV **p1 = ORIGMARK+1, **p2;
    SSize_t max, i;
    AV* av = NULL;
    GV *gv;
    CV *cv = NULL;
    U8 gimme = GIMME_V;
    OP* const nextop = PL_op->op_next;
    I32 overloading = 0;
    bool hasargs = FALSE;
    bool copytmps;
    I32 is_xsub = 0;
    const U8 priv = PL_op->op_private;
    const U8 flags = PL_op->op_flags;
    U32 sort_flags = 0;
    I32 all_SIVs = 1, descending = 0;

    if ((priv & OPpSORT_DESCEND) != 0)
        descending = 1;

    if (gimme != G_LIST) {
        SP = MARK;
        EXTEND(SP,1);
        RETPUSHUNDEF;
    }

    ENTER;
    SAVEVPTR(PL_sortcop);
    if (flags & OPf_STACKED) {
        if (flags & OPf_SPECIAL) {
            OP *nullop = OpSIBLING(cLISTOP->op_first);  /* pass pushmark */
            assert(nullop->op_type == OP_NULL);
            PL_sortcop = nullop->op_next;
        }
        else {
            GV *autogv = NULL;
            HV *stash;
            cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);
          check_cv:
            if (cv && SvPOK(cv)) {
                const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
                if (proto && strEQ(proto, "$$")) {
                    hasargs = TRUE;
                }
            }
            if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
                is_xsub = 1;
            }
            else if (!(cv && CvROOT(cv))) {
                if (gv) {
                    goto autoload;
                }
                else if (!CvANON(cv) && (gv = CvGV(cv))) {
                    if (cv != GvCV(gv)) cv = GvCV(gv);
                  autoload:
                    if (!autogv && (
                            autogv = gv_autoload_pvn(
                                GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
                                GvNAMEUTF8(gv) ? SVf_UTF8 : 0
                            )
                       )) {
                        cv = GvCVu(autogv);
                        goto check_cv;
                    }
                    else {
                        SV *tmpstr = sv_newmortal();
                        gv_efullname3(tmpstr, gv, NULL);
                        DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called",
                            SVfARG(tmpstr));
                    }
                }
                else {
                    DIE(aTHX_ "Undefined subroutine in sort");
                }
            }

            if (is_xsub)
                PL_sortcop = (OP*)cv;
            else
                PL_sortcop = CvSTART(cv);
        }
    }
    else {
        PL_sortcop = NULL;
    }

    /* optimiser converts "@a = sort @a" to "sort \@a".  In this case,
     * push (@a) onto stack, then assign result back to @a at the end of
     * this function */
    if (priv & OPpSORT_INPLACE) {
        assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
        (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
        av = MUTABLE_AV((*SP));
        if (SvREADONLY(av))
            Perl_croak_no_modify();
        max = AvFILL(av) + 1;
        MEXTEND(SP, max);
        if (SvMAGICAL(av)) {
            for (i=0; i < max; i++) {
                SV **svp = av_fetch(av, i, FALSE);
                *SP++ = (svp) ? *svp : NULL;
            }
        }
        else {
            SV **svp = AvARRAY(av);
            assert(svp || max == 0);
            for (i = 0; i < max; i++)
                *SP++ = *svp++;
        }
        SP--;
        p1 = p2 = SP - (max-1);
    }
    else {
        p2 = MARK+1;
        max = SP - MARK;
    }

    /* shuffle stack down, removing optional initial cv (p1!=p2), plus
     * any nulls; also stringify any args, or convert them to integer
     * or number, as required */
    copytmps = cBOOL(PL_sortcop);
    for (i=max; i > 0 ; i--) {
        if ((*p1 = *p2++)) {                    /* Weed out nulls. */
            if (copytmps && SvPADTMP(*p1)) {
                *p1 = sv_mortalcopy(*p1);
            }
            SvTEMP_off(*p1);
            if (!PL_sortcop) {
                if (priv & OPpSORT_NUMERIC) {
                    if (priv & OPpSORT_INTEGER) {
                        if (!SvIOK(*p1))
                            (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                    }
                    else {
                        if (!SvNSIOK(*p1))
                            (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                        if (all_SIVs && !SvSIOK(*p1))
                            all_SIVs = 0;
                    }
                }
                else {
                    if (!SvPOK(*p1))
                        (void)sv_2pv_flags(*p1, 0,
                            SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
                }
                if (SvAMAGIC(*p1))
                    overloading = 1;
            }
            p1++;
        }
        else
            max--;
    }
    if (max > 1) {
        SV **start;
        if (PL_sortcop) {
            PERL_CONTEXT *cx;
            const bool oldcatch = CATCH_GET;
            I32 old_savestack_ix = PL_savestack_ix;

            SAVEOP();

            CATCH_SET(TRUE);
            PUSHSTACKi(PERLSI_SORT);
            if (!hasargs && !is_xsub) {
                SAVEGENERICSV(PL_firstgv);
                SAVEGENERICSV(PL_secondgv);
                PL_firstgv = MUTABLE_GV(SvREFCNT_inc(
                    gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV)
                ));
                PL_secondgv = MUTABLE_GV(SvREFCNT_inc(
                    gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV)
                ));
                /* make sure the GP isn't removed out from under us for
                 * the SAVESPTR() */
                save_gp(PL_firstgv, 0);
                save_gp(PL_secondgv, 0);
                /* we don't want modifications localized */
                GvINTRO_off(PL_firstgv);
                GvINTRO_off(PL_secondgv);
                SAVEGENERICSV(GvSV(PL_firstgv));
                SvREFCNT_inc(GvSV(PL_firstgv));
                SAVEGENERICSV(GvSV(PL_secondgv));
                SvREFCNT_inc(GvSV(PL_secondgv));
            }

            gimme = G_SCALAR;
            cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix);
            if (!(flags & OPf_SPECIAL)) {
                cx->cx_type = CXt_SUB|CXp_MULTICALL;
                cx_pushsub(cx, cv, NULL, hasargs);
                if (!is_xsub) {
                    PADLIST * const padlist = CvPADLIST(cv);

                    if (++CvDEPTH(cv) >= 2)
                        pad_push(padlist, CvDEPTH(cv));
                    PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));

                    if (hasargs) {
                        /* This is mostly copied from pp_entersub */
                        AV * const av = MUTABLE_AV(PAD_SVl(0));

                        cx->blk_sub.savearray = GvAV(PL_defgv);
                        GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av));
                    }

                }
            }

            start = p1 - max;
            Perl_sortsv_flags(aTHX_ start, max,
                (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
                sort_flags);

            /* Reset cx, in case the context stack has been reallocated. */
            cx = CX_CUR();

            PL_stack_sp = PL_stack_base + cx->blk_oldsp;

            CX_LEAVE_SCOPE(cx);
            if (!(flags & OPf_SPECIAL)) {
                assert(CxTYPE(cx) == CXt_SUB);
                cx_popsub(cx);
            }
            else
                assert(CxTYPE(cx) == CXt_NULL);
            /* there isn't a POPNULL ! */

            cx_popblock(cx);
            CX_POP(cx);
            POPSTACK;
            CATCH_SET(oldcatch);
        }
        else {
            MEXTEND(SP, 20);    /* Can't afford stack realloc on signal. */
            start = ORIGMARK+1;
            if (priv & OPpSORT_NUMERIC) {
                if ((priv & OPpSORT_INTEGER) || all_SIVs) {
                    if (overloading)
                        if (descending)
                            sortsv_amagic_i_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_amagic_i_ncmp(aTHX_ start, max, sort_flags);
                    else
                        if (descending)
                            sortsv_i_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_i_ncmp(aTHX_ start, max, sort_flags);
                }
                else {
                    if (overloading)
                        if (descending)
                            sortsv_amagic_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_amagic_ncmp(aTHX_ start, max, sort_flags);
                    else
                        if (descending)
                            sortsv_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_ncmp(aTHX_ start, max, sort_flags);
                }
            }
#ifdef USE_LOCALE_COLLATE
            else if(IN_LC_RUNTIME(LC_COLLATE)) {
                if (overloading)
                    if (descending)
                        sortsv_amagic_cmp_locale_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_amagic_cmp_locale(aTHX_ start, max, sort_flags);
                else
                    if (descending)
                        sortsv_cmp_locale_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_cmp_locale(aTHX_ start, max, sort_flags);
            }
#endif
            else {
                if (overloading)
                    if (descending)
                        sortsv_amagic_cmp_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_amagic_cmp(aTHX_ start, max, sort_flags);
                else
                    if (descending)
                        sortsv_cmp_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_cmp(aTHX_ start, max, sort_flags);
            }
        }
        if ((priv & OPpSORT_REVERSE) != 0) {
            SV **q = start+max-1;
            while (start < q) {
                SV * const tmp = *start;
                *start++ = *q;
                *q-- = tmp;
            }
        }
    }

    if (av) {
        /* copy back result to the array */
        SV** const base = MARK+1;
        SSize_t max_minus_one = max - 1; /* attempt to work around mingw bug */
        if (SvMAGICAL(av)) {
            for (i = 0; i <= max_minus_one; i++)
                base[i] = newSVsv(base[i]);
            av_clear(av);
            if (max_minus_one >= 0)
                av_extend(av, max_minus_one);
            for (i=0; i <= max_minus_one; i++) {
                SV * const sv = base[i];
                SV ** const didstore = av_store(av, i, sv);
                if (SvSMAGICAL(sv))
                    mg_set(sv);
                if (!didstore)
                    sv_2mortal(sv);
            }
        }
        else {
            /* the elements of av are likely to be the same as the
             * (non-refcounted) elements on the stack, just in a different
             * order.  However, it's possible that someone's messed with av
             * in the meantime.  So bump and unbump the relevant refcounts
             * first.
             */
            for (i = 0; i <= max_minus_one; i++) {
                SV *sv = base[i];
                assert(sv);
                if (SvREFCNT(sv) > 1)
                    base[i] = newSVsv(sv);
                else
                    SvREFCNT_inc_simple_void_NN(sv);
            }
            av_clear(av);
            if (max_minus_one >= 0) {
                av_extend(av, max_minus_one);
                Copy(base, AvARRAY(av), max, SV*);
            }
            AvFILLp(av) = max_minus_one;
            AvREIFY_off(av);
            AvREAL_on(av);
        }
    }
    LEAVE;
    PL_stack_sp = ORIGMARK + max;
    return nextop;
}
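
/* The three routines below adapt a Perl-level comparator to the SVCOMPARE_t
 * interface used by the mergesort above: S_sortcv runs an inline sort block
 * with $a/$b set via PL_firstgv/PL_secondgv, S_sortcv_stacked passes the two
 * operands in @_ for subs with a ($$) prototype, and S_sortcv_xsub calls an
 * XSUB with the two SVs pushed on the argument stack.
 */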

static I32
S_sortcv(pTHX_ SV *const a, SV *const b)
{
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;
    SV *olda, *oldb;

    PERL_ARGS_ASSERT_SORTCV;

    olda = GvSV(PL_firstgv);
    GvSV(PL_firstgv) = SvREFCNT_inc_simple_NN(a);
    SvREFCNT_dec(olda);
    oldb = GvSV(PL_secondgv);
    GvSV(PL_secondgv) = SvREFCNT_inc_simple_NN(b);
    SvREFCNT_dec(oldb);
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
{
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    AV * const av = GvAV(PL_defgv);
    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;

    PERL_ARGS_ASSERT_SORTCV_STACKED;

    if (AvREAL(av)) {
        av_clear(av);
        AvREAL_off(av);
        AvREIFY_on(av);
    }
    if (AvMAX(av) < 1) {
        SV **ary = AvALLOC(av);
        if (AvARRAY(av) != ary) {
            AvMAX(av) += AvARRAY(av) - AvALLOC(av);
            AvARRAY(av) = ary;
        }
        if (AvMAX(av) < 1) {
            Renew(ary,2,SV*);
            AvMAX(av) = 1;
            AvARRAY(av) = ary;
            AvALLOC(av) = ary;
        }
    }
    AvFILLp(av) = 1;

    AvARRAY(av)[0] = a;
    AvARRAY(av)[1] = b;
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_xsub(pTHX_ SV *const a, SV *const b)
{
    dSP;
    const I32 oldsaveix = PL_savestack_ix;
    CV * const cv=MUTABLE_CV(PL_sortcop);
    I32 result;
    PMOP * const pm = PL_curpm;

    PERL_ARGS_ASSERT_SORTCV_XSUB;

    SP = PL_stack_base;
    PUSHMARK(SP);
    EXTEND(SP, 2);
    *++SP = a;
    *++SP = b;
    PUTBACK;
    (void)(*CvXSUB(cv))(aTHX_ cv);
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}
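

/* do_ncmp() returns 2 when the two values are unordered (e.g. a NaN is
 * involved); S_sv_ncmp treats that as equality, after optionally raising an
 * "uninitialized" warning, so the sort order stays well defined.
 */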
PERL_STATIC_FORCE_INLINE I32
S_sv_ncmp(pTHX_ SV *const a, SV *const b)
{
    I32 cmp = do_ncmp(a, b);

    PERL_ARGS_ASSERT_SV_NCMP;

    if (cmp == 2) {
        if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
        return 0;
    }

    return cmp;
}

PERL_STATIC_FORCE_INLINE I32
S_sv_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_SV_NCMP_DESC;

    return -S_sv_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    const IV iv1 = SvIV(a);
    const IV iv2 = SvIV(b);

    PERL_ARGS_ASSERT_SV_I_NCMP;

    return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
}

PERL_STATIC_FORCE_INLINE I32
S_sv_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_SV_I_NCMP_DESC;

    return -S_sv_i_ncmp(aTHX_ a, b);
}

#define tryCALL_AMAGICbin(left,right,meth) \
    (SvAMAGIC(left)||SvAMAGIC(right)) \
        ? amagic_call(left, right, meth, 0) \
        : NULL;

#define SORT_NORMAL_RETURN_VALUE(val)  (((val) > 0) ? 1 : ((val) ? -1 : 0))

PERL_STATIC_FORCE_INLINE I32
S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
{
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_AMAGIC_NCMP_DESC;

    return -S_amagic_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_I_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_i_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_AMAGIC_I_NCMP_DESC;

    return -S_amagic_i_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
{
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp(str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_AMAGIC_CMP_DESC;

    return -S_amagic_cmp(aTHX_ str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_cmp_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_CMP_DESC;

    return -sv_cmp(str1, str2);
}
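
/* The _locale variants below compare according to the current LC_COLLATE
 * locale via sv_cmp_locale(); pp_sort only selects them when
 * IN_LC_RUNTIME(LC_COLLATE) is true.
 */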

#ifdef USE_LOCALE_COLLATE

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
{
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp_locale(str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE_DESC;

    return -S_amagic_cmp_locale(aTHX_ str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_CMP_LOCALE_DESC;

    return -sv_cmp_locale(str1, str2);
}

#endif

/*
 * ex: set ts=8 sts=4 sw=4 et:
 */