/*    pp_sort.c
 *
 *    Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
 *    2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others
 *
 *    You may distribute under the terms of either the GNU General Public
 *    License or the Artistic License, as specified in the README file.
 *
 */

/*
 * ...they shuffled back towards the rear of the line. 'No, not at the
 * rear!'  the slave-driver shouted. 'Three files up. And stay there...
 *
 *     [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"]
 */

/* This file contains pp ("push/pop") functions that
 * execute the opcodes that make up a perl program. A typical pp function
 * expects to find its arguments on the stack, and usually pushes its
 * results onto the stack, hence the 'pp' terminology. Each OP structure
 * contains a pointer to the relevant pp_foo() function.
 *
 * This particular file just contains pp_sort(), which is complex
 * enough to merit its own file! See the other pp*.c files for the rest of
 * the pp_ functions.
 */

#include "EXTERN.h"
#define PERL_IN_PP_SORT_C
#include "perl.h"

#if defined(UNDER_CE)
/* looks like 'small' is reserved word for WINCE (or somesuch) */
#define small xsmall
#endif

#define sv_cmp_static Perl_sv_cmp
#define sv_cmp_locale_static Perl_sv_cmp_locale

#ifndef SMALLSORT
#define SMALLSORT (200)
#endif

/* Flags for qsortsv and mergesortsv */
#define SORTf_DESC   1
#define SORTf_STABLE 2
#define SORTf_QSORT  4

/*
 * The mergesort implementation is by Peter M. McIlroy <pmcilroy@lucent.com>.
 *
 * The original code was written in conjunction with BSD Computer Software
 * Research Group at University of California, Berkeley.
 *
 * See also: "Optimistic Merge Sort" (SODA '92)
 *
 * The integration to Perl is by John P. Linderman <jpl@research.att.com>.
 *
 * The code can be distributed under the same terms as Perl itself.
 *
 */


typedef char * aptr;            /* pointer for arithmetic on sizes */
typedef SV * gptr;              /* pointers in our lists */

/* Binary merge internal sort, with a few special mods
** for the special perl environment it now finds itself in.
**
** Things that were once options have been hotwired
** to values suitable for this use.  In particular, we'll always
** initialize looking for natural runs, we'll always produce stable
** output, and we'll always do Peter McIlroy's binary merge.
*/

/* Pointer types for arithmetic and storage and convenience casts */

#define APTR(P) ((aptr)(P))
#define GPTP(P) ((gptr *)(P))
#define GPPP(P) ((gptr **)(P))


/* byte offset from pointer P to (larger) pointer Q */
#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))

#define PSIZE sizeof(gptr)

/* If PSIZE is power of 2, make PSHIFT that power, if that helps */

#ifdef PSHIFT
#define PNELEM(P, Q)    (BYTEOFF(P,Q) >> (PSHIFT))
#define PNBYTE(N)       ((N) << (PSHIFT))
#define PINDEX(P, N)    (GPTP(APTR(P) + PNBYTE(N)))
#else
/* Leave optimization to compiler */
#define PNELEM(P, Q)    (GPTP(Q) - GPTP(P))
#define PNBYTE(N)       ((N) * (PSIZE))
#define PINDEX(P, N)    (GPTP(P) + (N))
#endif

/* Pointer into other corresponding to pointer into this */
#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))

#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
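
/* Note that FROMTOUPTO is a do-while loop: it always copies at least one
 * element, so callers must guarantee src < lim on entry.  On exit, src has
 * reached lim and dst has advanced by the same number of elements, which
 * the merge code below relies on.
 */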

/* Runs are identified by a pointer in the auxiliary list.
** The pointer is at the start of the list,
** and it points to the start of the next list.
** NEXT is used as an lvalue, too.
*/

#define NEXT(P)         (*GPPP(P))


/* PTHRESH is the minimum number of pairs with the same sense to justify
** checking for a run and extending it.  Note that PTHRESH counts PAIRS,
** not just elements, so PTHRESH == 8 means a run of 16.
*/

#define PTHRESH (8)

/* RTHRESH is the number of elements in a run that must compare low
** to the low element from the opposing run before we justify
** doing a binary rampup instead of single stepping.
** In random input, N in a row low should only happen with
** probability 2^(1-N), so we can risk that we are dealing
** with orderly input without paying much when we aren't.
*/

#define RTHRESH (6)


/*
** Overview of algorithm and variables.
** The array of elements at list1 will be organized into runs of length 2,
** or runs of length >= 2 * PTHRESH.  We only try to form long runs when
** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
**
** Unless otherwise specified, pair pointers address the first of two elements.
**
** b and b+1 are a pair that compare with sense "sense".
** b is the "bottom" of adjacent pairs that might form a longer run.
**
** p2 parallels b in the list2 array, where runs are defined by
** a pointer chain.
**
** t represents the "top" of the adjacent pairs that might extend
** the run beginning at b.  Usually, t addresses a pair
** that compares with opposite sense from (b,b+1).
** However, it may also address a singleton element at the end of list1,
** or it may be equal to "last", the first element beyond list1.
**
** r addresses the Nth pair following b.  If this would be beyond t,
** we back it off to t.  Only when r is less than t do we consider the
** run long enough to consider checking.
**
** q addresses a pair such that the pairs at b through q already form a run.
** Often, q will equal b, indicating we are only sure of the pair itself.
** However, a search on the previous cycle may have revealed a longer run,
** so q may be greater than b.
**
** p is used to work back from a candidate r, trying to reach q,
** which would mean b through r would be a run.  If we discover such a run,
** we start q at r and try to push it further towards t.
** If b through r is NOT a run, we detect the wrong order at (p-1,p).
** In any event, after the check (if any), we have two main cases.
**
** 1) Short run.  b <= q < p <= r <= t.
**      b through q is a run (perhaps trivial)
**      q through p are uninteresting pairs
**      p through r is a run
**
** 2) Long run.  b < r <= q < t.
**      b through q is a run (of length >= 2 * PTHRESH)
**
** Note that degenerate cases are not only possible, but likely.
** For example, if the pair following b compares with opposite sense,
** then b == q < p == r == t.
*/


static IV
dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
{
    I32 sense;
    gptr *b, *p, *q, *t, *p2;
    gptr *last, *r;
    IV runs = 0;

    b = list1;
    last = PINDEX(b, nmemb);
    sense = (cmp(aTHX_ *b, *(b+1)) > 0);
    for (p2 = list2; b < last; ) {
        /* We just started, or just reversed sense.
        ** Set t at end of pairs with the prevailing sense.
        */
        for (p = b+2, t = p; ++p < last; t = ++p) {
            if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
        }
        q = b;
        /* Having laid out the playing field, look for long runs */
        do {
            p = r = b + (2 * PTHRESH);
            if (r >= t) p = r = t;      /* too short to care about */
            else {
                while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
                       ((p -= 2) > q)) {}
                if (p <= q) {
                    /* b through r is a (long) run.
                    ** Extend it as far as possible.
                    */
                    p = q = r;
                    while (((p += 2) < t) &&
                           ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
                    r = p = q + 2;      /* no simple pairs, no after-run */
                }
            }
            if (q > b) {                /* run of greater than 2 at b */
                gptr *savep = p;

                p = q += 2;
                /* pick up singleton, if possible */
                if ((p == t) &&
                    ((t + 1) == last) &&
                    ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
                    savep = r = p = q = last;
                p2 = NEXT(p2) = p2 + (p - b);       ++runs;
                if (sense)
                    while (b < --p) {
                        const gptr c = *b;
                        *b++ = *p;
                        *p = c;
                    }
                p = savep;
            }
            while (q < p) {             /* simple pairs */
                p2 = NEXT(p2) = p2 + 2; ++runs;
                if (sense) {
                    const gptr c = *q++;
                    *(q-1) = *q;
                    *q++ = c;
                } else q += 2;
            }
            if (((b = p) == t) && ((t+1) == last)) {
                NEXT(p2) = p2 + 1;      ++runs;
                b++;
            }
            q = r;
        } while (b < t);
        sense = !sense;
    }
    return runs;
}
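
/* For orientation, here is a minimal standalone sketch of natural-run
 * detection over plain ints (whole runs instead of pairs, no pointer
 * chain; an illustration of the idea only, NOT the dynprep algorithm
 * above).  Strictly descending stretches are reversed in place; the
 * strictness is what preserves stability.
 *
 *      static void rev(int *a, size_t lo, size_t hi)
 *      {
 *          while (lo + 1 < hi) {
 *              int t = a[lo]; a[lo++] = a[--hi]; a[hi] = t;
 *          }
 *      }
 *
 *      static size_t find_runs(int *a, size_t n)
 *      {
 *          size_t i = 0, runs = 0;
 *          while (i < n) {
 *              size_t j = i + 1;
 *              if (j < n && a[j] < a[i]) {
 *                  while (j < n && a[j] < a[j-1]) j++;
 *                  rev(a, i, j);
 *              } else {
 *                  while (j < n && a[j] >= a[j-1]) j++;
 *              }
 *              ++runs;
 *              i = j;
 *          }
 *          return runs;
 *      }
 *
 * On input {2,3,9, 8,7,4, 4,5} this finds 3 runs and leaves the array as
 * {2,3,9, 4,7,8, 4,5}; dynprep records the analogous boundaries in the
 * NEXT pointer chain and additionally handles the singleton and long-run
 * cases described above.
 */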

/* The original merge sort, in use since 5.7, was as fast as, or faster than,
 * qsort on many platforms, but slower than qsort, conspicuously so,
 * on others.  The most likely explanation was platform-specific
 * differences in cache sizes and relative speeds.
 *
 * The quicksort divide-and-conquer algorithm guarantees that, as the
 * problem is subdivided into smaller and smaller parts, the parts
 * fit into smaller (and faster) caches.  So it doesn't matter how
 * many levels of cache exist, quicksort will "find" them, and,
 * as long as smaller is faster, take advantage of them.
 *
 * By contrast, consider how the original mergesort algorithm worked.
 * Suppose we have five runs (each typically of length 2 after dynprep).
 *
 * pass               base                        aux
 *  0              1 2 3 4 5
 *  1                                         12 34 5
 *  2              1234 5
 *  3                                         12345
 *  4              12345
 *
 * Adjacent pairs are merged in "grand sweeps" through the input.
 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
 * runs 3 and 4 are merged and the records in run 5 have been copied.
 * The only cache that matters is one large enough to hold *all* the input.
 * On some platforms, this may be many times slower than smaller caches.
 *
 * The following pseudo-code uses the same basic merge algorithm,
 * but in a divide-and-conquer way.
 *
 * # merge $runs runs at offset $offset of list $list1 into $list2.
 * # all unmerged runs ($runs == 1) originate in list $base.
 * sub mgsort2 {
 *     my ($offset, $runs, $base, $list1, $list2) = @_;
 *
 *     if ($runs == 1) {
 *         if ($list1 is $base) copy run to $list2
 *         return offset of end of list (or copy)
 *     } else {
 *         $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
 *         mgsort2($off2, $runs/2, $base, $list2, $list1)
 *         merge the adjacent runs at $offset of $list1 into $list2
 *         return the offset of the end of the merged runs
 *     }
 * }
 * mgsort2(0, $runs, $base, $aux, $base);
 *
 * For our 5 runs, the tree of calls looks like
 *
 *                    5
 *                 3     2
 *               2   1  1 1
 *              1 1
 *
 *              1 2  3  4 5
 *
 * and the corresponding activity looks like
 *
 *      copy runs 1 and 2 from base to aux
 *      merge runs 1 and 2 from aux to base
 *      (run 3 is where it belongs, no copy needed)
 *      merge runs 12 and 3 from base to aux
 *      (runs 4 and 5 are where they belong, no copy needed)
 *      merge runs 4 and 5 from base to aux
 *      merge runs 123 and 45 from aux to base
 *
 * Note that we merge runs 1 and 2 immediately after copying them,
 * while they are still likely to be in fast cache.  Similarly,
 * run 3 is merged with run 12 while it still may be lingering in cache.
 * This implementation should therefore enjoy much of the cache-friendly
 * behavior that quicksort does.  In addition, it does less copying
 * than the original mergesort implementation (only runs 1 and 2 are copied)
 * and the "balancing" of merges is better (merged runs comprise more nearly
 * equal numbers of original runs).
 *
 * The actual cache-friendly implementation will use a pseudo-stack
 * to avoid recursion, and will unroll processing of runs of length 2,
 * but it is otherwise similar to the recursive implementation.
 */
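
/* The pseudo-code above, rendered as compilable C over plain ints for
 * exposition (a sketch: run lengths are supplied in len[], an assumption
 * made for the example; the real code below instead tracks run boundaries
 * through the NEXT pointer chain and replaces recursion with a
 * pseudo-stack):
 *
 *      static size_t mgsort2(size_t offset, size_t first, size_t nruns,
 *                            const size_t *len,
 *                            int *base, int *list1, int *list2)
 *      {
 *          size_t n1, mid, end, i, j, k;
 *
 *          if (nruns == 1) {
 *              end = offset + len[first];
 *              if (list1 == base)
 *                  for (i = offset; i < end; i++) list2[i] = list1[i];
 *              return end;
 *          }
 *          n1  = nruns - nruns / 2;
 *          mid = mgsort2(offset, first, n1, len, base, list2, list1);
 *          end = mgsort2(mid, first + n1, nruns - n1, len, base,
 *                        list2, list1);
 *          i = offset; j = mid; k = offset;
 *          while (i < mid && j < end)
 *              list2[k++] = (list1[j] < list1[i]) ? list1[j++] : list1[i++];
 *          while (i < mid) list2[k++] = list1[i++];
 *          while (j < end) list2[k++] = list1[j++];
 *          return end;
 *      }
 *
 * The top-level call is mgsort2(0, 0, nruns, len, base, aux, base), which
 * leaves the sorted result in base.  Taking the left (i) element on ties
 * keeps the merge stable, and the recursive calls swap list1/list2 so each
 * pair of half-results is constructed in the "other" array before merging,
 * exactly as in the trace above.
 */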

typedef struct {
    IV  offset;         /* offset of 1st of 2 runs at this level */
    IV  runs;           /* how many runs must be combined into 1 */
} off_runs;             /* pseudo-stack element */


static I32
cmp_desc(pTHX_ gptr const a, gptr const b)
{
    dVAR;
    return -PL_sort_RealCmp(aTHX_ a, b);
}

STATIC void
S_mergesortsv(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    dVAR;
    IV i, run, offset;
    I32 sense, level;
    gptr *f1, *f2, *t, *b, *p;
    int iwhich;
    gptr *aux;
    gptr *p1;
    gptr small[SMALLSORT];
    gptr *which[3];
    off_runs stack[60], *stackp;
    SVCOMPARE_t savecmp = NULL;

    if (nmemb <= 1) return;                     /* sorted trivially */

    if ((flags & SORTf_DESC) != 0) {
        savecmp = PL_sort_RealCmp;      /* Save current comparison routine, if any */
        PL_sort_RealCmp = cmp;          /* Put comparison routine where cmp_desc can find it */
        cmp = cmp_desc;
    }

    if (nmemb <= SMALLSORT) aux = small;        /* use stack for aux array */
    else { Newx(aux,nmemb,gptr); }              /* allocate auxiliary array */
    level = 0;
    stackp = stack;
    stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
    stackp->offset = offset = 0;
    which[0] = which[2] = base;
    which[1] = aux;
    for (;;) {
        /* On levels where both runs have been constructed (stackp->runs == 0),
         * merge them, and note the offset of their end, in case the offset
         * is needed at the next level up.  Hop up a level, and,
         * as long as stackp->runs is 0, keep merging.
         */
        IV runs = stackp->runs;
        if (runs == 0) {
            gptr *list1, *list2;
            iwhich = level & 1;
            list1 = which[iwhich];              /* area where runs are now */
            list2 = which[++iwhich];            /* area for merged runs */
            do {
                gptr *l1, *l2, *tp2;
                offset = stackp->offset;
                f1 = p1 = list1 + offset;       /* start of first run */
                p = tp2 = list2 + offset;       /* where merged run will go */
                t = NEXT(p);                    /* where first run ends */
                f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
                t = NEXT(t);                    /* where second run ends */
                l2 = POTHER(t, list2, list1);   /* ... on the other side */
                offset = PNELEM(list2, t);
                while (f1 < l1 && f2 < l2) {
                    /* If head 1 is larger than head 2, find ALL the elements
                    ** in list 2 strictly less than head1, write them all,
                    ** then head 1.  Then compare the new heads, and repeat,
                    ** until one or both lists are exhausted.
                    **
                    ** In all comparisons (after establishing
                    ** which head to merge) the item to merge
                    ** (at pointer q) is the first operand of
                    ** the comparison.  When we want to know
                    ** if "q is strictly less than the other",
                    ** we can't just do
                    **    cmp(q, other) < 0
                    ** because stability demands that we treat equality
                    ** as high when q comes from l2, and as low when
                    ** q was from l1.  So we ask the question by doing
                    **    cmp(q, other) <= sense
                    ** and make sense == 0 when equality should look low,
                    ** and -1 when equality should look high.
                    */

                    gptr *q;
                    if (cmp(aTHX_ *f1, *f2) <= 0) {
                        q = f2; b = f1; t = l1;
                        sense = -1;
                    } else {
                        q = f1; b = f2; t = l2;
                        sense = 0;
                    }


                    /* ramp up
                    **
                    ** Leave t at something strictly
                    ** greater than q (or at the end of the list),
                    ** and b at something strictly less than q.
                    */
                    for (i = 1, run = 0 ;;) {
                        if ((p = PINDEX(b, i)) >= t) {
                            /* off the end */
                            if (((p = PINDEX(t, -1)) > b) &&
                                (cmp(aTHX_ *q, *p) <= sense))
                                t = p;
                            else b = p;
                            break;
                        } else if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                            break;
                        } else b = p;
                        if (++run >= RTHRESH) i += i;
                    }


                    /* q is known to follow b and must be inserted before t.
                    ** Increment b, so the range of possibilities is [b,t).
                    ** Round binary split down, to favor early appearance.
                    ** Adjust b and t until q belongs just before t.
                    */

                    b++;
                    while (b < t) {
                        p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
                        if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                        } else b = p + 1;
                    }


                    /* Copy all the strictly low elements */

                    if (q == f1) {
                        FROMTOUPTO(f2, tp2, t);
                        *tp2++ = *f1++;
                    } else {
                        FROMTOUPTO(f1, tp2, t);
                        *tp2++ = *f2++;
                    }
                }


                /* Run out remaining list */
                if (f1 == l1) {
                    if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
                } else FROMTOUPTO(f1, tp2, l1);
                p1 = NEXT(p1) = POTHER(tp2, list2, list1);

                if (--level == 0) goto done;
                --stackp;
                t = list1; list1 = list2; list2 = t;    /* swap lists */
            } while ((runs = stackp->runs) == 0);
        }


        stackp->runs = 0;       /* current run will finish level */
        /* While there are more than 2 runs remaining,
         * turn them into exactly 2 runs (at the "other" level),
         * each made up of approximately half the runs.
         * Stack the second half for later processing,
         * and set about producing the first half now.
         */
        while (runs > 2) {
            ++level;
            ++stackp;
            stackp->offset = offset;
            runs -= stackp->runs = runs / 2;
        }
        /* We must construct a single run from 1 or 2 runs.
         * All the original runs are in which[0] == base.
         * The run we construct must end up in which[level&1].
         */
        iwhich = level & 1;
        if (runs == 1) {
            /* Constructing a single run from a single run.
             * If it's where it belongs already, there's nothing to do.
             * Otherwise, copy it to where it belongs.
             * A run of 1 is either a singleton at level 0,
             * or the second half of a split 3.  In neither event
             * is it necessary to set offset.  It will be set by the merge
             * that immediately follows.
             */
            if (iwhich) {       /* Belongs in aux, currently in base */
                f1 = b = PINDEX(base, offset);  /* where list starts */
                f2 = PINDEX(aux, offset);       /* where list goes */
                t = NEXT(f2);                   /* where list will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                t = PINDEX(base, offset);       /* where it currently ends */
                FROMTOUPTO(f1, f2, t);          /* copy */
                NEXT(b) = t;                    /* set up parallel pointer */
            } else if (level == 0) goto done;   /* single run at level 0 */
        } else {
            /* Constructing a single run from two runs.
             * The merge code at the top will do that.
             * We need only make sure the two runs are in the "other" array,
             * so they'll end up in the correct array after the merge.
             */
            ++level;
            ++stackp;
            stackp->offset = offset;
            stackp->runs = 0;   /* take care of both runs, trigger merge */
            if (!iwhich) {      /* Merged runs belong in aux, copy 1st */
                f1 = b = PINDEX(base, offset);  /* where first run starts */
                f2 = PINDEX(aux, offset);       /* where it will be copied */
                t = NEXT(f2);                   /* where first run will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                p = PINDEX(base, offset);       /* end of first run */
                t = NEXT(t);                    /* where second run will end */
                t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
                FROMTOUPTO(f1, f2, t);          /* copy both runs */
                NEXT(b) = p;                    /* parallel pointer for 1st */
                NEXT(p) = t;                    /* ... and for second */
            }
        }
    }
  done:
    if (aux != small) Safefree(aux);    /* free iff allocated */
    if ((flags & SORTf_DESC) != 0) {
        PL_sort_RealCmp = savecmp;      /* Restore current comparison routine, if any */
    }
    return;
}
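
/* The "ramp up" plus binary-split sequence in the merge above is an
 * exponential (galloping) search.  As a standalone sketch over plain
 * ints, here is the same idea finding the first index whose element is
 * strictly greater than key (the code above generalizes the comparison
 * with "sense" so that ties break toward the correct run, and
 * single-steps RTHRESH times before it starts doubling):
 *
 *      static size_t gallop_upper(const int *a, size_t n, int key)
 *      {
 *          size_t lo, hi;
 *          if (n == 0 || a[0] > key)
 *              return 0;
 *          lo = 0;
 *          hi = 1;
 *          while (hi < n && a[hi] <= key) {
 *              lo = hi;
 *              hi *= 2;
 *          }
 *          if (hi > n)
 *              hi = n;
 *          while (lo + 1 < hi) {
 *              size_t mid = lo + (hi - lo) / 2;
 *              if (a[mid] <= key)
 *                  lo = mid;
 *              else
 *                  hi = mid;
 *          }
 *          return hi;
 *      }
 *
 * If the insertion point is near the front, this costs O(log d) compares,
 * where d is the distance to that point, instead of O(log n) for a full
 * binary search or O(d) for single stepping.
 */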

/*
 * The quicksort implementation was derived from source code contributed
 * by Tom Horsley.
 *
 * NOTE: this code was derived from Tom Horsley's qsort replacement
 * and should not be confused with the original code.
 */

/* Copyright (C) Tom Horsley, 1997. All rights reserved.

   Permission granted to distribute under the same terms as perl which are
   (briefly):

        This program is free software; you can redistribute it and/or modify
        it under the terms of either:

        a) the GNU General Public License as published by the Free
        Software Foundation; either version 1, or (at your option) any
        later version, or

        b) the "Artistic License" which comes with this Kit.

   Details on the perl license can be found in the perl source code which
   may be located via the www.perl.com web page.

   This is the most wonderfulest possible qsort I can come up with (and
   still be mostly portable).  My (limited) tests indicate it consistently
   does about 20% fewer calls to compare than does the qsort in the Visual
   C++ library, other vendors may vary.

   Some of the ideas in here can be found in "Algorithms" by Sedgewick,
   others I invented myself (or more likely re-invented since they seemed
   pretty obvious once I watched the algorithm operate for a while).

   Most of this code was written while watching the Marlins sweep the Giants
   in the 1997 National League Playoffs - no Braves fans allowed to use this
   code (just kidding :-).

   I realize that if I wanted to be true to the perl tradition, the only
   comment in this file would be something like:

   ...they shuffled back towards the rear of the line. 'No, not at the
   rear!' the slave-driver shouted. 'Three files up. And stay there...

   However, I really needed to violate that tradition just so I could keep
   track of what happens myself, not to mention some poor fool trying to
   understand this years from now :-).
*/

/* ********************************************************** Configuration */

#ifndef QSORT_ORDER_GUESS
#define QSORT_ORDER_GUESS 2     /* Select doubling version of the netBSD trick */
#endif

/* QSORT_MAX_STACK is the largest number of partitions that can be stacked up
   for future processing - a good max upper bound is log base 2 of memory size
   (32 on 32 bit machines, 64 on 64 bit machines, etc).  In reality it can
   safely be smaller than that since the program is taking up some space and
   most operating systems only let you grab some subset of contiguous
   memory (not to mention that you are normally sorting data larger than
   1 byte element size :-).
*/
#ifndef QSORT_MAX_STACK
#define QSORT_MAX_STACK 32
#endif

/* QSORT_BREAK_EVEN is the size of the largest partition we should insertion
   sort.  Anything bigger and we use qsort.  If you make this too small, the
   qsort will probably break (or become less efficient), because it doesn't
   expect the middle element of a partition to be the same as the right or
   left - you have been warned.
*/
#ifndef QSORT_BREAK_EVEN
#define QSORT_BREAK_EVEN 6
#endif

/* QSORT_PLAY_SAFE is the size of the largest partition we're willing
   to go quadratic on.  We inoculate larger partitions against
   quadratic behavior by shuffling them before sorting.  This is not
   an absolute guarantee of non-quadratic behavior, but it would take
   staggeringly bad luck to pick extreme elements as the pivot
   from randomized data.
*/
#ifndef QSORT_PLAY_SAFE
#define QSORT_PLAY_SAFE 255
#endif

/* ************************************************************* Data Types */

/* hold left and right index values of a partition waiting to be sorted (the
   partition includes both left and right - right is NOT one past the end or
   anything like that).
*/
struct partition_stack_entry {
    int left;
    int right;
#ifdef QSORT_ORDER_GUESS
    int qsort_break_even;
#endif
};

/* ******************************************************* Shorthand Macros */

/* Note that these macros will be used from inside the qsort function where
   we happen to know that the variable 'elt_size' contains the size of an
   array element and the variable 'temp' points to enough space to hold a
   temp element and the variable 'array' points to the array being sorted
   and 'compare' is the pointer to the compare routine.

   Also note that there are very many highly architecture specific ways
   these might be sped up, but this is simply the most generally portable
   code I could think of.
*/

/* Return <0, ==0, or >0 as the value of elt1 is <, ==, or > the value of
   elt2.
*/
#define qsort_cmp(elt1, elt2) \
    ((*compare)(aTHX_ array[elt1], array[elt2]))

#ifdef QSORT_ORDER_GUESS
#define QSORT_NOTICE_SWAP swapped++;
#else
#define QSORT_NOTICE_SWAP
#endif

/* swaps contents of array elements elt1, elt2.
*/
#define qsort_swap(elt1, elt2) \
    STMT_START { \
        QSORT_NOTICE_SWAP \
        temp = array[elt1]; \
        array[elt1] = array[elt2]; \
        array[elt2] = temp; \
    } STMT_END

/* rotate contents of elt1, elt2, elt3 such that elt1 gets elt2, elt2 gets
   elt3 and elt3 gets elt1.
*/
#define qsort_rotate(elt1, elt2, elt3) \
    STMT_START { \
        QSORT_NOTICE_SWAP \
        temp = array[elt1]; \
        array[elt1] = array[elt2]; \
        array[elt2] = array[elt3]; \
        array[elt3] = temp; \
    } STMT_END

/* ************************************************************ Debug stuff */

#ifdef QSORT_DEBUG

static void
break_here()
{
    return; /* good place to set a breakpoint */
}

#define qsort_assert(t) (void)( (t) || (break_here(), 0) )

static void
doqsort_all_asserts(
    void * array,
    size_t num_elts,
    size_t elt_size,
    int (*compare)(const void * elt1, const void * elt2),
    int pc_left, int pc_right, int u_left, int u_right)
{
    int i;

    qsort_assert(pc_left <= pc_right);
    qsort_assert(u_right < pc_left);
    qsort_assert(pc_right < u_left);
    for (i = u_right + 1; i < pc_left; ++i) {
        qsort_assert(qsort_cmp(i, pc_left) < 0);
    }
    for (i = pc_left; i < pc_right; ++i) {
        qsort_assert(qsort_cmp(i, pc_right) == 0);
    }
    for (i = pc_right + 1; i < u_left; ++i) {
        qsort_assert(qsort_cmp(pc_right, i) < 0);
    }
}

#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) \
    doqsort_all_asserts(array, num_elts, elt_size, compare, \
                        PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT)

#else

#define qsort_assert(t) ((void)0)

#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) ((void)0)

#endif

/* ****************************************************************** qsort */

STATIC void /* the standard unstable (u) quicksort (qsort) */
S_qsortsvu(pTHX_ SV ** array, size_t num_elts, SVCOMPARE_t compare)
{
    SV * temp;
    struct partition_stack_entry partition_stack[QSORT_MAX_STACK];
    int next_stack_entry = 0;
    int part_left;
    int part_right;
#ifdef QSORT_ORDER_GUESS
    int qsort_break_even;
    int swapped;
#endif

    PERL_ARGS_ASSERT_QSORTSVU;

    /* Make sure we actually have work to do.
    */
    if (num_elts <= 1) {
        return;
    }

    /* Inoculate large partitions against quadratic behavior */
    if (num_elts > QSORT_PLAY_SAFE) {
        size_t n;
        SV ** const q = array;
        for (n = num_elts; n > 1; ) {
            const size_t j = (size_t)(n-- * Drand01());
            temp = q[j];
            q[j] = q[n];
            q[n] = temp;
        }
    }

    /* Set up the initial partition definition and fall into the sorting loop
    */
    part_left = 0;
    part_right = (int)(num_elts - 1);
#ifdef QSORT_ORDER_GUESS
    qsort_break_even = QSORT_BREAK_EVEN;
#else
#define qsort_break_even QSORT_BREAK_EVEN
#endif
    for ( ; ; ) {
        if ((part_right - part_left) >= qsort_break_even) {
            /* OK, this is gonna get hairy, so let's try to document all the
               concepts and abbreviations and variables and what they keep
               track of:

               pc: pivot chunk - the set of array elements we accumulate in
               the middle of the partition, all equal in value to the
               original pivot element selected. The pc is defined by:

               pc_left - the leftmost array index of the pc
               pc_right - the rightmost array index of the pc

               we start with pc_left == pc_right and only one element
               in the pivot chunk (but it can grow during the scan).

               u: uncompared elements - the set of elements in the partition
               we have not yet compared to the pivot value. There are two
               uncompared sets during the scan - one to the left of the pc
               and one to the right.

               u_right - the rightmost index of the left side's uncompared set
               u_left - the leftmost index of the right side's uncompared set

               The leftmost index of the left side's uncompared set
               doesn't need its own variable because it is always defined
               by the leftmost edge of the whole partition (part_left). The
               same goes for the rightmost edge of the right partition
               (part_right).

               We know there are no uncompared elements on the left once we
               get u_right < part_left and no uncompared elements on the
               right once u_left > part_right. When both these conditions
               are met, we have completed the scan of the partition.

               Any elements which are between the pivot chunk and the
               uncompared elements should be less than the pivot value on
               the left side and greater than the pivot value on the right
               side (in fact, the goal of the whole algorithm is to arrange
               for that to be true and make the groups of less-than and
               greater-than elements into new partitions to sort again).

               As you marvel at the complexity of the code and wonder why it
               has to be so confusing, consider some of the things this level
               of confusion brings:

               Once I do a compare, I squeeze every ounce of juice out of it. I
               never do compare calls I don't have to do, and I certainly never
               do redundant calls.

               I also never swap any elements unless I can prove there is a
               good reason. Many sort algorithms will swap a known value with
               an uncompared value just to get things in the right place (or
               avoid complexity :-), but that uncompared value, once it gets
               compared, may then have to be swapped again. A lot of the
               complexity of this code is due to the fact that it never swaps
               anything except compared values, and it only swaps them when
               the compare shows they are out of position.
            */
            int pc_left, pc_right;
            int u_right, u_left;

            int s;

            pc_left = ((part_left + part_right) / 2);
            pc_right = pc_left;
            u_right = pc_left - 1;
            u_left = pc_right + 1;

            /* Qsort works best when the pivot value is also the median value
               in the partition (unfortunately you can't find the median
               value without first sorting :-), so to give the algorithm a
               helping hand, we pick 3 elements and sort them and use the
               median value of that tiny set as the pivot value.

               Some versions of qsort like to use the left, middle and right
               as the 3 elements to sort so they can ensure the ends of the
               partition will contain values which will stop the scan in the
               compare loop, but when you have to call an arbitrarily complex
               routine to do a compare, it's really better to just keep track
               of array index values to know when you hit the edge of the
               partition and avoid the extra compare. An even better reason
               to avoid using a compare call is the fact that you can drop
               off the edge of the array if someone foolishly provides you
               with an unstable compare function that doesn't always provide
               consistent results.

               So, since it is simpler for us to compare the three adjacent
               elements in the middle of the partition, those are the ones we
               pick here (conveniently pointed at by u_right, pc_left, and
               u_left). The values of the left, center, and right elements
               are referred to as l, c, and r in the following comments.
            */

#ifdef QSORT_ORDER_GUESS
            swapped = 0;
#endif
            s = qsort_cmp(u_right, pc_left);
            if (s < 0) {
                /* l < c */
                s = qsort_cmp(pc_left, u_left);
                /* if l < c, c < r - already in order - nothing to do */
                if (s == 0) {
                    /* l < c, c == r - already in order, pc grows */
                    ++pc_right;
                    qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                } else if (s > 0) {
                    /* l < c, c > r - need to know more */
                    s = qsort_cmp(u_right, u_left);
                    if (s < 0) {
                        /* l < c, c > r, l < r - swap c & r to get ordered */
                        qsort_swap(pc_left, u_left);
                        qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                    } else if (s == 0) {
                        /* l < c, c > r, l == r - swap c & r, grow pc */
                        qsort_swap(pc_left, u_left);
                        --pc_left;
                        qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                    } else {
                        /* l < c, c > r, l > r - make lcr into rlc to get ordered */
                        qsort_rotate(pc_left, u_right, u_left);
                        qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                    }
                }
            } else if (s == 0) {
                /* l == c */
                s = qsort_cmp(pc_left, u_left);
                if (s < 0) {
                    /* l == c, c < r - already in order, grow pc */
                    --pc_left;
                    qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                } else if (s == 0) {
                    /* l == c, c == r - already in order, grow pc both ways */
                    --pc_left;
                    ++pc_right;
                    qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                } else {
                    /* l == c, c > r - swap l & r, grow pc */
                    qsort_swap(u_right, u_left);
                    ++pc_right;
                    qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                }
            } else {
                /* l > c */
                s = qsort_cmp(pc_left, u_left);
                if (s < 0) {
                    /* l > c, c < r - need to know more */
                    s = qsort_cmp(u_right, u_left);
                    if (s < 0) {
                        /* l > c, c < r, l < r - swap l & c to get ordered */
                        qsort_swap(u_right, pc_left);
                        qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                    } else if (s == 0) {
                        /* l > c, c < r, l == r - swap l & c, grow pc */
                        qsort_swap(u_right, pc_left);
                        ++pc_right;
                        qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                    } else {
                        /* l > c, c < r, l > r - rotate lcr into crl to order */
                        qsort_rotate(u_right, pc_left, u_left);
                        qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                    }
                } else if (s == 0) {
                    /* l > c, c == r - swap ends, grow pc */
                    qsort_swap(u_right, u_left);
                    --pc_left;
                    qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                } else {
                    /* l > c, c > r - swap ends to get in order */
                    qsort_swap(u_right, u_left);
                    qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
                }
            }
            /* We now know the 3 middle elements have been compared and
               arranged in the desired order, so we can shrink the uncompared
               sets on both sides
            */
            --u_right;
            ++u_left;
            qsort_all_asserts(pc_left, pc_right, u_left, u_right);

            /* The above massive nested if was the simple part :-). We now
               have the middle 3 elements ordered and we need to scan through
               the uncompared sets on either side, swapping elements that are
               on the wrong side or simply shuffling equal elements around to
               get all equal elements into the pivot chunk.
            */

            for ( ; ; ) {
                int still_work_on_left;
                int still_work_on_right;

                /* Scan the uncompared values on the left. If I find a value
                   equal to the pivot value, move it over so it is adjacent
                   to the pivot chunk and expand the pivot chunk. If I find
                   a value less than the pivot value, then just leave it -
                   it's already on the correct side of the partition. If I
                   find a greater value, then stop the scan.
                */
                while ((still_work_on_left = (u_right >= part_left))) {
                    s = qsort_cmp(u_right, pc_left);
                    if (s < 0) {
                        --u_right;
                    } else if (s == 0) {
                        --pc_left;
                        if (pc_left != u_right) {
                            qsort_swap(u_right, pc_left);
                        }
                        --u_right;
                    } else {
                        break;
                    }
                    qsort_assert(u_right < pc_left);
                    qsort_assert(pc_left <= pc_right);
                    qsort_assert(qsort_cmp(u_right + 1, pc_left) <= 0);
                    qsort_assert(qsort_cmp(pc_left, pc_right) == 0);
                }

                /* Do a mirror image scan of uncompared values on the right
                */
                while ((still_work_on_right = (u_left <= part_right))) {
                    s = qsort_cmp(pc_right, u_left);
                    if (s < 0) {
                        ++u_left;
                    } else if (s == 0) {
                        ++pc_right;
                        if (pc_right != u_left) {
                            qsort_swap(pc_right, u_left);
                        }
                        ++u_left;
                    } else {
                        break;
                    }
                    qsort_assert(u_left > pc_right);
                    qsort_assert(pc_left <= pc_right);
                    qsort_assert(qsort_cmp(pc_right, u_left - 1) <= 0);
                    qsort_assert(qsort_cmp(pc_left, pc_right) == 0);
                }

                if (still_work_on_left) {
                    /* I know I have a value on the left side which needs to
                       be on the right side, but I need to know more to
                       decide exactly the best thing to do with it.
                    */
                    if (still_work_on_right) {
                        /* I know I have values on both sides which are out
                           of position. This is a big win because I kill two
                           birds with one swap (so to speak). I can advance
                           the uncompared pointers on both sides after
                           swapping both of them into the right place.
                        */
                        qsort_swap(u_right, u_left);
                        --u_right;
                        ++u_left;
                        qsort_all_asserts(pc_left, pc_right, u_left, u_right);
                    } else {
                        /* I have an out of position value on the left, but
                           the right is fully scanned, so I "slide" the pivot
                           chunk and any less-than values left one to make
                           room for the greater value over on the right. If
                           the out of position value is immediately adjacent
                           to the pivot chunk (there are no less-than
                           values), I can do that with a swap, otherwise, I
                           have to rotate one of the less-than values into
                           the former position of the out of position value
                           and the right end of the pivot chunk into the left
                           end (got all that?).
                        */
                        --pc_left;
                        if (pc_left == u_right) {
                            qsort_swap(u_right, pc_right);
                            qsort_all_asserts(pc_left, pc_right-1, u_left, u_right-1);
                        } else {
                            qsort_rotate(u_right, pc_left, pc_right);
                            qsort_all_asserts(pc_left, pc_right-1, u_left, u_right-1);
                        }
                        --pc_right;
                        --u_right;
                    }
                } else if (still_work_on_right) {
                    /* Mirror image of complex case above: I have an out of
                       position value on the right, but the left is fully
                       scanned, so I need to shuffle things around to make
                       room for the right value on the left.
                    */
                    ++pc_right;
                    if (pc_right == u_left) {
                        qsort_swap(u_left, pc_left);
                        qsort_all_asserts(pc_left+1, pc_right, u_left+1, u_right);
                    } else {
                        qsort_rotate(pc_right, pc_left, u_left);
                        qsort_all_asserts(pc_left+1, pc_right, u_left+1, u_right);
                    }
                    ++pc_left;
                    ++u_left;
                } else {
                    /* No more scanning required on either side of partition,
                       break out of loop and figure out next set of
                       partitions
                    */
                    break;
                }
            }

            /* The elements in the pivot chunk are now in the right place.
               They will never move or be compared again. All I have to do
               is decide what to do with the stuff to the left and right of
               the pivot chunk.

               Notes on the QSORT_ORDER_GUESS ifdef code:

               1. If I just built these partitions without swapping any (or
               very many) elements, there is a chance that the elements are
               already ordered properly (being properly ordered will
               certainly result in no swapping, but the converse can't be
               proved :-).

               2. A (properly written) insertion sort will run faster on
               already ordered data than qsort will.

               3. Perhaps there is some way to make a good guess about
               switching to an insertion sort earlier than partition size 6
               (for instance - we could save the partition size on the stack
               and increase the size each time we find we didn't swap, thus
               switching to insertion sort earlier for partitions with a
               history of not swapping).

               4. Naturally, if I just switch right away, it will make
               artificial benchmarks with pure ascending (or descending)
               data look really good, but is that a good reason in general?
               Hard to say...
            */

#ifdef QSORT_ORDER_GUESS
            if (swapped < 3) {
#if QSORT_ORDER_GUESS == 1
                qsort_break_even = (part_right - part_left) + 1;
#endif
#if QSORT_ORDER_GUESS == 2
                qsort_break_even *= 2;
#endif
#if QSORT_ORDER_GUESS == 3
                const int prev_break = qsort_break_even;
                qsort_break_even *= qsort_break_even;
                if (qsort_break_even < prev_break) {
                    qsort_break_even = (part_right - part_left) + 1;
                }
#endif
            } else {
                qsort_break_even = QSORT_BREAK_EVEN;
            }
#endif

            if (part_left < pc_left) {
                /* There are elements on the left which need more processing.
                   Check the right as well before deciding what to do.
                */
                if (pc_right < part_right) {
                    /* We have two partitions to be sorted. Stack the biggest
                       one and process the smallest one on the next
                       iteration. This minimizes the stack height by ensuring
                       that any additional stack entries must come from the
                       smallest partition which (because it is smallest) will
                       have the fewest opportunities to generate additional
                       stack entries.
                    */
                    if ((part_right - pc_right) > (pc_left - part_left)) {
                        /* stack the right partition, process the left */
                        partition_stack[next_stack_entry].left = pc_right + 1;
                        partition_stack[next_stack_entry].right = part_right;
#ifdef QSORT_ORDER_GUESS
                        partition_stack[next_stack_entry].qsort_break_even = qsort_break_even;
#endif
                        part_right = pc_left - 1;
                    } else {
                        /* stack the left partition, process the right */
                        partition_stack[next_stack_entry].left = part_left;
                        partition_stack[next_stack_entry].right = pc_left - 1;
#ifdef QSORT_ORDER_GUESS
                        partition_stack[next_stack_entry].qsort_break_even = qsort_break_even;
#endif
                        part_left = pc_right + 1;
                    }
                    qsort_assert(next_stack_entry < QSORT_MAX_STACK);
                    ++next_stack_entry;
                } else {
                    /* The elements on the left are the only remaining
                       elements that need sorting, arrange for them to be
                       processed as the next partition.
                    */
                    part_right = pc_left - 1;
                }
            } else if (pc_right < part_right) {
                /* There is only one chunk on the right to be sorted, make it
                   the new partition and loop back around.
                */
                part_left = pc_right + 1;
            } else {
                /* This whole partition wound up in the pivot chunk, so
                   we need to get a new partition off the stack.
                */
                if (next_stack_entry == 0) {
                    /* the stack is empty - we are done */
                    break;
                }
                --next_stack_entry;
                part_left = partition_stack[next_stack_entry].left;
                part_right = partition_stack[next_stack_entry].right;
#ifdef QSORT_ORDER_GUESS
                qsort_break_even = partition_stack[next_stack_entry].qsort_break_even;
#endif
            }
        } else {
            /* This partition is too small to fool with qsort complexity,
               just do an ordinary insertion sort to minimize overhead.
            */
            int i;
            /* Assume 1st element is in right place already, and start
               checking at 2nd element to see where it should be inserted.
            */
            for (i = part_left + 1; i <= part_right; ++i) {
                int j;
                /* Scan (backwards - just in case 'i' is already in right
                   place) through the elements already sorted to see if the
                   ith element belongs ahead of one of them.
                */
                for (j = i - 1; j >= part_left; --j) {
                    if (qsort_cmp(i, j) >= 0) {
                        /* i belongs right after j
                        */
                        break;
                    }
                }
                ++j;
                if (j != i) {
                    /* Looks like we really need to move some things
                    */
                    int k;
                    temp = array[i];
                    for (k = i - 1; k >= j; --k)
                        array[k + 1] = array[k];
                    array[j] = temp;
                }
            }

            /* That partition is now sorted, grab the next one, or get out
               of the loop if there aren't any more.
            */

            if (next_stack_entry == 0) {
                /* the stack is empty - we are done */
                break;
            }
            --next_stack_entry;
            part_left = partition_stack[next_stack_entry].left;
            part_right = partition_stack[next_stack_entry].right;
#ifdef QSORT_ORDER_GUESS
            qsort_break_even = partition_stack[next_stack_entry].qsort_break_even;
#endif
        }
    }

    /* Believe it or not, the array is sorted at this point! */
}

/* Stabilize what is, presumably, an otherwise unstable sort method.
 * We do that by allocating (or having on hand) an array of pointers
 * that is the same size as the original array of elements to be sorted.
 * We initialize this parallel array with the addresses of the original
 * array elements.  This indirection can make you crazy.
 * Some pictures can help.  After initializing, we have
 *
 *  indir                  list1
 *  +----+                 +----+
 *  |    | --------------> |    | ------> first element to be sorted
 *  +----+                 +----+
 *  |    | --------------> |    | ------> second element to be sorted
 *  +----+                 +----+
 *  |    | --------------> |    | ------> third element to be sorted
 *  +----+                 +----+
 *  ...
 *  +----+                 +----+
 *  |    | --------------> |    | ------> n-1st element to be sorted
 *  +----+                 +----+
 *  |    | --------------> |    | ------> n-th element to be sorted
 *  +----+                 +----+
 *
 * During the sort phase, we leave the elements of list1 where they are,
 * and sort the pointers in the indirect array into the order determined
 * by the original comparison routine on the elements pointed to.
 * Because we don't move the elements of list1 around through
 * this phase, we can break ties on elements that compare equal
 * using their address in the list1 array, ensuring stability.
 * This leaves us with something looking like
 *
 *  indir                  list1
 *  +----+                 +----+
 *  |    | --+       +---> |    | ------> first element to be sorted
 *  +----+   |       |     +----+
 *  |    | --|-------|---> |    | ------> second element to be sorted
 *  +----+   |       |     +----+
 *  |    | --|-------+ +-> |    | ------> third element to be sorted
 *  +----+   |         |   +----+
 *  ...
 *  +----+    |  |   | |   +----+
 *  |    | ---|-+    | +-> |    | ------> n-1st element to be sorted
 *  +----+    |      |     +----+
 *  |    | ---+      +---> |    | ------> n-th element to be sorted
 *  +----+                 +----+
 *
 * where the i-th element of the indirect array points to the element
 * that should be i-th in the sorted array.  After the sort phase,
 * we have to put the elements of list1 into the places
 * dictated by the indirect array.
 */
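
/* The same trick in miniature, standalone (plain C, libc qsort(3), ints;
 * names are for this sketch only).  Because the address tiebreak makes the
 * comparison a total order with no ties at all, ANY correct sorting
 * routine becomes stable under it:
 *
 *      #include <stdlib.h>
 *
 *      static int indirect_cmp(const void *a, const void *b)
 *      {
 *          const int *pa = *(const int * const *)a;
 *          const int *pb = *(const int * const *)b;
 *          if (*pa != *pb)
 *              return (*pa < *pb) ? -1 : 1;
 *          return (pa < pb) ? -1 : (pa > pb) ? 1 : 0;
 *      }
 *
 * and, to use it:
 *
 *      int data[5] = {3, 1, 3, 2, 1};
 *      const int *indir[5];
 *      size_t i;
 *      for (i = 0; i < 5; i++) indir[i] = &data[i];
 *      qsort(indir, 5, sizeof *indir, indirect_cmp);
 *
 * afterwards indir[0..4] point at 1, 1, 2, 3, 3 with equal elements in
 * their original order, while data itself is untouched (the fix-up phase
 * that moves the elements themselves follows below).
 */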

static I32
cmpindir(pTHX_ gptr const a, gptr const b)
{
    dVAR;
    gptr * const ap = (gptr *)a;
    gptr * const bp = (gptr *)b;
    const I32 sense = PL_sort_RealCmp(aTHX_ *ap, *bp);

    if (sense)
        return sense;
    return (ap > bp) ? 1 : ((ap < bp) ? -1 : 0);
}

static I32
cmpindir_desc(pTHX_ gptr const a, gptr const b)
{
    dVAR;
    gptr * const ap = (gptr *)a;
    gptr * const bp = (gptr *)b;
    const I32 sense = PL_sort_RealCmp(aTHX_ *ap, *bp);

    /* Reverse the default */
    if (sense)
        return -sense;
    /* But don't reverse the stability test. */
    return (ap > bp) ? 1 : ((ap < bp) ? -1 : 0);
}

STATIC void
S_qsortsv(pTHX_ gptr *list1, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    dVAR;
    if ((flags & SORTf_STABLE) != 0) {
        gptr **pp, *q;
        size_t n, j, i;
        gptr *small[SMALLSORT], **indir, tmp;
        SVCOMPARE_t savecmp;
        if (nmemb <= 1) return;         /* sorted trivially */

        /* Small arrays can use the stack, big ones must be allocated */
        if (nmemb <= SMALLSORT) indir = small;
        else { Newx(indir, nmemb, gptr *); }

        /* Copy pointers to original array elements into indirect array */
        for (n = nmemb, pp = indir, q = list1; n--; ) *pp++ = q++;

        savecmp = PL_sort_RealCmp;      /* Save current comparison routine, if any */
        PL_sort_RealCmp = cmp;          /* Put comparison routine where cmpindir can find it */

        /* sort, with indirection */
        if (flags & SORTf_DESC)
            qsortsvu((gptr *)indir, nmemb, cmpindir_desc);
        else
            qsortsvu((gptr *)indir, nmemb, cmpindir);

        pp = indir;
        q = list1;
        for (n = nmemb; n--; ) {
            /* Assert A: all elements of q with index > n are already
             * in place.  This is vacuously true at the start, and we
             * put element n where it belongs below (if it wasn't
             * already where it belonged).  Assert B: we only move
             * elements that aren't where they belong,
             * so, by A, we never tamper with elements above n.
             */
            j = pp[n] - q;      /* This sets j so that q[j] is
                                 * at pp[n].  *pp[j] belongs in
                                 * q[j], by construction.
                                 */
            if (n != j) {       /* all's well if n == j */
                tmp = q[j];     /* save what's in q[j] */
                do {
                    q[j] = *pp[j];      /* put *pp[j] where it belongs */
                    i = pp[j] - q;      /* the index in q of the element
                                         * just moved */
                    pp[j] = q + j;      /* this is ok now */
                } while ((j = i) != n);
                /* There are only finitely many (nmemb) addresses
                 * in the pp array.
                 * So we must eventually revisit an index we saw before.
                 * Suppose the first revisited index is k != n.
                 * An index is visited because something else belongs there.
                 * If we visit k twice, then two different elements must
                 * belong in the same place, which cannot be.
                 * So j must get back to n, the loop terminates,
                 * and we put the saved element where it belongs.
                 */
                q[n] = tmp;     /* put what belongs into
                                 * the n-th element */
            }
        }

        /* free iff allocated */
        if (indir != small) { Safefree(indir); }
        /* restore prevailing comparison routine */
        PL_sort_RealCmp = savecmp;
    } else if ((flags & SORTf_DESC) != 0) {
        const SVCOMPARE_t savecmp = PL_sort_RealCmp;    /* Save current comparison routine, if any */
        PL_sort_RealCmp = cmp;  /* Put comparison routine where cmp_desc can find it */
        cmp = cmp_desc;
        qsortsvu(list1, nmemb, cmp);
        /* restore prevailing comparison routine */
        PL_sort_RealCmp = savecmp;
    } else {
        qsortsvu(list1, nmemb, cmp);
    }
}
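
/* The pointer-rewriting loop above applies a permutation in place by
 * following its cycles.  Standalone, over plain ints, with the permutation
 * given explicitly as perm[] (perm[n] is the current index of the element
 * that belongs at position n; a sketch of the technique, not the SV code):
 *
 *      static void apply_perm(int *q, size_t *perm, size_t nmemb)
 *      {
 *          size_t n, i, j;
 *          int tmp;
 *
 *          for (n = nmemb; n--; ) {
 *              j = perm[n];
 *              if (j != n) {
 *                  tmp = q[j];
 *                  do {
 *                      q[j] = q[perm[j]];
 *                      i = perm[j];
 *                      perm[j] = j;
 *                      j = i;
 *                  } while (j != n);
 *                  q[n] = tmp;
 *              }
 *          }
 *      }
 *
 * For q = {30,10,20} and perm = {1,2,0} this rotates the single
 * three-element cycle using exactly one temporary, leaving q = {10,20,30},
 * just as the loop above does with the pp/indir pointers.
 */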

/*
=head1 Array Manipulation Functions

=for apidoc sortsv

Sort an array.  Here is an example:

    sortsv(AvARRAY(av), av_top_index(av)+1, Perl_sv_cmp_locale);

Currently this always uses mergesort.  See sortsv_flags for a more
flexible routine.

=cut
*/

void
Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
{
    PERL_ARGS_ASSERT_SORTSV;

    sortsv_flags(array, nmemb, cmp, 0);
}

/*
=for apidoc sortsv_flags

Sort an array, with various options.

=cut
*/
void
Perl_sortsv_flags(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    PERL_ARGS_ASSERT_SORTSV_FLAGS;

    if (flags & SORTf_QSORT)
        S_qsortsv(aTHX_ array, nmemb, cmp, flags);
    else
        S_mergesortsv(aTHX_ array, nmemb, cmp, flags);
}
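
/* For orientation: how these flags are reached from Perl level (a rough
 * sketch; the "sort" pragma's behavior is version-dependent):
 *
 *      use sort '_quicksort';   # sets OPpSORT_QSORT  -> SORTf_QSORT
 *      use sort 'stable';       # sets OPpSORT_STABLE -> SORTf_STABLE
 *
 * while OPpSORT_DESCEND (-> SORTf_DESC) comes from the peephole optimizer
 * recognizing comparison blocks such as
 *
 *      sort { $b <=> $a } @list;
 */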

#define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
#define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
#define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )

PP(pp_sort)
{
    dVAR; dSP; dMARK; dORIGMARK;
    SV **p1 = ORIGMARK+1, **p2;
    I32 max, i;
    AV* av = NULL;
    HV *stash;
    GV *gv;
    CV *cv = NULL;
    I32 gimme = GIMME;
    OP* const nextop = PL_op->op_next;
    I32 overloading = 0;
    bool hasargs = FALSE;
    I32 is_xsub = 0;
    I32 sorting_av = 0;
    const U8 priv = PL_op->op_private;
    const U8 flags = PL_op->op_flags;
    U32 sort_flags = 0;
    void (*sortsvp)(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
      = Perl_sortsv_flags;
    I32 all_SIVs = 1;

    if ((priv & OPpSORT_DESCEND) != 0)
        sort_flags |= SORTf_DESC;
    if ((priv & OPpSORT_QSORT) != 0)
        sort_flags |= SORTf_QSORT;
    if ((priv & OPpSORT_STABLE) != 0)
        sort_flags |= SORTf_STABLE;

    if (gimme != G_ARRAY) {
        SP = MARK;
        EXTEND(SP,1);
        RETPUSHUNDEF;
    }

    ENTER;
    SAVEVPTR(PL_sortcop);
    if (flags & OPf_STACKED) {
        if (flags & OPf_SPECIAL) {
            OP *kid = cLISTOP->op_first->op_sibling;    /* pass pushmark */
            kid = kUNOP->op_first;                      /* pass rv2gv */
            kid = kUNOP->op_first;                      /* pass leave */
            PL_sortcop = kid->op_next;
            stash = CopSTASH(PL_curcop);
        }
        else {
            GV *autogv = NULL;
            cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);
          check_cv:
            if (cv && SvPOK(cv)) {
                const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
                if (proto && strEQ(proto, "$$")) {
                    hasargs = TRUE;
                }
            }
            if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
                is_xsub = 1;
            }
            else if (!(cv && CvROOT(cv))) {
                if (gv) {
                    goto autoload;
                }
                else if (!CvANON(cv) && (gv = CvGV(cv))) {
                    if (cv != GvCV(gv)) cv = GvCV(gv);
                  autoload:
                    if (!autogv && (
                        autogv = gv_autoload_pvn(
                            GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
                            GvNAMEUTF8(gv) ? SVf_UTF8 : 0
                        )
                    )) {
                        cv = GvCVu(autogv);
                        goto check_cv;
                    }
                    else {
                        SV *tmpstr = sv_newmortal();
                        gv_efullname3(tmpstr, gv, NULL);
                        DIE(aTHX_ "Undefined sort subroutine \"%"SVf"\" called",
                            SVfARG(tmpstr));
                    }
                }
                else {
                    DIE(aTHX_ "Undefined subroutine in sort");
                }
            }

            if (is_xsub)
                PL_sortcop = (OP*)cv;
            else
                PL_sortcop = CvSTART(cv);
        }
    }
    else {
        PL_sortcop = NULL;
        stash = CopSTASH(PL_curcop);
    }

    /* optimiser converts "@a = sort @a" to "sort \@a";
     * in case of tied @a, pessimise: push (@a) onto stack, then assign
     * result back to @a at the end of this function */
    if (priv & OPpSORT_INPLACE) {
        assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
        (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
        av = MUTABLE_AV((*SP));
        max = AvFILL(av) + 1;
        if (SvMAGICAL(av)) {
            MEXTEND(SP, max);
            for (i=0; i < max; i++) {
                SV **svp = av_fetch(av, i, FALSE);
                *SP++ = (svp) ? *svp : NULL;
            }
            SP--;
            p1 = p2 = SP - (max-1);
        }
        else {
            if (SvREADONLY(av))
                Perl_croak_no_modify();
            else
                SvREADONLY_on(av);
            p1 = p2 = AvARRAY(av);
            sorting_av = 1;
        }
    }
    else {
        p2 = MARK+1;
        max = SP - MARK;
    }

    /* shuffle stack down, removing optional initial cv (p1!=p2), plus
     * any nulls; also stringify any args, or convert them to integer
     * or number, as required */
    for (i=max; i > 0 ; i--) {
        if ((*p1 = *p2++)) {                    /* Weed out nulls. */
            SvTEMP_off(*p1);
            if (!PL_sortcop) {
                if (priv & OPpSORT_NUMERIC) {
                    if (priv & OPpSORT_INTEGER) {
                        if (!SvIOK(*p1))
                            (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                    }
                    else {
                        if (!SvNSIOK(*p1))
                            (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                        if (all_SIVs && !SvSIOK(*p1))
                            all_SIVs = 0;
                    }
                }
                else {
                    if (!SvPOK(*p1))
                        (void)sv_2pv_flags(*p1, 0,
                            SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
                }
                if (SvAMAGIC(*p1))
                    overloading = 1;
            }
            p1++;
        }
        else
            max--;
    }
    if (sorting_av)
        AvFILLp(av) = max-1;

    if (max > 1) {
        SV **start;
        if (PL_sortcop) {
            PERL_CONTEXT *cx;
            SV** newsp;
            const bool oldcatch = CATCH_GET;

            SAVETMPS;
            SAVEOP();

            CATCH_SET(TRUE);
            PUSHSTACKi(PERLSI_SORT);
            if (!hasargs && !is_xsub) {
                SAVESPTR(PL_firstgv);
                SAVESPTR(PL_secondgv);
                SAVESPTR(PL_sortstash);
                PL_firstgv = gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV);
                PL_secondgv = gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV);
                PL_sortstash = stash;
                SAVESPTR(GvSV(PL_firstgv));
                SAVESPTR(GvSV(PL_secondgv));
            }

            PUSHBLOCK(cx, CXt_NULL, PL_stack_base);
            if (!(flags & OPf_SPECIAL)) {
                cx->cx_type = CXt_SUB;
                cx->blk_gimme = G_SCALAR;
                /* If our comparison routine is already active (CvDEPTH
                 * is not 0), then PUSHSUB does not increase the refcount,
                 * so we have to do it ourselves, because the LEAVESUB
                 * further down lowers it.
*/ 1667 if (CvDEPTH(cv)) SvREFCNT_inc_simple_void_NN(cv); 1668 PUSHSUB(cx); 1669 if (!is_xsub) { 1670 PADLIST * const padlist = CvPADLIST(cv); 1671 1672 if (++CvDEPTH(cv) >= 2) { 1673 PERL_STACK_OVERFLOW_CHECK(); 1674 pad_push(padlist, CvDEPTH(cv)); 1675 } 1676 SAVECOMPPAD(); 1677 PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv)); 1678 1679 if (hasargs) { 1680 /* This is mostly copied from pp_entersub */ 1681 AV * const av = MUTABLE_AV(PAD_SVl(0)); 1682 1683 cx->blk_sub.savearray = GvAV(PL_defgv); 1684 GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av)); 1685 CX_CURPAD_SAVE(cx->blk_sub); 1686 cx->blk_sub.argarray = av; 1687 } 1688 1689 } 1690 } 1691 cx->cx_type |= CXp_MULTICALL; 1692 1693 start = p1 - max; 1694 sortsvp(aTHX_ start, max, 1695 (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv), 1696 sort_flags); 1697 1698 if (!(flags & OPf_SPECIAL)) { 1699 SV *sv; 1700 /* Reset cx, in case the context stack has been 1701 reallocated. */ 1702 cx = &cxstack[cxstack_ix]; 1703 POPSUB(cx, sv); 1704 LEAVESUB(sv); 1705 } 1706 POPBLOCK(cx,PL_curpm); 1707 PL_stack_sp = newsp; 1708 POPSTACK; 1709 CATCH_SET(oldcatch); 1710 } 1711 else { 1712 MEXTEND(SP, 20); /* Can't afford stack realloc on signal. */ 1713 start = sorting_av ? AvARRAY(av) : ORIGMARK+1; 1714 sortsvp(aTHX_ start, max, 1715 (priv & OPpSORT_NUMERIC) 1716 ? ( ( ( priv & OPpSORT_INTEGER) || all_SIVs) 1717 ? ( overloading ? S_amagic_i_ncmp : S_sv_i_ncmp) 1718 : ( overloading ? S_amagic_ncmp : S_sv_ncmp ) ) 1719 : ( IN_LOCALE_RUNTIME 1720 ? ( overloading 1721 ? (SVCOMPARE_t)S_amagic_cmp_locale 1722 : (SVCOMPARE_t)sv_cmp_locale_static) 1723 : ( overloading ? (SVCOMPARE_t)S_amagic_cmp : (SVCOMPARE_t)sv_cmp_static)), 1724 sort_flags); 1725 } 1726 if ((priv & OPpSORT_REVERSE) != 0) { 1727 SV **q = start+max-1; 1728 while (start < q) { 1729 SV * const tmp = *start; 1730 *start++ = *q; 1731 *q-- = tmp; 1732 } 1733 } 1734 } 1735 if (sorting_av) 1736 SvREADONLY_off(av); 1737 else if (av && !sorting_av) { 1738 /* simulate pp_aassign of tied AV */ 1739 SV** const base = MARK+1; 1740 for (i=0; i < max; i++) { 1741 base[i] = newSVsv(base[i]); 1742 } 1743 av_clear(av); 1744 av_extend(av, max); 1745 for (i=0; i < max; i++) { 1746 SV * const sv = base[i]; 1747 SV ** const didstore = av_store(av, i, sv); 1748 if (SvSMAGICAL(sv)) 1749 mg_set(sv); 1750 if (!didstore) 1751 sv_2mortal(sv); 1752 } 1753 } 1754 LEAVE; 1755 PL_stack_sp = ORIGMARK + (sorting_av ? 
static I32
S_sortcv(pTHX_ SV *const a, SV *const b)
{
    dVAR;
    const I32 oldsaveix = PL_savestack_ix;
    const I32 oldscopeix = PL_scopestack_ix;
    I32 result;
    SV *resultsv;
    PMOP * const pm = PL_curpm;
    OP * const sortop = PL_op;
    COP * const cop = PL_curcop;

    PERL_ARGS_ASSERT_SORTCV;

    GvSV(PL_firstgv) = a;
    GvSV(PL_secondgv) = b;
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_op = sortop;
    PL_curcop = cop;
    if (PL_stack_sp != PL_stack_base + 1) {
        assert(PL_stack_sp == PL_stack_base);
        resultsv = &PL_sv_undef;
    }
    else resultsv = *PL_stack_sp;
    if (SvNIOK_nog(resultsv)) result = SvIV(resultsv);
    else {
        ENTER;
        SAVEVPTR(PL_curpad);
        PL_curpad = 0;
        result = SvIV(resultsv);
        LEAVE;
    }
    while (PL_scopestack_ix > oldscopeix) {
        LEAVE;
    }
    leave_scope(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
{
    dVAR;
    const I32 oldsaveix = PL_savestack_ix;
    const I32 oldscopeix = PL_scopestack_ix;
    I32 result;
    AV * const av = GvAV(PL_defgv);
    PMOP * const pm = PL_curpm;
    OP * const sortop = PL_op;
    COP * const cop = PL_curcop;
    SV **pad;

    PERL_ARGS_ASSERT_SORTCV_STACKED;

    if (AvREAL(av)) {
        av_clear(av);
        AvREAL_off(av);
        AvREIFY_on(av);
    }
    if (AvMAX(av) < 1) {
        SV **ary = AvALLOC(av);
        if (AvARRAY(av) != ary) {
            AvMAX(av) += AvARRAY(av) - AvALLOC(av);
            AvARRAY(av) = ary;
        }
        if (AvMAX(av) < 1) {
            AvMAX(av) = 1;
            Renew(ary,2,SV*);
            AvARRAY(av) = ary;
            AvALLOC(av) = ary;
        }
    }
    AvFILLp(av) = 1;

    AvARRAY(av)[0] = a;
    AvARRAY(av)[1] = b;
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_op = sortop;
    PL_curcop = cop;
    pad = PL_curpad; PL_curpad = 0;
    if (PL_stack_sp != PL_stack_base + 1) {
        assert(PL_stack_sp == PL_stack_base);
        result = SvIV(&PL_sv_undef);
    }
    else result = SvIV(*PL_stack_sp);
    PL_curpad = pad;
    while (PL_scopestack_ix > oldscopeix) {
        LEAVE;
    }
    leave_scope(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_xsub(pTHX_ SV *const a, SV *const b)
{
    dVAR; dSP;
    const I32 oldsaveix = PL_savestack_ix;
    const I32 oldscopeix = PL_scopestack_ix;
    CV * const cv = MUTABLE_CV(PL_sortcop);
    I32 result;
    PMOP * const pm = PL_curpm;

    PERL_ARGS_ASSERT_SORTCV_XSUB;

    SP = PL_stack_base;
    PUSHMARK(SP);
    EXTEND(SP, 2);
    *++SP = a;
    *++SP = b;
    PUTBACK;
    (void)(*CvXSUB(cv))(aTHX_ cv);
    if (PL_stack_sp != PL_stack_base + 1)
        Perl_croak(aTHX_ "Sort subroutine didn't return single value");
    result = SvIV(*PL_stack_sp);
    while (PL_scopestack_ix > oldscopeix) {
        LEAVE;
    }
    leave_scope(oldsaveix);
    PL_curpm = pm;
    return result;
}
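/* Built-in comparison routines, used when there is no comparison sub (or
 * when the optimiser has recognised a standard one): S_sv_ncmp orders by
 * NV (NaN operands produce an optional "uninitialized" warning and compare
 * as equal), S_sv_i_ncmp orders by IV, and the S_amagic_* wrappers first
 * offer the comparison to operator overloading via tryCALL_AMAGICbin (the
 * <=> / cmp overloads), falling back to the corresponding plain comparison
 * when neither operand is overloaded. */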
static I32
S_sv_ncmp(pTHX_ SV *const a, SV *const b)
{
    const NV nv1 = SvNSIV(a);
    const NV nv2 = SvNSIV(b);

    PERL_ARGS_ASSERT_SV_NCMP;

#if defined(NAN_COMPARE_BROKEN) && defined(Perl_isnan)
    if (Perl_isnan(nv1) || Perl_isnan(nv2)) {
#else
    if (nv1 != nv1 || nv2 != nv2) {
#endif
        if (ckWARN(WARN_UNINITIALIZED))
            report_uninit(NULL);
        return 0;
    }
    return nv1 < nv2 ? -1 : nv1 > nv2 ? 1 : 0;
}

static I32
S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    const IV iv1 = SvIV(a);
    const IV iv2 = SvIV(b);

    PERL_ARGS_ASSERT_SV_I_NCMP;

    return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
}

#define tryCALL_AMAGICbin(left,right,meth) \
    (SvAMAGIC(left)||SvAMAGIC(right)) \
        ? amagic_call(left, right, meth, 0) \
        : NULL;

#define SORT_NORMAL_RETURN_VALUE(val)  (((val) > 0) ? 1 : ((val) ? -1 : 0))

static I32
S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
{
    dVAR;
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_ncmp(aTHX_ a, b);
}

static I32
S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    dVAR;
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_I_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_i_ncmp(aTHX_ a, b);
}

static I32
S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
{
    dVAR;
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp(str1, str2);
}

static I32
S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
{
    dVAR;
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp_locale(str1, str2);
}

/*
 * Local variables:
 * c-indentation-style: bsd
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 *
 * ex: set ts=8 sts=4 sw=4 et:
 */