/**
 * This module contains all functions related to an object's lifetime:
 * allocation, resizing, deallocation, and finalization.
 *
 * Copyright: Copyright Digital Mars 2000 - 2012.
 * License: Distributed under the
 *    $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
 *    (See accompanying file LICENSE)
 * Authors: Walter Bright, Sean Kelly, Steven Schveighoffer
 * Source: $(DRUNTIMESRC src/rt/_lifetime.d)
 */

module rt.lifetime;

import core.stdc.stdlib;
import core.stdc.string;
import core.stdc.stdarg;
import core.bitop;
import core.memory;
debug(PRINTF) import core.stdc.stdio;
static import rt.tlsgc;

alias BlkInfo = GC.BlkInfo;
alias BlkAttr = GC.BlkAttr;
import core.exception : onOutOfMemoryError, onFinalizeError, onInvalidMemoryOperationError;

private
{
    // User-installable callback consulted during collection (see rt_setCollectHandler).
    alias bool function(Object) CollectHandler;
    __gshared CollectHandler collectHandler = null;

    extern (C) void _d_monitordelete(Object h, bool det);

    // Layout constants for appendable array blocks; see the comment block
    // above __setArrayAllocLength for how the "used length" is stored.
    enum : size_t
    {
        PAGESIZE = 4096,
        BIGLENGTHMASK = ~(PAGESIZE - 1),
        SMALLPAD = 1,
        MEDPAD = ushort.sizeof,
        LARGEPREFIX = 16, // 16 bytes padding at the front of the array
        LARGEPAD = LARGEPREFIX + 1,
        MAXSMALLSIZE = 256-SMALLPAD,
        MAXMEDSIZE = (PAGESIZE / 2) - MEDPAD
    }
}

private immutable bool callStructDtorsDuringGC;

extern (C) void lifetime_init()
{
    // this is run before static ctors, so it is safe to modify immutables
    import rt.config;
    string s = rt_configOption("callStructDtorsDuringGC");
    if (s != null)
        cast() callStructDtorsDuringGC = s[0] == '1' || s[0] == 'y' || s[0] == 'Y';
    else
        cast() callStructDtorsDuringGC = true;
}

/**
 * Allocate `sz` bytes of untyped GC memory.
 */
extern (C) void* _d_allocmemory(size_t sz)
{
    return GC.malloc(sz);
}

/**
 * Allocate and default-initialize a class instance described by `ci`.
 * COM classes are allocated with C malloc (reference counted, not GC'd);
 * all others come from the GC with finalize/scan attributes derived from
 * the ClassInfo flags.
 */
extern (C) Object _d_newclass(const ClassInfo ci)
{
    void* p;

    debug(PRINTF) printf("_d_newclass(ci = %p, %s)\n", ci, cast(char *)ci.name);
    if (ci.m_flags & TypeInfo_Class.ClassFlags.isCOMclass)
    {   /* COM objects are not garbage collected, they are reference counted
         * using AddRef() and Release().  They get free'd by C's free()
         * function called by Release() when Release()'s reference count goes
         * to zero.
         */
        p = malloc(ci.initializer.length);
        if (!p)
            onOutOfMemoryError();
    }
    else
    {
        // TODO: should this be + 1 to avoid having pointers to the next block?
        BlkAttr attr = BlkAttr.NONE;
        // extern(C++) classes don't have a classinfo pointer in their vtable so the GC can't finalize them
        if (ci.m_flags & TypeInfo_Class.ClassFlags.hasDtor
            && !(ci.m_flags & TypeInfo_Class.ClassFlags.isCPPclass))
            attr |= BlkAttr.FINALIZE;
        if (ci.m_flags & TypeInfo_Class.ClassFlags.noPointers)
            attr |= BlkAttr.NO_SCAN;
        p = GC.malloc(ci.initializer.length, attr, ci);
        debug(PRINTF) printf(" p = %p\n", p);
    }

    debug(PRINTF)
    {
        printf("p = %p\n", p);
        printf("ci = %p, ci.init.ptr = %p, len = %llu\n", ci, ci.initializer.ptr, cast(ulong)ci.initializer.length);
        printf("vptr = %p\n", *cast(void**) ci.initializer);
        printf("vtbl[0] = %p\n", (*cast(void***) ci.initializer)[0]);
        printf("vtbl[1] = %p\n", (*cast(void***) ci.initializer)[1]);
        printf("init[0] = %x\n", (cast(uint*) ci.initializer)[0]);
        printf("init[1] = %x\n", (cast(uint*) ci.initializer)[1]);
        printf("init[2] = %x\n", (cast(uint*) ci.initializer)[2]);
        printf("init[3] = %x\n", (cast(uint*) ci.initializer)[3]);
        printf("init[4] = %x\n", (cast(uint*) ci.initializer)[4]);
    }

    // initialize it
    p[0 .. ci.initializer.length] = ci.initializer[];

    debug(PRINTF) printf("initialization done\n");
    return cast(Object) p;
}


/**
 * Finalize and free the object behind an interface reference, then null
 * the reference.  The Object is recovered via the Interface's vtbl offset.
 */
extern (C) void _d_delinterface(void** p)
{
    if (*p)
    {
        Interface* pi = **cast(Interface ***)*p;
        Object o = cast(Object)(*p - pi.offset);

        _d_delclass(&o);
        *p = null;
    }
}


// used for deletion
private extern (D) alias void function (Object) fp_t;


/**
 * Finalize and free a class instance, then null the reference.  If the
 * class declared a deallocator, it takes ownership of the memory after
 * finalization instead of GC.free.
 */
extern (C) void _d_delclass(Object* p)
{
    if (*p)
    {
        debug(PRINTF) printf("_d_delclass(%p)\n", *p);

        ClassInfo **pc = cast(ClassInfo **)*p;
        if (*pc)
        {
            ClassInfo c = **pc;

            rt_finalize(cast(void*) *p);

            if (c.deallocator)
            {
                fp_t fp = cast(fp_t)c.deallocator;
                (*fp)(*p); // call deallocator
                *p = null;
                return;
            }
        }
        else
        {
            // vptr already cleared (e.g. object was finalized before); still finalize.
            rt_finalize(cast(void*) *p);
        }
        GC.free(cast(void*) *p);
        *p = null;
    }
}

/**
 * This is called for a delete statement where the value
 * being deleted is a pointer to a struct with a destructor
 * but doesn't have an overloaded delete operator.
 */
extern (C) void _d_delstruct(void** p, TypeInfo_Struct inf)
{
    if (*p)
    {
        debug(PRINTF) printf("_d_delstruct(%p, %p)\n", *p, cast(void*)inf);

        // run the struct destructor, then release the memory and null the pointer
        inf.destroy(*p);
        GC.free(*p);
        *p = null;
    }
}

// strip const/immutable/shared/inout from type info
inout(TypeInfo) unqualify(inout(TypeInfo) cti) pure nothrow @nogc
{
    TypeInfo ti = cast() cti;
    while (ti)
    {
        // avoid dynamic type casts
        auto tti = typeid(ti);
        if (tti is typeid(TypeInfo_Const))
            ti = (cast(TypeInfo_Const)cast(void*)ti).base;
        else if (tti is typeid(TypeInfo_Invariant))
            ti = (cast(TypeInfo_Invariant)cast(void*)ti).base;
        else if (tti is typeid(TypeInfo_Shared))
            ti = (cast(TypeInfo_Shared)cast(void*)ti).base;
        else if (tti is typeid(TypeInfo_Inout))
            ti = (cast(TypeInfo_Inout)cast(void*)ti).base;
        else
            break;
    }
    return ti;
}

// size used to store the TypeInfo at the end of an allocation for structs that have a destructor
size_t structTypeInfoSize(const TypeInfo ti) pure nothrow @nogc
{
    // feature can be disabled via the "callStructDtorsDuringGC" runtime option
    if (!callStructDtorsDuringGC)
        return 0;

    if (ti && typeid(ti) is typeid(TypeInfo_Struct)) // avoid a complete dynamic type cast
    {
        auto sti = cast(TypeInfo_Struct)cast(void*)ti;
        if (sti.xdtor)
            return size_t.sizeof;
    }
    return 0;
}

/** dummy class used to lock for shared array appending */
private class ArrayAllocLengthLock
{}


/**
  Set the allocated length of the array block.  This is called
  any time an array is appended to or its length is set.

  The allocated block looks like this for blocks < PAGESIZE:

  |elem0|elem1|elem2|...|elemN-1|emptyspace|N*elemsize|


  The size of the allocated length at the end depends on the block size:

  a block of 16 to 256 bytes has an 8-bit length.

  a block with 512 to pagesize/2 bytes has a 16-bit length.

  For blocks >= pagesize, the length is a size_t and is at the beginning of the
  block.  The reason we have to do this is because the block can extend into
  more pages, so we cannot trust the block length if it sits at the end of the
  block, because it might have just been extended.  If we can prove in the
  future that the block is unshared, we may be able to change this, but I'm not
  sure it's important.

  In order to do put the length at the front, we have to provide 16 bytes
  buffer space in case the block has to be aligned properly.  In x86, certain
  SSE instructions will only work if the data is 16-byte aligned.  In addition,
  we need the sentinel byte to prevent accidental pointers to the next block.
  Because of the extra overhead, we only do this for page size and above, where
  the overhead is minimal compared to the block size.

  So for those blocks, it looks like:

  |N*elemsize|padding|elem0|elem1|...|elemN-1|emptyspace|sentinelbyte|

  where elem0 starts 16 bytes after the first byte.
268 */ 269 bool __setArrayAllocLength(ref BlkInfo info, size_t newlength, bool isshared, const TypeInfo tinext, size_t oldlength = ~0) pure nothrow 270 { 271 import core.atomic; 272 273 size_t typeInfoSize = structTypeInfoSize(tinext); 274 275 if (info.size <= 256) 276 { 277 import core.checkedint; 278 279 bool overflow; 280 auto newlength_padded = addu(newlength, 281 addu(SMALLPAD, typeInfoSize, overflow), 282 overflow); 283 284 if (newlength_padded > info.size || overflow) 285 // new size does not fit inside block 286 return false; 287 288 auto length = cast(ubyte *)(info.base + info.size - typeInfoSize - SMALLPAD); 289 if (oldlength != ~0) 290 { 291 if (isshared) 292 { 293 return cas(cast(shared)length, cast(ubyte)oldlength, cast(ubyte)newlength); 294 } 295 else 296 { 297 if (*length == cast(ubyte)oldlength) 298 *length = cast(ubyte)newlength; 299 else 300 return false; 301 } 302 } 303 else 304 { 305 // setting the initial length, no cas needed 306 *length = cast(ubyte)newlength; 307 } 308 if (typeInfoSize) 309 { 310 auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof); 311 *typeInfo = cast() tinext; 312 } 313 } 314 else if (info.size < PAGESIZE) 315 { 316 if (newlength + MEDPAD + typeInfoSize > info.size) 317 // new size does not fit inside block 318 return false; 319 auto length = cast(ushort *)(info.base + info.size - typeInfoSize - MEDPAD); 320 if (oldlength != ~0) 321 { 322 if (isshared) 323 { 324 return cas(cast(shared)length, cast(ushort)oldlength, cast(ushort)newlength); 325 } 326 else 327 { 328 if (*length == oldlength) 329 *length = cast(ushort)newlength; 330 else 331 return false; 332 } 333 } 334 else 335 { 336 // setting the initial length, no cas needed 337 *length = cast(ushort)newlength; 338 } 339 if (typeInfoSize) 340 { 341 auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof); 342 *typeInfo = cast() tinext; 343 } 344 } 345 else 346 { 347 if (newlength + LARGEPAD > info.size) 348 // new size does not fit inside 
block 349 return false; 350 auto length = cast(size_t *)(info.base); 351 if (oldlength != ~0) 352 { 353 if (isshared) 354 { 355 return cas(cast(shared)length, cast(size_t)oldlength, cast(size_t)newlength); 356 } 357 else 358 { 359 if (*length == oldlength) 360 *length = newlength; 361 else 362 return false; 363 } 364 } 365 else 366 { 367 // setting the initial length, no cas needed 368 *length = newlength; 369 } 370 if (typeInfoSize) 371 { 372 auto typeInfo = cast(TypeInfo*)(info.base + size_t.sizeof); 373 *typeInfo = cast()tinext; 374 } 375 } 376 return true; // resize succeeded 377 } 378 379 /** 380 get the allocation size of the array for the given block (without padding or type info) 381 */ 382 size_t __arrayAllocLength(ref BlkInfo info, const TypeInfo tinext) pure nothrow 383 { 384 if (info.size <= 256) 385 return *cast(ubyte *)(info.base + info.size - structTypeInfoSize(tinext) - SMALLPAD); 386 387 if (info.size < PAGESIZE) 388 return *cast(ushort *)(info.base + info.size - structTypeInfoSize(tinext) - MEDPAD); 389 390 return *cast(size_t *)(info.base); 391 } 392 393 /** 394 get the start of the array for the given block 395 */ 396 void *__arrayStart(BlkInfo info) nothrow pure 397 { 398 return info.base + ((info.size & BIGLENGTHMASK) ? LARGEPREFIX : 0); 399 } 400 401 /** 402 get the padding required to allocate size bytes. Note that the padding is 403 NOT included in the passed in size. Therefore, do NOT call this function 404 with the size of an allocated block. 405 */ 406 size_t __arrayPad(size_t size, const TypeInfo tinext) nothrow pure @trusted 407 { 408 return size > MAXMEDSIZE ? LARGEPAD : ((size > MAXSMALLSIZE ? 
MEDPAD : SMALLPAD) + structTypeInfoSize(tinext));
}

/**
  allocate an array memory block by applying the proper padding and
  assigning block attributes if not inherited from the existing block
  */
BlkInfo __arrayAlloc(size_t arrsize, const TypeInfo ti, const TypeInfo tinext) nothrow pure
{
    import core.checkedint;

    size_t typeInfoSize = structTypeInfoSize(tinext);
    size_t padsize = arrsize > MAXMEDSIZE ? LARGEPAD : ((arrsize > MAXSMALLSIZE ? MEDPAD : SMALLPAD) + typeInfoSize);

    bool overflow;
    auto padded_size = addu(arrsize, padsize, overflow);

    if (overflow)
        return BlkInfo();

    // element type without pointers => block need not be scanned by the GC
    uint attr = (!(tinext.flags & 1) ? BlkAttr.NO_SCAN : 0) | BlkAttr.APPENDABLE;
    if (typeInfoSize)
        attr |= BlkAttr.STRUCTFINAL | BlkAttr.FINALIZE;
    return GC.qalloc(padded_size, attr, ti);
}

// overload: reuse the attributes of an existing block when reallocating
BlkInfo __arrayAlloc(size_t arrsize, ref BlkInfo info, const TypeInfo ti, const TypeInfo tinext)
{
    import core.checkedint;

    if (!info.base)
        return __arrayAlloc(arrsize, ti, tinext);

    bool overflow;
    auto padded_size = addu(arrsize, __arrayPad(arrsize, tinext), overflow);
    if (overflow)
    {
        return BlkInfo();
    }

    return GC.qalloc(padded_size, info.attr, ti);
}

/**
  cache for the lookup of the block info
  */
enum N_CACHE_BLOCKS=8;

// note this is TLS, so no need to sync.
BlkInfo *__blkcache_storage;

static if (N_CACHE_BLOCKS==1)
{
    version=single_cache;
}
else
{
    //version=simple_cache; // uncomment to test simple cache strategy
    //version=random_cache; // uncomment to test random cache strategy

    // ensure N_CACHE_BLOCKS is power of 2.
    static assert(!((N_CACHE_BLOCKS - 1) & N_CACHE_BLOCKS));

    version (random_cache)
    {
        int __nextRndNum = 0;
    }
    int __nextBlkIdx;
}

@property BlkInfo *__blkcache() nothrow
{
    if (!__blkcache_storage)
    {
        // allocate the block cache for the first time
        immutable size = BlkInfo.sizeof * N_CACHE_BLOCKS;
        __blkcache_storage = cast(BlkInfo *)malloc(size);
        memset(__blkcache_storage, 0, size);
    }
    return __blkcache_storage;
}

// called when thread is exiting.
static ~this()
{
    // free the blkcache
    if (__blkcache_storage)
    {
        free(__blkcache_storage);
        __blkcache_storage = null;
    }
}


// we expect this to be called with the lock in place
void processGCMarks(BlkInfo* cache, scope rt.tlsgc.IsMarkedDg isMarked) nothrow
{
    // called after the mark routine to eliminate block cache data when it
    // might be ready to sweep
    debug(PRINTF) printf("processing GC Marks, %x\n", cache);
    if (cache)
    {
        debug(PRINTF) foreach (i; 0 .. N_CACHE_BLOCKS)
        {
            printf("cache entry %d has base ptr %x\tsize %d\tflags %x\n", i, cache[i].base, cache[i].size, cache[i].attr);
        }
        auto cache_end = cache + N_CACHE_BLOCKS;
        for (;cache < cache_end; ++cache)
        {
            if (cache.base != null && !isMarked(cache.base))
            {
                debug(PRINTF) printf("clearing cache entry at %x\n", cache.base);
                cache.base = null; // clear that data.
            }
        }
    }
}

unittest
{
    // Bugzilla 10701 - segfault in GC
    ubyte[] result; result.length = 4096;
    GC.free(result.ptr);
    GC.collect();
}

/**
  Get the cached block info of an interior pointer.  Returns null if the
  interior pointer's block is not cached.

  NOTE: The base ptr in this struct can be cleared asynchronously by the GC,
  so any use of the returned BlkInfo should copy it and then check the
  base ptr of the copy before actually using it.
  TODO: Change this function so the caller doesn't have to be aware of this
  issue.  Either return by value and expect the caller to always check
  the base ptr as an indication of whether the struct is valid, or set
  the BlkInfo as a side-effect and return a bool to indicate success.
  */
BlkInfo *__getBlkInfo(void *interior) nothrow
{
    BlkInfo *ptr = __blkcache;
    version (single_cache)
    {
        if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size)
            return ptr;
        return null; // not in cache.
    }
    else version (simple_cache)
    {
        foreach (i; 0..N_CACHE_BLOCKS)
        {
            if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size)
                return ptr;
            ptr++;
        }
    }
    else
    {
        // try to do a smart lookup, using __nextBlkIdx as the "head"
        // (search backward from the head, then wrap to the tail)
        auto curi = ptr + __nextBlkIdx;
        for (auto i = curi; i >= ptr; --i)
        {
            if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size)
                return i;
        }

        for (auto i = ptr + N_CACHE_BLOCKS - 1; i > curi; --i)
        {
            if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size)
                return i;
        }
    }
    return null; // not in cache.
}

// insert a block info into the thread-local cache; curpos is the cache slot
// where the same block was previously found (null if not cached yet).
void __insertBlkInfoCache(BlkInfo bi, BlkInfo *curpos) nothrow
{
    version (single_cache)
    {
        *__blkcache = bi;
    }
    else
    {
        version (simple_cache)
        {
            if (curpos)
                *curpos = bi;
            else
            {
                // note, this is a super-simple algorithm that does not care about
                // most recently used.  It simply uses a round-robin technique to
                // cache block info.  This means that the ordering of the cache
                // doesn't mean anything.  Certain patterns of allocation may
                // render the cache near-useless.
                __blkcache[__nextBlkIdx] = bi;
                __nextBlkIdx = (__nextBlkIdx+1) & (N_CACHE_BLOCKS - 1);
            }
        }
        else version (random_cache)
        {
            // strategy: if the block currently is in the cache, move the
            // current block index to the a random element and evict that
            // element.
            auto cache = __blkcache;
            if (!curpos)
            {
                __nextBlkIdx = (__nextRndNum = 1664525 * __nextRndNum + 1013904223) & (N_CACHE_BLOCKS - 1);
                curpos = cache + __nextBlkIdx;
            }
            else
            {
                __nextBlkIdx = curpos - cache;
            }
            *curpos = bi;
        }
        else
        {
            //
            // strategy: If the block currently is in the cache, swap it with
            // the head element.  Otherwise, move the head element up by one,
            // and insert it there.
            //
            auto cache = __blkcache;
            if (!curpos)
            {
                __nextBlkIdx = (__nextBlkIdx+1) & (N_CACHE_BLOCKS - 1);
                curpos = cache + __nextBlkIdx;
            }
            else if (curpos !is cache + __nextBlkIdx)
            {
                *curpos = cache[__nextBlkIdx];
                curpos = cache + __nextBlkIdx;
            }
            *curpos = bi;
        }
    }
}

/**
 * Shrink the "allocated" length of an array to be the exact size of the array.
 * It doesn't matter what the current allocated length of the array is, the
 * user is telling the runtime that he knows what he is doing.
 */
extern(C) void _d_arrayshrinkfit(const TypeInfo ti, void[] arr) /+nothrow+/
{
    // note, we do not care about shared.  We are setting the length no matter
    // what, so no lock is required.
    debug(PRINTF) printf("_d_arrayshrinkfit, elemsize = %d, arr.ptr = x%x arr.length = %d\n", ti.next.tsize, arr.ptr, arr.length);
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;                  // array element size
    auto cursize = arr.length * size;
    auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
    auto bic = isshared ? null : __getBlkInfo(arr.ptr);
    auto info = bic ?
*bic : GC.query(arr.ptr);
    if (info.base && (info.attr & BlkAttr.APPENDABLE))
    {
        auto newsize = (arr.ptr - __arrayStart(info)) + cursize;

        debug(PRINTF) printf("setting allocated size to %d\n", (arr.ptr - info.base) + cursize);

        // destroy structs that become unused memory when array size is shrinked
        if (typeid(tinext) is typeid(TypeInfo_Struct)) // avoid a complete dynamic type cast
        {
            auto sti = cast(TypeInfo_Struct)cast(void*)tinext;
            if (sti.xdtor)
            {
                auto oldsize = __arrayAllocLength(info, tinext);
                if (oldsize > cursize)
                    finalize_array(arr.ptr + cursize, oldsize - cursize, sti);
            }
        }
        // Note: Since we "assume" the append is safe, it means it is not shared.
        // Since it is not shared, we also know it won't throw (no lock).
        if (!__setArrayAllocLength(info, newsize, false, tinext))
            onInvalidMemoryOperationError();

        // cache the block if not already done.
        if (!isshared && !bic)
            __insertBlkInfoCache(info, null);
    }
}

// true if ti overrides TypeInfo.postblit (i.e. elements need a postblit call on copy)
package bool hasPostblit(in TypeInfo ti)
{
    return (&ti.postblit).funcptr !is &TypeInfo.postblit;
}

// run the element postblit over the `len` bytes of array data starting at ptr
void __doPostblit(void *ptr, size_t len, const TypeInfo ti)
{
    if (!hasPostblit(ti))
        return;

    if (auto tis = cast(TypeInfo_Struct)ti)
    {
        // this is a struct, check the xpostblit member
        auto pblit = tis.xpostblit;
        if (!pblit)
            // postblit not specified, no point in looping.
            return;

        // optimized for struct, call xpostblit directly for each element
        immutable size = ti.tsize;
        const eptr = ptr + len;
        for (;ptr < eptr;ptr += size)
            pblit(ptr);
    }
    else
    {
        // generic case, call the typeinfo's postblit function
        immutable size = ti.tsize;
        const eptr = ptr + len;
        for (;ptr < eptr;ptr += size)
            ti.postblit(ptr);
    }
}


/**
 * set the array capacity.  If the array capacity isn't currently large enough
 * to hold the requested capacity (in number of elements), then the array is
 * resized/reallocated to the appropriate size.  Pass in a requested capacity
 * of 0 to get the current capacity.  Returns the number of elements that can
 * actually be stored once the resizing is done.
 */
extern(C) size_t _d_arraysetcapacity(const TypeInfo ti, size_t newcapacity, void[]* p)
in
{
    assert(ti);
    assert(!(*p).length || (*p).ptr);
}
body
{
    // step 1, get the block
    auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
    auto bic = isshared ? null : __getBlkInfo((*p).ptr);
    auto info = bic ? *bic : GC.query((*p).ptr);
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;
    // compute requested byte size = newcapacity * size, aborting on overflow
    // (the asm variants use the carry flag from MUL; otherwise core.checkedint)
    version (D_InlineAsm_X86)
    {
        size_t reqsize = void;

        asm
        {
            mov EAX, newcapacity;
            mul EAX, size;
            mov reqsize, EAX;
            jnc  Lcontinue;
        }
    }
    else version (D_InlineAsm_X86_64)
    {
        size_t reqsize = void;

        asm
        {
            mov RAX, newcapacity;
            mul RAX, size;
            mov reqsize, RAX;
            jnc  Lcontinue;
        }
    }
    else
    {
        import core.checkedint : mulu;

        bool overflow = false;
        size_t reqsize = mulu(size, newcapacity, overflow);
        if (!overflow)
            goto Lcontinue;
    }
Loverflow:
    onOutOfMemoryError();
    assert(0);
Lcontinue:

    // step 2, get the actual "allocated" size.  If the allocated size does not
    // match what we expect, then we will need to reallocate anyways.

    // TODO: this probably isn't correct for shared arrays
    size_t curallocsize = void;
    size_t curcapacity = void;
    size_t offset = void;
    size_t arraypad = void;
    if (info.base && (info.attr & BlkAttr.APPENDABLE))
    {
        if (info.size <= 256)
        {
            arraypad = SMALLPAD + structTypeInfoSize(tinext);
            curallocsize = *(cast(ubyte *)(info.base + info.size - arraypad));
        }
        else if (info.size < PAGESIZE)
        {
            arraypad = MEDPAD + structTypeInfoSize(tinext);
            curallocsize = *(cast(ushort *)(info.base + info.size - arraypad));
        }
        else
        {
            curallocsize = *(cast(size_t *)(info.base));
            arraypad = LARGEPAD;
        }


        offset = (*p).ptr - __arrayStart(info);
        if (offset + (*p).length * size != curallocsize)
        {
            // the block is shared past the end of this slice; cannot extend in place
            curcapacity = 0;
        }
        else
        {
            // figure out the current capacity of the block from the point
            // of view of the array.
            curcapacity = info.size - offset - arraypad;
        }
    }
    else
    {
        curallocsize = curcapacity = offset = 0;
    }
    debug(PRINTF) printf("_d_arraysetcapacity, p = x%d,%d, newcapacity=%d, info.size=%d, reqsize=%d, curallocsize=%d, curcapacity=%d, offset=%d\n", (*p).ptr, (*p).length, newcapacity, info.size, reqsize, curallocsize, curcapacity, offset);

    if (curcapacity >= reqsize)
    {
        // no problems, the current allocated size is large enough.
        return curcapacity / size;
    }

    // step 3, try to extend the array in place.
    if (info.size >= PAGESIZE && curcapacity != 0)
    {
        auto extendsize = reqsize + offset + LARGEPAD - info.size;
        auto u = GC.extend(info.base, extendsize, extendsize);
        if (u)
        {
            // extend worked, save the new current allocated size
            if (bic)
                bic.size = u; // update cache
            curcapacity = u - offset - LARGEPAD;
            return curcapacity / size;
        }
    }

    // step 4, if extending doesn't work, allocate a new array with at least the requested allocated size.
    auto datasize = (*p).length * size;
    // copy attributes from original block, or from the typeinfo if the
    // original block doesn't exist.
    info = __arrayAlloc(reqsize, info, ti, tinext);
    if (info.base is null)
        goto Loverflow;
    // copy the data over.
    // note that malloc will have initialized the data we did not request to 0.
    auto tgt = __arrayStart(info);
    memcpy(tgt, (*p).ptr, datasize);

    // handle postblit
    __doPostblit(tgt, datasize, tinext);

    if (!(info.attr & BlkAttr.NO_SCAN))
    {
        // need to memset the newly requested data, except for the data that
        // malloc returned that we didn't request.
        void *endptr = tgt + reqsize;
        void *begptr = tgt + datasize;

        // sanity check
        assert(endptr >= begptr);
        memset(begptr, 0, endptr - begptr);
    }

    // set up the correct length
    __setArrayAllocLength(info, datasize, isshared, tinext);
    if (!isshared)
        __insertBlkInfoCache(info, bic);

    *p = (cast(void*)tgt)[0 .. (*p).length];

    // determine the padding.  This has to be done manually because __arrayPad
    // assumes you are not counting the pad size, and info.size does include
    // the pad.
    if (info.size <= 256)
        arraypad = SMALLPAD + structTypeInfoSize(tinext);
    else if (info.size < PAGESIZE)
        arraypad = MEDPAD + structTypeInfoSize(tinext);
    else
        arraypad = LARGEPAD;

    curcapacity = info.size - arraypad;
    return curcapacity / size;
}

/**
 * Allocate a new uninitialized array of length elements.
 * ti is the type of the resulting array, or pointer to element.
 */
extern (C) void[] _d_newarrayU(const TypeInfo ti, size_t length) pure nothrow
{
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;

    debug(PRINTF) printf("_d_newarrayU(length = x%x, size = %d)\n", length, size);
    if (length == 0 || size == 0)
        return null;

    // compute total byte size = length * size, aborting on overflow
    version (D_InlineAsm_X86)
    {
        asm pure nothrow @nogc
        {
            mov     EAX,size        ;
            mul     EAX,length      ;
            mov     size,EAX        ;
            jnc     Lcontinue       ;
        }
    }
    else version (D_InlineAsm_X86_64)
    {
        asm pure nothrow @nogc
        {
            mov     RAX,size        ;
            mul     RAX,length      ;
            mov     size,RAX        ;
            jnc     Lcontinue       ;
        }
    }
    else
    {
        import core.checkedint : mulu;

        bool overflow = false;
        size = mulu(size, length, overflow);
        if (!overflow)
            goto Lcontinue;
    }
Loverflow:
    onOutOfMemoryError();
    assert(0);
Lcontinue:

    auto info = __arrayAlloc(size, ti, tinext);
    if (!info.base)
        goto Loverflow;
    debug(PRINTF) printf(" p = %p\n", info.base);
    // update the length of the array
    auto arrstart = __arrayStart(info);
    auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
    __setArrayAllocLength(info, size, isshared, tinext);
    return arrstart[0..length];
}

/**
 * Allocate a new array of length elements.
 * ti is the type of the resulting array, or pointer to element.
 * (For when the array is initialized to 0)
 */
extern (C) void[] _d_newarrayT(const TypeInfo ti, size_t length) pure nothrow
{
    void[] result = _d_newarrayU(ti, length);
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;

    memset(result.ptr, 0, size * length);
    return result;
}

/**
 * For when the array has a non-zero initializer.
 */
extern (C) void[] _d_newarrayiT(const TypeInfo ti, size_t length) pure nothrow
{
    import core.internal.traits : TypeTuple;

    void[] result = _d_newarrayU(ti, length);
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;

    auto init = tinext.initializer();

    // fast paths for power-of-two initializer sizes; generic memcpy loop otherwise
    switch (init.length)
    {
    foreach (T; TypeTuple!(ubyte, ushort, uint, ulong))
    {
    case T.sizeof:
        (cast(T*)result.ptr)[0 .. size * length / T.sizeof] = *cast(T*)init.ptr;
        return result;
    }

    default:
    {
        immutable sz = init.length;
        for (size_t u = 0; u < size * length; u += sz)
            memcpy(result.ptr + u, init.ptr, sz);
        return result;
    }
    }
}


/**
 * Allocate a multidimensional array, recursing per dimension and using
 * `op` (one of the _d_newarray* variants) for the innermost dimension.
 */
void[] _d_newarrayOpT(alias op)(const TypeInfo ti, size_t[] dims)
{
    debug(PRINTF) printf("_d_newarrayOpT(ndims = %d)\n", dims.length);
    if (dims.length == 0)
        return null;

    void[] foo(const TypeInfo ti, size_t[] dims)
    {
        auto tinext = unqualify(ti.next);
        auto dim = dims[0];

        debug(PRINTF) printf("foo(ti = %p, ti.next = %p, dim = %d, ndims = %d\n", ti, ti.next, dim, dims.length);
        if (dims.length == 1)
        {
            auto r = op(ti, dim);
            return *cast(void[]*)(&r);
        }

        auto allocsize = (void[]).sizeof * dim;
        auto info = __arrayAlloc(allocsize, ti, tinext);
        auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
        __setArrayAllocLength(info, allocsize, isshared, tinext);
        auto p = __arrayStart(info)[0 .. dim];

        foreach (i; 0..dim)
        {
            (cast(void[]*)p.ptr)[i] = foo(tinext, dims[1..$]);
        }
        return p;
    }

    auto result = foo(ti, dims);
    debug(PRINTF) printf("result = %llx\n", result.ptr);

    return result;
}


/**
 * Allocate a zero-initialized multidimensional array.
 */
extern (C) void[] _d_newarraymTX(const TypeInfo ti, size_t[] dims)
{
    debug(PRINTF) printf("_d_newarraymT(dims.length = %d)\n", dims.length);

    if (dims.length == 0)
        return null;
    else
    {
        return _d_newarrayOpT!(_d_newarrayT)(ti, dims);
    }
}


/**
 * Allocate a multidimensional array with a non-zero element initializer.
 */
extern (C) void[] _d_newarraymiTX(const TypeInfo ti, size_t[] dims)
{
    debug(PRINTF) printf("_d_newarraymiT(dims.length = %d)\n", dims.length);

    if (dims.length == 0)
        return null;
    else
    {
        return _d_newarrayOpT!(_d_newarrayiT)(ti, dims);
    }
}

/**
 * Allocate an uninitialized non-array item.
 * This is an optimization to avoid things needed for arrays like the __arrayPad(size).
 */
extern (C) void* _d_newitemU(in TypeInfo _ti)
{
    auto ti = unqualify(_ti);
    auto flags = !(ti.flags & 1) ? BlkAttr.NO_SCAN : 0;
    immutable tiSize = structTypeInfoSize(ti);
    immutable size = ti.tsize + tiSize;
    if (tiSize)
        flags |= BlkAttr.STRUCTFINAL | BlkAttr.FINALIZE;

    auto blkInf = GC.qalloc(size, flags, ti);
    auto p = blkInf.base;

    // store the TypeInfo at the end of the block so the GC can run the struct dtor
    if (tiSize)
        *cast(TypeInfo*)(p + blkInf.size - tiSize) = cast() ti;

    return p;
}

/// Same as above, zero initializes the item.
extern (C) void* _d_newitemT(in TypeInfo _ti)
{
    auto p = _d_newitemU(_ti);
    memset(p, 0, _ti.tsize);
    return p;
}

/// Same as above, for item with non-zero initializer.
extern (C) void* _d_newitemiT(in TypeInfo _ti)
{
    auto p = _d_newitemU(_ti);
    auto init = _ti.initializer();
    assert(init.length <= _ti.tsize);
    memcpy(p, init.ptr, init.length);
    return p;
}

/**
 * Legacy length/pointer pair.
 * NOTE(review): appears to mirror the runtime layout of a D dynamic
 * array (length first, then data pointer) — confirm before relying on it.
 */
struct Array
{
    size_t length;
    byte* data;
}


/**
 * This function has been replaced by _d_delarray_t
 */
extern (C) void _d_delarray(void[]* p)
{
    _d_delarray_t(p, null);
}

debug(PRINTF)
{
    /// Dump the thread-local BlkInfo cache (debug builds only).
    extern(C) void printArrayCache()
    {
        auto ptr = __blkcache;
        printf("CACHE: \n");
        foreach (i; 0 .. N_CACHE_BLOCKS)
        {
            printf("  %d\taddr:% .8x\tsize:% .10d\tflags:% .8x\n", i, ptr[i].base, ptr[i].size, ptr[i].attr);
        }
    }
}

/**
 * Delete the array allocation containing *p: run struct destructors when
 * `ti` is supplied, evict the block from the BlkInfo cache, free the GC
 * block, and null out *p.
 */
extern (C) void _d_delarray_t(void[]* p, const TypeInfo_Struct ti)
{
    if (p)
    {
        auto bic = __getBlkInfo(p.ptr);
        auto info = bic ? *bic : GC.query(p.ptr);

        // only blocks the runtime marked APPENDABLE are arrays it manages
        if (info.base && (info.attr & BlkAttr.APPENDABLE))
        {
            if (ti) // ti non-null only if ti is a struct with dtor
            {
                void* start = __arrayStart(info);
                size_t length = __arrayAllocLength(info, ti);
                finalize_array(start, length, ti);
            }

            // if p is in the cache, clear it there as well
            if (bic)
                bic.base = null;

            GC.free(info.base);
            *p = null;
        }
    }
}

unittest
{
    __gshared size_t countDtor = 0;
    struct S
    {
        int x;
        ~this() { countDtor++; }
    }
    // destroy large array with x.ptr not base address of allocation
    auto x = new S[10000];
    void* p = x.ptr;
    assert(GC.addrOf(p) != null);
    delete x;
    assert(GC.addrOf(p) == null);
    assert(countDtor == 10000);

    // destroy full array even if only slice passed
    auto y = new S[400];
    auto z = y[200 .. 300];
    p = z.ptr;
    assert(GC.addrOf(p) != null);
    delete z;
    assert(GC.addrOf(p) == null);
    assert(countDtor == 10000 + 400);
}

/**
 * Free the GC block *p points at and null the pointer.
 */
extern (C) void _d_delmemory(void* *p)
{
    if (*p)
    {
        GC.free(*p);
        *p = null;
    }
}


/**
 * Finalize the object referenced through an interface pointer `p`
 * (rewinds by the interface offset to reach the Object, then finalizes it).
 */
extern (C) void _d_callinterfacefinalizer(void *p)
{
    if (p)
    {
        Interface *pi = **cast(Interface ***)p;
        Object o = cast(Object)(p - pi.offset);
        rt_finalize(cast(void*)o);
    }
}


/**
 * Run the class finalizer for the object at `p`.
 */
extern (C) void _d_callfinalizer(void* p)
{
    rt_finalize( p );
}


/**
 * Install the handler consulted by rt_finalize2 during GC collection.
 */
extern (C) void rt_setCollectHandler(CollectHandler h)
{
    collectHandler = h;
}


/**
 * Return the currently installed collect handler (may be null).
 */
extern (C) CollectHandler rt_getCollectHandler()
{
    return collectHandler;
}


/**
 * Return whether the finalizer of the block at `p` (class, struct, or
 * array-of-structs, selected by `attr`) lies within the code `segment`.
 * Used when unloading code so the GC can finalize affected blocks first.
 */
extern (C) int rt_hasFinalizerInSegment(void* p, size_t size, uint attr, in void[] segment) nothrow
{
    if (attr & BlkAttr.STRUCTFINAL)
    {
        if (attr & BlkAttr.APPENDABLE)
            return hasArrayFinalizerInSegment(p, size, segment);
        return hasStructFinalizerInSegment(p, size, segment);
    }

    // otherwise class
    auto ppv = cast(void**) p;
    if (!p || !*ppv)
        return false;

    auto c = *cast(ClassInfo*)*ppv;
    do
    {
        auto pf = c.destructor;
        if (cast(size_t)(pf - segment.ptr) < segment.length) return true;
    }
    while ((c = c.base) !is null);

    return false;
}

int hasStructFinalizerInSegment(void* p, size_t size, in void[] segment) nothrow
{
    if (!p)
        return false;

    // TypeInfo_Struct is stored at the end of the block (see _d_newitemU)
    auto ti = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof);
    return cast(size_t)(cast(void*)ti.xdtor - segment.ptr) < segment.length;
}

int hasArrayFinalizerInSegment(void* p, size_t size, in void[] segment) nothrow
{
    if (!p)
        return false;

    // TypeInfo_Struct location depends on block size: trailing for
    // sub-page blocks, just after the length prefix for large blocks
    TypeInfo_Struct si = void;
    if (size < PAGESIZE)
        si = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof);
    else
        si = *cast(TypeInfo_Struct*)(p + size_t.sizeof);

    return cast(size_t)(cast(void*)si.xdtor - segment.ptr) < segment.length;
}

// called by the GC
void finalize_array2(void* p, size_t size) nothrow
{
    debug(PRINTF) printf("rt_finalize_array2(p = %p)\n", p);

    // Recover the element TypeInfo and the used length from the block
    // layout, which differs by size class (small / medium / large).
    TypeInfo_Struct si = void;
    if (size <= 256)
    {
        si = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof);
        size = *cast(ubyte*)(p + size - size_t.sizeof - SMALLPAD);
    }
    else if (size < PAGESIZE)
    {
        si = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof);
        size = *cast(ushort*)(p + size - size_t.sizeof - MEDPAD);
    }
    else
    {
        // large block: length stored at the front, data after LARGEPREFIX
        si = *cast(TypeInfo_Struct*)(p + size_t.sizeof);
        size = *cast(size_t*)p;
        p += LARGEPREFIX;
    }

    try
    {
        finalize_array(p, size, si);
    }
    catch (Exception e)
    {
        onFinalizeError(si, e);
    }
}

void finalize_array(void* p, size_t size, const TypeInfo_Struct si)
{
    // Due to the fact that the delete operator calls destructors
    // for arrays from the last element to the first, we maintain
    // compatibility here by doing the same.
    auto tsize = si.tsize;
    for (auto curP = p + size - tsize; curP >= p; curP -= tsize)
    {
        // call destructor
        si.destroy(curP);
    }
}

// called by the GC
void finalize_struct(void* p, size_t size) nothrow
{
    debug(PRINTF) printf("finalize_struct(p = %p)\n", p);

    auto ti = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof);
    try
    {
        ti.destroy(p); // call destructor
    }
    catch (Exception e)
    {
        onFinalizeError(ti, e);
    }
}

/**
 * Finalize a class object: run the destructor chain (unless a collect
 * handler vetoes it for non-deterministic destruction), delete the
 * monitor, optionally reset the memory to the init state, and always
 * zero the vptr so the object cannot be finalized twice.
 */
extern (C) void rt_finalize2(void* p, bool det = true, bool resetMemory = true) nothrow
{
    debug(PRINTF) printf("rt_finalize2(p = %p)\n", p);

    auto ppv = cast(void**) p;
    if (!p || !*ppv)
        return;

    auto pc = cast(ClassInfo*) *ppv;
    try
    {
        if (det || collectHandler is null || collectHandler(cast(Object) p))
        {
            // walk the inheritance chain, most-derived destructor first
            auto c = *pc;
            do
            {
                if (c.destructor)
                    (cast(fp_t) c.destructor)(cast(Object) p); // call destructor
            }
            while ((c = c.base) !is null);
        }

        if (ppv[1]) // if monitor is not null
            _d_monitordelete(cast(Object) p, det);

        if (resetMemory)
        {
            auto w = (*pc).initializer;
            p[0 .. w.length] = w[];
        }
    }
    catch (Exception e)
    {
        onFinalizeError(*pc, e);
    }
    finally
    {
        *ppv = null; // zero vptr even if `resetMemory` is false
    }
}

extern (C) void rt_finalize(void* p, bool det = true)
{
    rt_finalize2(p, det, true);
}

extern (C) void rt_finalizeFromGC(void* p, size_t size, uint attr)
{
    // to verify: reset memory necessary?
    // dispatch on block attributes to the matching finalizer kind
    if (!(attr & BlkAttr.STRUCTFINAL))
        rt_finalize2(p, false, false); // class
    else if (attr & BlkAttr.APPENDABLE)
        finalize_array2(p, size); // array of structs
    else
        finalize_struct(p, size); // struct
}


/**
 * Resize dynamic arrays with 0 initializers.
 */
extern (C) void[] _d_arraysetlengthT(const TypeInfo ti, size_t newlength, void[]* p)
in
{
    assert(ti);
    assert(!(*p).length || (*p).ptr);
}
body
{
    debug(PRINTF)
    {
        //printf("_d_arraysetlengthT(p = %p, sizeelem = %d, newlength = %d)\n", p, sizeelem, newlength);
        if (p)
            printf("\tp.ptr = %p, p.length = %d\n", (*p).ptr, (*p).length);
    }

    void* newdata = void;
    if (newlength)
    {
        // shrinking (or keeping) the length never reallocates
        if (newlength <= (*p).length)
        {
            *p = (*p)[0 .. newlength];
            newdata = (*p).ptr;
            return newdata[0 .. newlength];
        }
        auto tinext = unqualify(ti.next);
        size_t sizeelem = tinext.tsize;
        // compute newlength * sizeelem with overflow detection
        version (D_InlineAsm_X86)
        {
            size_t newsize = void;

            asm pure nothrow @nogc
            {
                mov EAX, newlength;
                mul EAX, sizeelem;
                mov newsize, EAX;
                jc  Loverflow;
            }
        }
        else version (D_InlineAsm_X86_64)
        {
            size_t newsize = void;

            asm pure nothrow @nogc
            {
                mov RAX, newlength;
                mul RAX, sizeelem;
                mov newsize, RAX;
                jc  Loverflow;
            }
        }
        else
        {
            import core.checkedint : mulu;

            bool overflow = false;
            size_t newsize = mulu(sizeelem, newlength, overflow);
            if (overflow)
                goto Loverflow;
        }

        debug(PRINTF) printf("newsize = %x, newlength = %x\n", newsize, newlength);

        // shared arrays must not use the thread-local BlkInfo cache
        auto isshared = typeid(ti) is typeid(TypeInfo_Shared);

        if ((*p).ptr)
        {
            newdata = (*p).ptr;
            if (newlength > (*p).length)
            {
                size_t size = (*p).length * sizeelem;
                auto bic = isshared ? null : __getBlkInfo((*p).ptr);
                auto info = bic ? *bic : GC.query((*p).ptr);
                if (info.base && (info.attr & BlkAttr.APPENDABLE))
                {
                    // calculate the extent of the array given the base.
                    size_t offset = (*p).ptr - __arrayStart(info);
                    if (info.size >= PAGESIZE)
                    {
                        // size of array is at the front of the block
                        if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
                        {
                            // check to see if it failed because there is not
                            // enough space
                            if (*(cast(size_t*)info.base) == size + offset)
                            {
                                // not enough space, try extending
                                auto extendsize = newsize + offset + LARGEPAD - info.size;
                                auto u = GC.extend(info.base, extendsize, extendsize);
                                if (u)
                                {
                                    // extend worked, now try setting the length
                                    // again.
                                    info.size = u;
                                    if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
                                    {
                                        if (!isshared)
                                            __insertBlkInfoCache(info, bic);
                                        goto L1;
                                    }
                                }
                            }

                            // couldn't do it, reallocate
                            goto L2;
                        }
                        else if (!isshared && !bic)
                        {
                            // add this to the cache, it wasn't present previously.
                            __insertBlkInfoCache(info, null);
                        }
                    }
                    else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
                    {
                        // could not resize in place
                        goto L2;
                    }
                    else if (!isshared && !bic)
                    {
                        // add this to the cache, it wasn't present previously.
                        __insertBlkInfoCache(info, null);
                    }
                }
                else
                {
                    if (info.base)
                    {
                    L2:
                        if (bic)
                        {
                            // a chance that flags have changed since this was cached, we should fetch the most recent flags
                            info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE;
                        }
                        info = __arrayAlloc(newsize, info, ti, tinext);
                    }
                    else
                    {
                        info = __arrayAlloc(newsize, ti, tinext);
                    }

                    if (info.base is null)
                        goto Loverflow;

                    __setArrayAllocLength(info, newsize, isshared, tinext);
                    if (!isshared)
                        __insertBlkInfoCache(info, bic);
                    newdata = cast(byte *)__arrayStart(info);
                    newdata[0 .. size] = (*p).ptr[0 .. size];

                    // do postblit processing
                    __doPostblit(newdata, size, tinext);
                }
            L1:
                // zero-initialize the newly exposed tail
                memset(newdata + size, 0, newsize - size);
            }
        }
        else
        {
            // pointer was null, need to allocate
            auto info = __arrayAlloc(newsize, ti, tinext);
            if (info.base is null)
                goto Loverflow;
            __setArrayAllocLength(info, newsize, isshared, tinext);
            if (!isshared)
                __insertBlkInfoCache(info, null);
            newdata = cast(byte *)__arrayStart(info);
            memset(newdata, 0, newsize);
        }
    }
    else
    {
        newdata = (*p).ptr;
    }

    *p = newdata[0 .. newlength];
    return *p;

Loverflow:
    onOutOfMemoryError();
    assert(0);
}


/**
 * Resize arrays for non-zero initializers.
 *      p               pointer to array lvalue to be updated
 *      newlength       new .length property of array
 *      sizeelem        size of each element of array
 *      initsize        size of initializer
 *      ...
 *              initializer
 */
extern (C) void[] _d_arraysetlengthiT(const TypeInfo ti, size_t newlength, void[]* p)
in
{
    assert(!(*p).length || (*p).ptr);
}
body
{
    void* newdata;
    auto tinext = unqualify(ti.next);
    auto sizeelem = tinext.tsize;
    auto initializer = tinext.initializer();
    auto initsize = initializer.length;

    assert(sizeelem);
    assert(initsize);
    assert(initsize <= sizeelem);
    // the initializer must tile the element size exactly
    assert((sizeelem / initsize) * initsize == sizeelem);

    debug(PRINTF)
    {
        printf("_d_arraysetlengthiT(p = %p, sizeelem = %d, newlength = %d, initsize = %d)\n", p, sizeelem, newlength, initsize);
        if (p)
            printf("\tp.data = %p, p.length = %d\n", (*p).ptr, (*p).length);
    }

    if (newlength)
    {
        // compute newlength * sizeelem with overflow detection
        version (D_InlineAsm_X86)
        {
            size_t newsize = void;

            asm
            {
                mov EAX,newlength   ;
                mul EAX,sizeelem    ;
                mov newsize,EAX     ;
                jc  Loverflow       ;
            }
        }
        else version (D_InlineAsm_X86_64)
        {
            size_t newsize = void;

            asm
            {
                mov RAX,newlength   ;
                mul RAX,sizeelem    ;
                mov newsize,RAX     ;
                jc  Loverflow       ;
            }
        }
        else
        {
            import core.checkedint : mulu;

            bool overflow = false;
            size_t newsize = mulu(sizeelem, newlength, overflow);
            if (overflow)
                goto Loverflow;
        }
        debug(PRINTF) printf("newsize = %x, newlength = %x\n", newsize, newlength);


        size_t size = (*p).length * sizeelem;
        // shared arrays must not use the thread-local BlkInfo cache
        auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
        if ((*p).ptr)
        {
            newdata = (*p).ptr;
            if (newlength > (*p).length)
            {
                auto bic = isshared ? null : __getBlkInfo((*p).ptr);
                auto info = bic ? *bic : GC.query((*p).ptr);

                // calculate the extent of the array given the base.
                size_t offset = (*p).ptr - __arrayStart(info);
                if (info.base && (info.attr & BlkAttr.APPENDABLE))
                {
                    if (info.size >= PAGESIZE)
                    {
                        // size of array is at the front of the block
                        if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
                        {
                            // check to see if it failed because there is not
                            // enough space
                            if (*(cast(size_t*)info.base) == size + offset)
                            {
                                // not enough space, try extending
                                auto extendsize = newsize + offset + LARGEPAD - info.size;
                                auto u = GC.extend(info.base, extendsize, extendsize);
                                if (u)
                                {
                                    // extend worked, now try setting the length
                                    // again.
                                    info.size = u;
                                    if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
                                    {
                                        if (!isshared)
                                            __insertBlkInfoCache(info, bic);
                                        goto L1;
                                    }
                                }
                            }

                            // couldn't do it, reallocate
                            goto L2;
                        }
                        else if (!isshared && !bic)
                        {
                            // add this to the cache, it wasn't present previously.
                            __insertBlkInfoCache(info, null);
                        }
                    }
                    else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
                    {
                        // could not resize in place
                        goto L2;
                    }
                    else if (!isshared && !bic)
                    {
                        // add this to the cache, it wasn't present previously.
                        __insertBlkInfoCache(info, null);
                    }
                }
                else
                {
                    // not appendable or not part of the heap yet.
                    if (info.base)
                    {
                    L2:
                        if (bic)
                        {
                            // a chance that flags have changed since this was cached, we should fetch the most recent flags
                            info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE;
                        }
                        info = __arrayAlloc(newsize, info, ti, tinext);
                    }
                    else
                    {
                        info = __arrayAlloc(newsize, ti, tinext);
                    }
                    __setArrayAllocLength(info, newsize, isshared, tinext);
                    if (!isshared)
                        __insertBlkInfoCache(info, bic);
                    newdata = cast(byte *)__arrayStart(info);
                    newdata[0 .. size] = (*p).ptr[0 .. size];

                    // do postblit processing
                    __doPostblit(newdata, size, tinext);
                }
            L1: ;
            }
        }
        else
        {
            // length was zero, need to allocate
            auto info = __arrayAlloc(newsize, ti, tinext);
            __setArrayAllocLength(info, newsize, isshared, tinext);
            if (!isshared)
                __insertBlkInfoCache(info, null);
            newdata = cast(byte *)__arrayStart(info);
        }

        auto q = initializer.ptr; // pointer to initializer

        // stamp the initializer over each newly exposed element
        if (newsize > size)
        {
            if (initsize == 1)
            {
                debug(PRINTF) printf("newdata = %p, size = %d, newsize = %d, *q = %d\n", newdata, size, newsize, *cast(byte*)q);
                memset(newdata + size, *cast(byte*)q, newsize - size);
            }
            else
            {
                for (size_t u = size; u < newsize; u += initsize)
                {
                    memcpy(newdata + u, q, initsize);
                }
            }
        }
    }
    else
    {
        newdata = (*p).ptr;
    }

    *p = newdata[0 .. newlength];
    return *p;

Loverflow:
    onOutOfMemoryError();
    assert(0);
}


/**
 * Append y[] to array x[]
 */
extern (C) void[] _d_arrayappendT(const TypeInfo ti, ref byte[] x, byte[] y)
{
    auto length = x.length;
    auto tinext = unqualify(ti.next);
    auto sizeelem = tinext.tsize;              // array element size
    // grow x in place (or reallocate), then copy y's bytes into the tail
    _d_arrayappendcTX(ti, x, y.length);
    memcpy(x.ptr + length * sizeelem, y.ptr, y.length * sizeelem);

    // do postblit
    __doPostblit(x.ptr + length * sizeelem, y.length * sizeelem, tinext);
    return x;
}


/**
 * Compute the allocation capacity (in bytes) for an array of `newlength`
 * elements of `size` bytes, over-allocating proportionally less as the
 * array grows (see the rationale comment below).
 */
size_t newCapacity(size_t newlength, size_t size)
{
    version (none)
    {
        size_t newcap = newlength * size;
    }
    else
    {
        /*
         * Better version by Dave Fladebo:
         * This uses an inverse logorithmic algorithm to pre-allocate a bit more
         * space for larger arrays.
         * - Arrays smaller than PAGESIZE bytes are left as-is, so for the most
         * common cases, memory allocation is 1 to 1. The small overhead added
         * doesn't affect small array perf. (it's virtually the same as
         * current).
         * - Larger arrays have some space pre-allocated.
         * - As the arrays grow, the relative pre-allocated space shrinks.
         * - The logorithmic algorithm allocates relatively more space for
         * mid-size arrays, making it very fast for medium arrays (for
         * mid-to-large arrays, this turns out to be quite a bit faster than the
         * equivalent realloc() code in C, on Linux at least. Small arrays are
         * just as fast as GCC).
         * - Perhaps most importantly, overall memory usage and stress on the GC
         * is decreased significantly for demanding environments.
         */
        size_t newcap = newlength * size;
        size_t newext = 0;

        if (newcap > PAGESIZE)
        {
            //double mult2 = 1.0 + (size / log10(pow(newcap * 2.0,2.0)));

            // redo above line using only integer math

            /*static int log2plus1(size_t c)
            {   int i;

                if (c == 0)
                    i = -1;
                else
                    for (i = 1; c >>= 1; i++)
                    {
                    }
                return i;
            }*/

            /* The following setting for mult sets how much bigger
             * the new size will be over what is actually needed.
             * 100 means the same size, more means proportionally more.
             * More means faster but more memory consumption.
             */
            //long mult = 100 + (1000L * size) / (6 * log2plus1(newcap));
            //long mult = 100 + (1000L * size) / log2plus1(newcap);
            long mult = 100 + (1000L) / (bsr(newcap) + 1);

            // testing shows 1.02 for large arrays is about the point of diminishing return
            //
            // Commented out because the multipler will never be < 102.  In order for it to be < 2,
            // then 1000L / (bsr(x) + 1) must be > 2.  The highest bsr(x) + 1
            // could be is 65 (64th bit set), and 1000L / 64 is much larger
            // than 2.  We need 500 bit integers for 101 to be achieved :)
            /*if (mult < 102)
                mult = 102;*/
            /*newext = cast(size_t)((newcap * mult) / 100);
            newext -= newext % size;*/
            // This version rounds up to the next element, and avoids using
            // mod.
            newext = cast(size_t)((newlength * mult + 99) / 100) * size;
            debug(PRINTF) printf("mult: %2.2f, alloc: %2.2f\n",mult/100.0,newext / cast(double)size);
        }
        newcap = newext > newcap ? newext : newcap;
        debug(PRINTF) printf("newcap = %d, newlength = %d, size = %d\n", newcap, newlength, size);
    }
    return newcap;
}


/**************************************
 * Extend an array by n elements.
 * Caller must initialize those elements.
 */
extern (C)
byte[] _d_arrayappendcTX(const TypeInfo ti, ref byte[] px, size_t n)
{
    // This is a cut&paste job from _d_arrayappendT().  Should be refactored.

    // only optimize array append where ti is not a shared type
    auto tinext = unqualify(ti.next);
    auto sizeelem = tinext.tsize;              // array element size
    auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
    auto bic = isshared ? null : __getBlkInfo(px.ptr);
    auto info = bic ? *bic : GC.query(px.ptr);
    auto length = px.length;
    auto newlength = length + n;
    auto newsize = newlength * sizeelem;
    auto size = length * sizeelem;
    size_t newcap = void; // for scratch space

    // calculate the extent of the array given the base.
    size_t offset = cast(void*)px.ptr - __arrayStart(info);
    if (info.base && (info.attr & BlkAttr.APPENDABLE))
    {
        if (info.size >= PAGESIZE)
        {
            // size of array is at the front of the block
            if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
            {
                // check to see if it failed because there is not
                // enough space
                newcap = newCapacity(newlength, sizeelem);
                if (*(cast(size_t*)info.base) == size + offset)
                {
                    // not enough space, try extending
                    auto extendoffset = offset + LARGEPAD - info.size;
                    auto u = GC.extend(info.base, newsize + extendoffset, newcap + extendoffset);
                    if (u)
                    {
                        // extend worked, now try setting the length
                        // again.
                        info.size = u;
                        if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
                        {
                            if (!isshared)
                                __insertBlkInfoCache(info, bic);
                            goto L1;
                        }
                    }
                }

                // couldn't do it, reallocate
                goto L2;
            }
            else if (!isshared && !bic)
            {
                __insertBlkInfoCache(info, null);
            }
        }
        else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
        {
            // could not resize in place
            newcap = newCapacity(newlength, sizeelem);
            goto L2;
        }
        else if (!isshared && !bic)
        {
            __insertBlkInfoCache(info, null);
        }
    }
    else
    {
        // not appendable or is null
        newcap = newCapacity(newlength, sizeelem);
        if (info.base)
        {
        L2:
            if (bic)
            {
                // a chance that flags have changed since this was cached, we should fetch the most recent flags
                info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE;
            }
            info = __arrayAlloc(newcap, info, ti, tinext);
        }
        else
        {
            info = __arrayAlloc(newcap, ti, tinext);
        }
        __setArrayAllocLength(info, newsize, isshared, tinext);
        if (!isshared)
            __insertBlkInfoCache(info, bic);
        auto newdata = cast(byte *)__arrayStart(info);
        memcpy(newdata, px.ptr, length * sizeelem);
        // do postblit processing
        __doPostblit(newdata, length * sizeelem, tinext);
        // patch the slice's data pointer in place (slice = {length, ptr})
        (cast(void **)(&px))[1] = newdata;
    }

  L1:
    // patch the slice's length field in place
    *cast(size_t *)&px = newlength;
    return px;
}


/**
 * Append dchar to char[]
 */
extern (C) void[] _d_arrayappendcd(ref byte[] x, dchar c)
{
    // c could encode into from 1 to 4 characters
    char[4] buf = void;
    byte[] appendthis; // passed to appendT
    if (c <= 0x7F)
    {
        buf.ptr[0] = cast(char)c;
        appendthis = (cast(byte *)buf.ptr)[0..1];
    }
    else if (c <= 0x7FF)
    {
        // two-byte UTF-8 sequence
        buf.ptr[0] = cast(char)(0xC0 | (c >> 6));
        buf.ptr[1]
= cast(char)(0x80 | (c & 0x3F));
        appendthis = (cast(byte *)buf.ptr)[0..2];
    }
    else if (c <= 0xFFFF)
    {
        // three-byte UTF-8 sequence
        buf.ptr[0] = cast(char)(0xE0 | (c >> 12));
        buf.ptr[1] = cast(char)(0x80 | ((c >> 6) & 0x3F));
        buf.ptr[2] = cast(char)(0x80 | (c & 0x3F));
        appendthis = (cast(byte *)buf.ptr)[0..3];
    }
    else if (c <= 0x10FFFF)
    {
        // four-byte UTF-8 sequence
        buf.ptr[0] = cast(char)(0xF0 | (c >> 18));
        buf.ptr[1] = cast(char)(0x80 | ((c >> 12) & 0x3F));
        buf.ptr[2] = cast(char)(0x80 | ((c >> 6) & 0x3F));
        buf.ptr[3] = cast(char)(0x80 | (c & 0x3F));
        appendthis = (cast(byte *)buf.ptr)[0..4];
    }
    else
    {
        import core.exception : onUnicodeError;
        onUnicodeError("Invalid UTF-8 sequence", 0); // invalid utf character
    }

    //
    // TODO: This always assumes the array type is shared, because we do not
    // get a typeinfo from the compiler.  Assuming shared is the safest option.
    // Once the compiler is fixed, the proper typeinfo should be forwarded.
    //
    return _d_arrayappendT(typeid(shared char[]), x, appendthis);
}

unittest
{
    import core.exception : UnicodeException;

    /* Using inline try {} catch {} blocks fails to catch the UnicodeException
     * thrown.
     * https://issues.dlang.org/show_bug.cgi?id=16799
     */
    static void assertThrown(T : Throwable = Exception, E)(lazy E expr, string msg)
    {
        try
            expr;
        catch (T e) {
            assert(e.msg == msg);
            return;
        }
    }

    static void f()
    {
        string ret;
        int i = -1;
        ret ~= i;
    }

    assertThrown!UnicodeException(f(), "Invalid UTF-8 sequence");
}


/**
 * Append dchar to wchar[]
 */
extern (C) void[] _d_arrayappendwd(ref byte[] x, dchar c)
{
    // c could encode into from 1 to 2 w characters
    wchar[2] buf = void;
    byte[] appendthis; // passed to appendT
    if (c <= 0xFFFF)
    {
        buf.ptr[0] = cast(wchar) c;
        // note that although we are passing only 1 byte here, appendT
        // interprets this as being an array of wchar, making the necessary
        // casts.
        appendthis = (cast(byte *)buf.ptr)[0..1];
    }
    else
    {
        // surrogate pair for code points beyond the BMP
        buf.ptr[0] = cast(wchar) ((((c - 0x10000) >> 10) & 0x3FF) + 0xD800);
        buf.ptr[1] = cast(wchar) (((c - 0x10000) & 0x3FF) + 0xDC00);
        // ditto from above.
        appendthis = (cast(byte *)buf.ptr)[0..2];
    }

    //
    // TODO: This always assumes the array type is shared, because we do not
    // get a typeinfo from the compiler.  Assuming shared is the safest option.
    // Once the compiler is fixed, the proper typeinfo should be forwarded.
    //
    return _d_arrayappendT(typeid(shared wchar[]), x, appendthis);
}


/**
 * Concatenate two arrays (`x ~ y`) into a freshly allocated array.
 */
extern (C) byte[] _d_arraycatT(const TypeInfo ti, byte[] x, byte[] y)
out (result)
{
    auto tinext = unqualify(ti.next);
    auto sizeelem = tinext.tsize;              // array element size
    debug(PRINTF) printf("_d_arraycatT(%d,%p ~ %d,%p sizeelem = %d => %d,%p)\n", x.length, x.ptr, y.length, y.ptr, sizeelem, result.length, result.ptr);
    assert(result.length == x.length + y.length);

    // If a postblit is involved, the contents of result might rightly differ
    // from the bitwise concatenation of x and y.
    if (!hasPostblit(tinext))
    {
        for (size_t i = 0; i < x.length * sizeelem; i++)
            assert((cast(byte*)result)[i] == (cast(byte*)x)[i]);
        for (size_t i = 0; i < y.length * sizeelem; i++)
            assert((cast(byte*)result)[x.length * sizeelem + i] == (cast(byte*)y)[i]);
    }

    size_t cap = GC.sizeOf(result.ptr);
    assert(!cap || cap > result.length * sizeelem);
}
body
{
    version (none)
    {
        /* Cannot use this optimization because:
         *  char[] a, b;
         *  char c = 'a';
         *  b = a ~ c;
         *  c = 'b';
         * will change the contents of b.
         */
        if (!y.length)
            return x;
        if (!x.length)
            return y;
    }

    auto tinext = unqualify(ti.next);
    auto sizeelem = tinext.tsize;              // array element size
    debug(PRINTF) printf("_d_arraycatT(%d,%p ~ %d,%p sizeelem = %d)\n", x.length, x.ptr, y.length, y.ptr, sizeelem);
    size_t xlen = x.length * sizeelem;
    size_t ylen = y.length * sizeelem;
    size_t len  = xlen + ylen;

    if (!len)
        return null;

    auto info = __arrayAlloc(len, ti, tinext);
    byte* p = cast(byte*)__arrayStart(info);
    p[len] = 0; // guessing this is to optimize for null-terminated arrays?
    memcpy(p, x.ptr, xlen);
    memcpy(p + xlen, y.ptr, ylen);
    // do postblit processing
    __doPostblit(p, xlen + ylen, tinext);

    auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
    __setArrayAllocLength(info, len, isshared, tinext);
    return p[0 .. x.length + y.length];
}


/**
 * Concatenate any number of arrays into one freshly allocated array
 * (backs the multi-operand `a ~ b ~ c` form).
 */
extern (C) void[] _d_arraycatnTX(const TypeInfo ti, byte[][] arrs)
{
    size_t length;
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;   // array element size

    foreach (b; arrs)
        length += b.length;

    if (!length)
        return null;

    auto allocsize = length * size;
    auto info = __arrayAlloc(allocsize, ti, tinext);
    auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
    __setArrayAllocLength(info, allocsize, isshared, tinext);
    void *a = __arrayStart (info);

    // copy the operands back-to-back; j tracks the byte offset written so far
    size_t j = 0;
    foreach (b; arrs)
    {
        if (b.length)
        {
            memcpy(a + j, b.ptr, b.length * size);
            j += b.length * size;
        }
    }

    // do postblit processing
    __doPostblit(a, j, tinext);

    return a[0..length];
}


/**
 * Allocate the array, rely on the caller to do the initialization of the array.
 */
extern (C)
void* _d_arrayliteralTX(const TypeInfo ti, size_t length)
{
    auto tinext = unqualify(ti.next);
    auto sizeelem = tinext.tsize;              // array element size
    void* result;

    debug(PRINTF) printf("_d_arrayliteralTX(sizeelem = %d, length = %d)\n", sizeelem, length);
    if (length == 0 || sizeelem == 0)
        result = null;
    else
    {
        auto allocsize = length * sizeelem;
        auto info = __arrayAlloc(allocsize, ti, tinext);
        auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
        __setArrayAllocLength(info, allocsize, isshared, tinext);
        result = __arrayStart(info);
    }
    return result;
}


unittest
{
    int[] a;
    int[] b;
    int i;

    a = new int[3];
    a[0] = 1; a[1] = 2; a[2] = 3;
    b = a.dup;
    assert(b.length == 3);
    for (i = 0; i < 3; i++)
        assert(b[i] == i + 1);

    // test slice appending
    b = a[0..1];
    b ~= 4;
    for (i = 0; i < 3; i++)
        assert(a[i] == i + 1);

    // test reserving
    char[] arr = new char[4093];
    for (i = 0; i < arr.length; i++)
        arr[i] = cast(char)(i % 256);

    // note that these two commands used to cause corruption, which may not be
    // detected.
    arr.reserve(4094);
    auto arr2 = arr ~ "123";
    assert(arr2[0..arr.length] == arr);
    assert(arr2[arr.length..$] == "123");

    // test postblit on array concat, append, length, etc.
    static struct S
    {
        int x;
        int pad;
        this(this)
        {
            ++x;
        }
    }
    void testPostBlit(T)()
    {
        auto sarr = new T[1];
        debug(SENTINEL) {} else
            assert(sarr.capacity == 1);

        // length extend
        auto sarr2 = sarr;
        assert(sarr[0].x == 0);
        sarr2.length += 1;
        assert(sarr2[0].x == 1);
        assert(sarr[0].x == 0);

        // append
        T s;
        sarr2 = sarr;
        sarr2 ~= s;
        assert(sarr2[0].x == 1);
        assert(sarr2[1].x == 1);
        assert(sarr[0].x == 0);
        assert(s.x == 0);

        // concat
        sarr2 = sarr ~ sarr;
        assert(sarr2[0].x == 1);
        assert(sarr2[1].x == 1);
        assert(sarr[0].x == 0);

        // concat multiple (calls different method)
        sarr2 = sarr ~ sarr ~ sarr;
        assert(sarr2[0].x == 1);
        assert(sarr2[1].x == 1);
        assert(sarr2[2].x == 1);
        assert(sarr[0].x == 0);

        // reserve capacity
        sarr2 = sarr;
        sarr2.reserve(2);
        assert(sarr2[0].x == 1);
        assert(sarr[0].x == 0);
    }
    testPostBlit!(S)();
    testPostBlit!(const(S))();
}

// cannot define structs inside unit test block, or they become nested structs.
version (unittest)
{
    struct S1
    {
        int x = 5;
    }
    struct S2
    {
        int x;
        this(int x) {this.x = x;}
    }
    struct S3
    {
        int[4] x;
        this(int x)
        {this.x[] = x;}
    }
    struct S4
    {
        int *x;
    }

}

unittest
{
    // NO_SCAN is expected for pointer-free structs; S4 holds a pointer
    // and therefore must be scanned (attr == 0).
    auto s1 = new S1;
    assert(s1.x == 5);
    assert(GC.getAttr(s1) == BlkAttr.NO_SCAN);

    auto s2 = new S2(3);
    assert(s2.x == 3);
    assert(GC.getAttr(s2) == BlkAttr.NO_SCAN);

    auto s3 = new S3(1);
    assert(s3.x == [1,1,1,1]);
    assert(GC.getAttr(s3) == BlkAttr.NO_SCAN);
    debug(SENTINEL) {} else
        assert(GC.sizeOf(s3) == 16);

    auto s4 = new S4;
    assert(s4.x == null);
    assert(GC.getAttr(s4) == 0);
}

unittest
{
    // Bugzilla 3454 - Inconsistent flag setting in GC.realloc()
    static void test(size_t multiplier)
    {
        auto p = GC.malloc(8 * multiplier, BlkAttr.NO_SCAN);
        assert(GC.getAttr(p) == BlkAttr.NO_SCAN);
        p = GC.realloc(p, 2 * multiplier, BlkAttr.NO_SCAN);
        assert(GC.getAttr(p) == BlkAttr.NO_SCAN);
    }
    test(1);
    test(1024 * 1024);
}

unittest
{
    // allocating size_t.max bytes must fail with OutOfMemoryError,
    // not crash or wrap around
    import core.exception;
    try
    {
        size_t x = size_t.max;
        byte[] big_buf = new byte[x];
    }
    catch (OutOfMemoryError)
    {
    }
}

unittest
{
    // bugzilla 13854
    auto arr = new ubyte[PAGESIZE]; // ensure page size
    auto info1 = GC.query(arr.ptr);
    assert(info1.base !is arr.ptr); // offset is required for page size or larger

    auto arr2 = arr[0..1];
    assert(arr2.capacity == 0); // cannot append
    arr2 ~= 0; // add a byte
    assert(arr2.ptr !is arr.ptr); // reallocated
    auto info2 = GC.query(arr2.ptr);
    assert(info2.base is arr2.ptr); // no offset, the capacity is small.

    // do the same via setting length
    arr2 = arr[0..1];
    assert(arr2.capacity == 0);
    arr2.length += 1;
    assert(arr2.ptr !is arr.ptr); // reallocated
    info2 = GC.query(arr2.ptr);
    assert(info2.base is arr2.ptr); // no offset, the capacity is small.

    // do the same for char[] since we need a type with an initializer to test certain runtime functions
    auto carr = new char[PAGESIZE];
    info1 = GC.query(carr.ptr);
    assert(info1.base !is carr.ptr); // offset is required for page size or larger

    auto carr2 = carr[0..1];
    assert(carr2.capacity == 0); // cannot append
    carr2 ~= 0; // add a byte
    assert(carr2.ptr !is carr.ptr); // reallocated
    info2 = GC.query(carr2.ptr);
    assert(info2.base is carr2.ptr); // no offset, the capacity is small.

    // do the same via setting length
    carr2 = carr[0..1];
    assert(carr2.capacity == 0);
    carr2.length += 1;
    assert(carr2.ptr !is carr.ptr); // reallocated
    info2 = GC.query(carr2.ptr);
    assert(info2.base is carr2.ptr); // no offset, the capacity is small.
}

unittest
{
    // bugzilla 13878
    auto arr = new ubyte[1];
    auto info = GC.query(arr.ptr);
    assert(info.attr & BlkAttr.NO_SCAN); // should be NO_SCAN
    arr ~= 0; // ensure array is inserted into cache
    debug(SENTINEL) {} else
        assert(arr.ptr is info.base);
    GC.clrAttr(arr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    auto arr2 = arr[0..1];
    assert(arr2.capacity == 0); // cannot append
    arr2 ~= 0;
    assert(arr2.ptr !is arr.ptr);
    info = GC.query(arr2.ptr);
    assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks

    // do the same via setting length
    arr = new ubyte[1];
    arr ~= 0; // ensure array is inserted into cache
    GC.clrAttr(arr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    arr2 = arr[0..1];
    assert(arr2.capacity == 0);
    arr2.length += 1;
    assert(arr2.ptr !is arr.ptr); // reallocated
    info = GC.query(arr2.ptr);
    assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks

    // do the same for char[] since we need a type with an initializer to test certain runtime functions
    auto carr = new char[1];
    info = GC.query(carr.ptr);
    assert(info.attr & BlkAttr.NO_SCAN); // should be NO_SCAN
    carr ~= 0; // ensure array is inserted into cache
    debug(SENTINEL) {} else
        assert(carr.ptr is info.base);
    GC.clrAttr(carr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    auto carr2 = carr[0..1];
    assert(carr2.capacity == 0); // cannot append
    carr2 ~= 0;
    assert(carr2.ptr !is carr.ptr);
    info = GC.query(carr2.ptr);
    assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks

    // do the same via setting length
    carr = new char[1];
    carr ~= 0; // ensure array is inserted into cache
    GC.clrAttr(carr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    carr2 = carr[0..1];
    assert(carr2.capacity == 0);
    carr2.length += 1;
    assert(carr2.ptr !is
carr.ptr); // reallocated 2513 info = GC.query(carr2.ptr); 2514 assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks 2515 } 2516 2517 // test struct finalizers 2518 debug(SENTINEL) {} else 2519 unittest 2520 { 2521 __gshared int dtorCount; 2522 static struct S1 2523 { 2524 int x; 2525 2526 ~this() 2527 { 2528 dtorCount++; 2529 } 2530 } 2531 2532 dtorCount = 0; 2533 S1* s1 = new S1; 2534 delete s1; 2535 assert(dtorCount == 1); 2536 2537 dtorCount = 0; 2538 S1[] arr1 = new S1[7]; 2539 delete arr1; 2540 assert(dtorCount == 7); 2541 2542 if (callStructDtorsDuringGC) 2543 { 2544 dtorCount = 0; 2545 S1* s2 = new S1; 2546 GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]); 2547 assert(dtorCount == 1); 2548 GC.free(s2); 2549 2550 dtorCount = 0; 2551 const(S1)* s3 = new const(S1); 2552 GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]); 2553 assert(dtorCount == 1); 2554 GC.free(cast(void*)s3); 2555 2556 dtorCount = 0; 2557 shared(S1)* s4 = new shared(S1); 2558 GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]); 2559 assert(dtorCount == 1); 2560 GC.free(cast(void*)s4); 2561 2562 dtorCount = 0; 2563 const(S1)[] carr1 = new const(S1)[5]; 2564 BlkInfo blkinf1 = GC.query(carr1.ptr); 2565 GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]); 2566 assert(dtorCount == 5); 2567 GC.free(blkinf1.base); 2568 } 2569 2570 dtorCount = 0; 2571 S1[] arr2 = new S1[10]; 2572 arr2.length = 6; 2573 arr2.assumeSafeAppend; 2574 assert(dtorCount == 4); // destructors run explicitely? 
2575 2576 if (callStructDtorsDuringGC) 2577 { 2578 dtorCount = 0; 2579 BlkInfo blkinf = GC.query(arr2.ptr); 2580 GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]); 2581 assert(dtorCount == 6); 2582 GC.free(blkinf.base); 2583 } 2584 2585 // associative arrays 2586 import rt.aaA : entryDtor; 2587 // throw away all existing AA entries with dtor 2588 GC.runFinalizers((cast(char*)(&entryDtor))[0..1]); 2589 2590 S1[int] aa1; 2591 aa1[0] = S1(0); 2592 aa1[1] = S1(1); 2593 if (callStructDtorsDuringGC) 2594 { 2595 dtorCount = 0; 2596 aa1 = null; 2597 GC.runFinalizers((cast(char*)(&entryDtor))[0..1]); 2598 assert(dtorCount == 2); 2599 } 2600 2601 int[S1] aa2; 2602 aa2[S1(0)] = 0; 2603 aa2[S1(1)] = 1; 2604 aa2[S1(2)] = 2; 2605 if (callStructDtorsDuringGC) 2606 { 2607 dtorCount = 0; 2608 aa2 = null; 2609 GC.runFinalizers((cast(char*)(&entryDtor))[0..1]); 2610 assert(dtorCount == 3); 2611 } 2612 2613 S1[2][int] aa3; 2614 aa3[0] = [S1(0),S1(2)]; 2615 aa3[1] = [S1(1),S1(3)]; 2616 if (callStructDtorsDuringGC) 2617 { 2618 dtorCount = 0; 2619 aa3 = null; 2620 GC.runFinalizers((cast(char*)(&entryDtor))[0..1]); 2621 assert(dtorCount == 4); 2622 } 2623 } 2624 2625 // test class finalizers exception handling 2626 unittest 2627 { 2628 bool test(E)() 2629 { 2630 import core.exception; 2631 static class C1 2632 { 2633 E exc; 2634 this(E exc) { this.exc = exc; } 2635 ~this() { throw exc; } 2636 } 2637 2638 bool caught = false; 2639 C1 c = new C1(new E("test onFinalizeError")); 2640 try 2641 { 2642 GC.runFinalizers((cast(uint*)&C1.__dtor)[0..1]); 2643 } 2644 catch (FinalizeError err) 2645 { 2646 caught = true; 2647 } 2648 catch (E) 2649 { 2650 } 2651 GC.free(cast(void*)c); 2652 return caught; 2653 } 2654 2655 assert( test!Exception); 2656 import core.exception : InvalidMemoryOperationError; 2657 assert(!test!InvalidMemoryOperationError); 2658 } 2659 2660 // test struct finalizers exception handling 2661 debug(SENTINEL) {} else 2662 unittest 2663 { 2664 if (!callStructDtorsDuringGC) 
2665 return; 2666 2667 bool test(E)() 2668 { 2669 import core.exception; 2670 static struct S1 2671 { 2672 E exc; 2673 ~this() { throw exc; } 2674 } 2675 2676 bool caught = false; 2677 S1* s = new S1(new E("test onFinalizeError")); 2678 try 2679 { 2680 GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]); 2681 } 2682 catch (FinalizeError err) 2683 { 2684 caught = true; 2685 } 2686 catch (E) 2687 { 2688 } 2689 GC.free(s); 2690 return caught; 2691 } 2692 2693 assert( test!Exception); 2694 import core.exception : InvalidMemoryOperationError; 2695 assert(!test!InvalidMemoryOperationError); 2696 } 2697 2698 // test bug 14126 2699 unittest 2700 { 2701 static struct S 2702 { 2703 S* thisptr; 2704 ~this() { assert(&this == thisptr); thisptr = null;} 2705 } 2706 2707 S[] test14126 = new S[2048]; // make sure we allocate at least a PAGE 2708 foreach (ref s; test14126) 2709 { 2710 s.thisptr = &s; 2711 } 2712 } 2713