Lines Matching +full:can +full:- +full:primary

1 //===-- combined.h ----------------------------------------------*- C++ -*-===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
108 Allocator.Primary.Options.load())))
130 reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
158 if (getFlags()->may_return_null)
159 Primary.Options.set(OptionBit::MayReturnNull);
160 if (getFlags()->zero_contents)
161 Primary.Options.setFillContentsMode(ZeroFill);
162 else if (getFlags()->pattern_fill_contents)
163 Primary.Options.setFillContentsMode(PatternOrZeroFill);
164 if (getFlags()->dealloc_type_mismatch)
165 Primary.Options.set(OptionBit::DeallocTypeMismatch);
166 if (getFlags()->delete_size_mismatch)
167 Primary.Options.set(OptionBit::DeleteSizeMismatch);
170 Primary.Options.set(OptionBit::UseMemoryTagging);
173 static_cast<u32>(getFlags()->quarantine_max_chunk_size);
179 const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
180 Primary.init(ReleaseToOsIntervalMs);
183 static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
184 static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
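
The initOptions fragments above copy each boolean flag into a sticky bit of a shared atomic option word that the hot paths later load once per operation. A minimal sketch of that flag-to-option-bit wiring, using a hypothetical Flags struct and a plain std::atomic bitmask rather than scudo's real getFlags()/AtomicOptions types:

#include <atomic>
#include <cstdint>

enum class OptionBit : uint32_t {
  MayReturnNull = 1u << 0,
  DeallocTypeMismatch = 1u << 1,
  DeleteSizeMismatch = 1u << 2,
};

// Hypothetical stand-in for scudo's atomic option word.
struct AtomicOptions {
  std::atomic<uint32_t> Val{0};
  void set(OptionBit O) {
    Val.fetch_or(static_cast<uint32_t>(O), std::memory_order_relaxed);
  }
  bool get(OptionBit O) const {
    return Val.load(std::memory_order_relaxed) & static_cast<uint32_t>(O);
  }
};

// Hypothetical stand-in for the flags returned by getFlags().
struct Flags {
  bool may_return_null = true;
  bool dealloc_type_mismatch = false;
  bool delete_size_mismatch = true;
};

void initOptions(const Flags &F, AtomicOptions &Options) {
  // Each boolean flag becomes one sticky bit in the shared option word.
  if (F.may_return_null)
    Options.set(OptionBit::MayReturnNull);
  if (F.dealloc_type_mismatch)
    Options.set(OptionBit::DeallocTypeMismatch);
  if (F.delete_size_mismatch)
    Options.set(OptionBit::DeleteSizeMismatch);
}

int main() {
  Flags F;
  AtomicOptions Options;
  initOptions(F, Options);
  return Options.get(OptionBit::MayReturnNull) ? 0 : 1;
}
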
190 RB->Depot->enable();
198 RB->Depot->disable();
201 // Initialize the embedded GWP-ASan instance. Requires the main allocator to
206 Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
208 getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
209 Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
210 Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
211 Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
212 // Embedded GWP-ASan is locked through the Scudo atfork handler (via
213 // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
227 GuardedAlloc.getAllocatorState()->maximumAllocationSize();
250 Primary.unmapTestOnly();
253 if (getFlags()->GWP_ASAN_InstallSignalHandlers)
262 // The Cache must be provided zero-initialized.
263 void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
266 // - draining the local quarantine cache to the global quarantine;
267 // - releasing the cached pointers back to the Primary;
268 // - unlinking the local stats from the global ones (destroying the cache does
271 TSD->assertLocked(/*BypassCheck=*/true);
272 Quarantine.drain(&TSD->getQuarantineCache(),
273 QuarantineCallback(*this, TSD->getCache()));
274 TSD->getCache().destroy(&Stats);
278 TSD->assertLocked(/*BypassCheck=*/true);
279 Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
280 QuarantineCallback(*this, TSD->getCache()));
281 TSD->getCache().drain();
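
The commitBack/drainCaches fragments describe flushing per-thread state back to global structures: the local quarantine is drained into the global quarantine and the thread's cache is drained or destroyed before the TSD goes away. A simplified sketch of that commit-local-state-back pattern; LocalCache and GlobalPool are illustrative stand-ins, not scudo's TSD or quarantine types:

#include <mutex>
#include <vector>

// Illustrative global pool guarded by a mutex.
struct GlobalPool {
  std::mutex M;
  std::vector<void *> FreeBlocks;
  void pushBatch(std::vector<void *> &Batch) {
    std::lock_guard<std::mutex> L(M);
    FreeBlocks.insert(FreeBlocks.end(), Batch.begin(), Batch.end());
    Batch.clear();
  }
};

// Illustrative per-thread cache; only its owning thread touches Cached.
struct LocalCache {
  std::vector<void *> Cached;
  void drain(GlobalPool &Pool) { Pool.pushBatch(Cached); }
};

// commitBack(): called on thread teardown; afterwards the local cache is
// empty and everything it held is visible through the global pool.
void commitBack(LocalCache &Cache, GlobalPool &Pool) { Cache.drain(Pool); }

int main() {
  GlobalPool Pool;
  LocalCache Cache;
  int Dummy = 0;
  Cache.Cached.push_back(&Dummy);
  commitBack(Cache, Pool);
  return (Pool.FreeBlocks.size() == 1 && Cache.Cached.empty()) ? 0 : 1;
}
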
315 return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
330 // can use to determine which tag mask to use.
339 const Options Options = Primary.Options.load();
375 static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
390 Block = TSD->getCache().allocate(ClassId);
396 Block = TSD->getCache().allocate(++ClassId);
416 ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size);
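
The allocate() fragments show the per-thread cache being asked for a block of the computed class id and, on failure, being asked again with a larger class id. A hedged sketch of that retry-in-a-larger-class idea; the retry loop bound and the CacheStub type are assumptions made for illustration:

#include <cstdint>

// Illustrative per-thread cache: pretend only classes >= 4 have free blocks.
struct CacheStub {
  void *allocate(uint32_t ClassId) {
    static char Slab[256];
    return ClassId >= 4 ? static_cast<void *>(Slab) : nullptr;
  }
};

// Try the computed class first; if it is empty, walk up to larger classes
// (trading some internal fragmentation for a successful allocation).
void *allocateFromPrimary(CacheStub &Cache, uint32_t ClassId,
                          uint32_t LargestClassId) {
  void *Block = Cache.allocate(ClassId);
  while (!Block && ClassId < LargestClassId)
    Block = Cache.allocate(++ClassId);
  return Block;
}

int main() {
  CacheStub Cache;
  return allocateFromPrimary(Cache, /*ClassId=*/3, /*LargestClassId=*/8) ? 0
                                                                         : 1;
}
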
463 const Options Options = Primary.Options.load();
466 // With the exception of memalign'd chunks, which can still be free'd.
486 const Options Options = Primary.Options.load();
524 // Pointer has to be allocated with a malloc-type function. Some
544 OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
551 if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
561 OldSize - NewSize);
567 : BlockEnd -
587 // allow for potential further in-place realloc. The gains of such a trick
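
The reallocate fragments keep the existing block when the new size still fits in it and any shrink is smaller than a page, only falling back to allocate-copy-free otherwise. A sketch of that decision under those assumptions, with plain malloc/free standing in for the allocator:

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <cstring>

constexpr size_t PageSize = 4096; // assumed page size for the sketch

void *reallocateSketch(void *OldPtr, size_t OldSize, uintptr_t BlockEnd,
                       size_t NewSize) {
  const uintptr_t Ptr = reinterpret_cast<uintptr_t>(OldPtr);
  // Fast path: the request still fits in the old block, and we either grow
  // within it or shrink by less than a page, so reuse it in place.
  if (Ptr + NewSize <= BlockEnd &&
      (NewSize > OldSize || (OldSize - NewSize) < PageSize))
    return OldPtr;
  // Slow path: move the data into a freshly sized allocation.
  void *NewPtr = std::malloc(NewSize);
  if (!NewPtr)
    return nullptr;
  std::memcpy(NewPtr, OldPtr, std::min(OldSize, NewSize));
  std::free(OldPtr);
  return NewPtr;
}

int main() {
  void *P = std::malloc(256);
  if (!P)
    return 1;
  // Shrinking 256 -> 128 stays within the block: the pointer is reused.
  void *Q =
      reallocateSketch(P, 256, reinterpret_cast<uintptr_t>(P) + 256, 128);
  std::free(Q);
  return 0;
}
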
597 // TODO(kostyak): disable() is currently best-effort. There are some small
608 Primary.disable();
617 Primary.enable();
629 // function. This can be called with a null buffer or zero size for buffer
638 Buffer[Size - 1] = '\0';
651 Primary.getFragmentationInfo(&Str);
660 Primary.releaseToOS(ReleaseType);
666 // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
684 // A chunk header can either have a zero tag (tagged primary) or the
685 // header tag (secondary, or untagged primary). We don't know which so
699 if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
705 Primary.iterateOverBlocks(Lambda);
714 return Primary.Options.load().get(OptionBit::MayReturnNull);
720 // Enabling odd/even tags involves a tradeoff between use-after-free
724 // use-after-free is less likely to be detected because the tag space for
728 Primary.Options.set(OptionBit::UseOddEvenTags);
730 Primary.Options.clear(OptionBit::UseOddEvenTags);
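
The fragments toggling OptionBit::UseOddEvenTags refer to the tradeoff of partitioning memory tags by block parity: adjacent blocks can never share a tag, so linear overflows are always caught, but each block picks from half as many tags, so a reused block is more likely to get its old tag back and a use-after-free may go unnoticed. An illustrative sketch of deriving an odd/even exclusion mask from a block's position, assuming 16 MTE-style tags; this is not scudo's actual mask computation:

#include <cstdint>
#include <cstdio>

// Hypothetical helper: returns a 16-bit mask with one bit set per tag that
// this block is NOT allowed to use. Blocks in even slots exclude all odd
// tags and vice versa, so two neighbours can never carry the same tag.
uint16_t oddEvenExcludeMask(uintptr_t BlockAddr, uintptr_t BlockSize) {
  const bool EvenSlot = ((BlockAddr / BlockSize) & 1) == 0;
  const uint16_t OddTags = 0xaaaa;  // bits 1, 3, 5, ...
  const uint16_t EvenTags = 0x5555; // bits 0, 2, 4, ...
  return EvenSlot ? OddTags : EvenTags;
}

int main() {
  const uintptr_t Size = 64;
  for (uintptr_t Slot = 0; Slot < 4; ++Slot) {
    const uintptr_t Addr = 0x1000 + Slot * Size;
    std::printf("slot %zu excludes tag mask 0x%04x\n",
                static_cast<size_t>(Slot),
                static_cast<unsigned>(oddEvenExcludeMask(Addr, Size)));
  }
  return 0;
}
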
733 // We leave it to the various sub-components to decide whether or not they
734 // want to handle the option, but we do not want to short-circuit
736 const bool PrimaryResult = Primary.setOption(O, Value);
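
The setOption fragment notes that the option is handed to every sub-component without short-circuiting, so each component sees it even if an earlier one rejects it. A small sketch of that pattern with placeholder components:

// Placeholder sub-components that each accept or reject an option.
struct PrimaryStub {
  bool setOption(int O, long Value) {
    (void)O;
    (void)Value;
    return true;
  }
};
struct SecondaryStub {
  bool setOption(int O, long Value) {
    (void)O;
    (void)Value;
    return false; // pretend the secondary rejects it
  }
};

// Evaluate every component first, then combine: unlike
// `Primary.setOption(...) && Secondary.setOption(...)`, this never skips a
// component just because an earlier one failed.
bool setOption(PrimaryStub &Primary, SecondaryStub &Secondary, int O,
               long Value) {
  const bool PrimaryResult = Primary.setOption(O, Value);
  const bool SecondaryResult = Secondary.setOption(O, Value);
  return PrimaryResult && SecondaryResult;
}

int main() {
  PrimaryStub Primary;
  SecondaryStub Secondary;
  // Both components see the option even though the overall result is false.
  return setOption(Primary, Secondary, /*O=*/0, /*Value=*/1) ? 1 : 0;
}
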
798 return useMemoryTagging<AllocatorConfig>(Primary.Options.load());
804 // may end up calling the allocator (via pthread_atfork, via the post-init
810 Primary.Options.clear(OptionBit::UseMemoryTagging);
816 if (getFlags()->allocation_ring_buffer_size <= 0) {
817 DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
823 Primary.Options.set(OptionBit::TrackAllocationStacks);
825 Primary.Options.clear(OptionBit::TrackAllocationStacks);
830 Primary.Options.setFillContentsMode(FillContents);
836 Primary.Options.set(OptionBit::AddLargeAllocationSlack);
838 Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
844 return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr;
850 return RB ? RB->StackDepotSize : 0;
854 return Primary.getRegionInfoArrayAddress();
869 return RB && RB->RingBufferElements
870 ? ringBufferSizeInBytes(RB->RingBufferElements)
879 if (!Depot->find(Hash, &RingPos, &Size))
882 Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
902 // check for corrupted StackDepot. First we need to check whether we can
907 if (!Depot->isValid(DepotSize))
920 // Check the ring buffer. For primary allocations this will only find UAF;
921 // for secondary allocations we can find either UAF or OOB.
951 // These are indexes into an "array" of 32-bit values that store information
954 // negative indexes may be used. The smallest index that may be used is -2,
958 static const sptr MemTagAllocationTraceIndex = -2;
959 static const sptr MemTagAllocationTidIndex = -1;
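
The fragments around MemTagAllocationTraceIndex and MemTagAllocationTidIndex describe storing the allocation's stack-trace hash and thread id as 32-bit values at negative indexes just below the user pointer, inside the space reserved ahead of the allocation. A standalone sketch of that layout, using a plain 16-byte malloc'd prefix rather than scudo's chunk header:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

constexpr ptrdiff_t MemTagAllocationTraceIndex = -2;
constexpr ptrdiff_t MemTagAllocationTidIndex = -1;

// Hand out a pointer 16 bytes into a malloc'd buffer so that two 32-bit
// metadata slots fit immediately below it (indexes -2 and -1).
void *allocateWithMetadata(size_t Size, uint32_t TraceHash, uint32_t Tid) {
  char *Base = static_cast<char *>(std::malloc(Size + 16));
  if (!Base)
    return nullptr;
  char *UserPtr = Base + 16;
  auto *Ptr32 = reinterpret_cast<uint32_t *>(UserPtr);
  Ptr32[MemTagAllocationTraceIndex] = TraceHash; // 8 bytes below UserPtr
  Ptr32[MemTagAllocationTidIndex] = Tid;         // 4 bytes below UserPtr
  return UserPtr;
}

int main() {
  void *P = allocateWithMetadata(32, /*TraceHash=*/0xdeadbeefu, /*Tid=*/42);
  if (!P)
    return 1;
  auto *Ptr32 = reinterpret_cast<uint32_t *>(P);
  std::printf("trace=%#x tid=%u\n", Ptr32[MemTagAllocationTraceIndex],
              Ptr32[MemTagAllocationTidIndex]);
  std::free(static_cast<char *>(P) - 16);
  return 0;
}
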
965 PrimaryT Primary;
1013 // Verify that the header offset field can hold the maximum offset. In the
1015 // offset will always be small. In the case of the Primary, the worst case
1023 SizeClassMap::MaxSize - MinAlignment);
1025 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
1030 // Verify that we can fit the maximum size or amount of unused bytes in the
1032 // case scenario happens in the Primary. It will depend on the second to
1033 // last and last class sizes, as well as the dynamic base for the Primary.
1034 // The following is an over-approximation that works for our needs.
1035 const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
1049 reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
1050 (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
1055 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
1056 if (LIKELY(Header->ClassId))
1060 return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
1061 reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
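
The getBlockBegin/getSize fragments show the two encodings of the header's SizeOrUnusedBytes field: for Primary-backed chunks (nonzero ClassId) it is the requested size itself, while for Secondary-backed chunks it is the slack at the end of the block, so the size is recovered as BlockEnd - Ptr - UnusedBytes. A small numeric sketch of both cases with made-up values:

#include <cassert>
#include <cstdint>

struct FakeHeader {
  uint32_t ClassId;           // nonzero => Primary-backed chunk
  uint64_t SizeOrUnusedBytes; // size (Primary) or trailing slack (Secondary)
};

uint64_t getSize(uint64_t Ptr, uint64_t BlockEnd, const FakeHeader &H) {
  if (H.ClassId) // Primary: the field already is the usable size.
    return H.SizeOrUnusedBytes;
  // Secondary: subtract the unused tail from the space after the pointer.
  return BlockEnd - Ptr - H.SizeOrUnusedBytes;
}

int main() {
  // Primary chunk: 100 requested bytes are stored directly in the header.
  const FakeHeader Prim{/*ClassId=*/3, /*SizeOrUnusedBytes=*/100};
  assert(getSize(/*Ptr=*/0x1000, /*BlockEnd=*/0x1100, Prim) == 100);

  // Secondary chunk: block spans 0x2000..0x3000 (4096 bytes), user data
  // starts 32 bytes in, 64 bytes are slack => 4096 - 32 - 64 = 4000.
  const FakeHeader Sec{/*ClassId=*/0, /*SizeOrUnusedBytes=*/64};
  assert(getSize(/*Ptr=*/0x2020, /*BlockEnd=*/0x3000, Sec) == 4000);
  return 0;
}
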
1073 // Only do content fill when it's from primary allocator because secondary
1085 const uptr Offset = UserPtr - DefaultAlignedPtr;
1088 // the chunk iteration function that can be used in debugging situations.
1111 const Options Options = Primary.Options.load();
1122 // Init the primary chunk.
1124 // We only need to zero or tag the contents for Primary backed
1125 // allocations. We only set tags for primary allocations in order to avoid
1160 // We can detect case (2) by loading the tag from the start
1166 // We can detect case (3) by moving to the next page (if covered by the
1169 // hand, if it is nonzero, we can assume that all following pages are
1185 // If an allocation needs to be zeroed (i.e. calloc) we can normally
1186 // avoid zeroing the memory now since we can rely on memory having
1188 // UAF tag. But if tagging was disabled per-thread when the memory
1192 Min(Size, roundUp(PrevEnd - TaggedUserPtr,
1217 const uptr Offset = UserPtr - DefaultAlignedPtr;
1220 // the chunk iteration function that can be used in debugging situations.
1245 ((Size - 1) >= QuarantineMaxChunkSize) ||
1246 !Header->ClassId;
1248 Header->State = Chunk::State::Available;
1250 Header->State = Chunk::State::Quarantined;
1254 Header->OriginOrWasZeroed = 0U;
1259 Header->OriginOrWasZeroed =
1260 Header->ClassId && !TSDRegistry.getDisableMemInit();
1268 const uptr ClassId = Header->ClassId;
1273 CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin);
1275 // When we have drained some blocks back to the Primary from TSD, that
1280 Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
1286 Quarantine.put(&TSD->getQuarantineCache(),
1287 QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
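
The quarantineOrDeallocateChunk fragments show the bypass condition: a freed chunk skips the quarantine when quarantining is disabled (zero cache size), the chunk is at least quarantine_max_chunk_size, or it is Secondary-backed; otherwise it is marked Quarantined and pushed into the thread-local quarantine cache. A condensed sketch of that decision with simplified types:

#include <cstdint>

enum class ChunkState { Allocated, Quarantined, Available };

struct Chunk {
  uint32_t ClassId; // 0 => Secondary-backed
  uint64_t Size;
  ChunkState State = ChunkState::Allocated;
};

struct QuarantineConfig {
  uint64_t CacheSize;    // 0 disables the quarantine entirely
  uint64_t MaxChunkSize; // chunks at or above this size bypass it
};

// Returns true when the block goes straight back to the allocator instead
// of sitting in the quarantine first.
bool quarantineOrDeallocate(Chunk &C, const QuarantineConfig &Q) {
  const bool BypassQuarantine =
      Q.CacheSize == 0 || (C.Size - 1) >= Q.MaxChunkSize || C.ClassId == 0;
  C.State =
      BypassQuarantine ? ChunkState::Available : ChunkState::Quarantined;
  return BypassQuarantine;
}

int main() {
  const QuarantineConfig Q{/*CacheSize=*/64 << 10, /*MaxChunkSize=*/2048};
  Chunk SmallPrimary{/*ClassId=*/5, /*Size=*/256};
  Chunk LargeSecondary{/*ClassId=*/0, /*Size=*/1 << 20};
  const bool Ok = !quarantineOrDeallocate(SmallPrimary, Q) && // quarantined
                  quarantineOrDeallocate(LargeSecondary, Q);  // bypasses
  return Ok ? 0 : 1;
}
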
1298 if (Header->ClassId && !TSDRegistry.getDisableMemInit()) {
1302 Header->ClassId);
1311 if (BypassQuarantine && !Header->ClassId) {
1360 // allocation, the chunk may already have a non-zero tag from the previous
1362 storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
1395 Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot);
1403 uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed);
1405 getRingBufferEntry(RB, Pos % RB->RingBufferElements);
1411 atomic_store_relaxed(&Entry->Ptr, 0);
1414 atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
1415 atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
1416 atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
1417 atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
1418 atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
1421 atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
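
The storeRingBufferEntry fragments claim a slot with a relaxed fetch_add on Pos, clear the slot's Ptr first so readers treat it as invalid while it is rewritten, fill in the payload fields, and store Ptr last to publish the entry; every field is individually atomic so a crash-time reader never sees a torn word. A self-contained sketch of that claim/invalidate/fill/publish sequence, with the field set trimmed down from the real entry:

#include <atomic>
#include <cstdint>

struct Entry {
  std::atomic<uintptr_t> Ptr{0};
  std::atomic<uint64_t> AllocationSize{0};
  std::atomic<uint32_t> AllocationTrace{0};
  std::atomic<uint32_t> AllocationTid{0};
};

struct RingBuffer {
  std::atomic<uint64_t> Pos{0};
  uint64_t Elements = 0;
  Entry *Entries = nullptr;
};

void storeEntry(RingBuffer &RB, uintptr_t Ptr, uint64_t Size, uint32_t Trace,
                uint32_t Tid) {
  // 1) Claim a slot; concurrent writers get distinct positions.
  const uint64_t Pos = RB.Pos.fetch_add(1, std::memory_order_relaxed);
  Entry &E = RB.Entries[Pos % RB.Elements];
  // 2) Invalidate first so a crash-time reader skips the half-written slot.
  E.Ptr.store(0, std::memory_order_relaxed);
  // 3) Fill the payload.
  E.AllocationSize.store(Size, std::memory_order_relaxed);
  E.AllocationTrace.store(Trace, std::memory_order_relaxed);
  E.AllocationTid.store(Tid, std::memory_order_relaxed);
  // 4) Publish by writing the pointer last.
  E.Ptr.store(Ptr, std::memory_order_relaxed);
}

int main() {
  Entry Slots[8];
  RingBuffer RB;
  RB.Elements = 8;
  RB.Entries = Slots;
  storeEntry(RB, /*Ptr=*/0x1000, /*Size=*/64, /*Trace=*/1, /*Tid=*/7);
  return 0;
}
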
1431 u32 Trace = collectStackTrace(RB->Depot);
1452 u32 DeallocationTrace = collectStackTrace(RB->Depot);
1461 sizeof(((scudo_error_info *)nullptr)->reports) /
1462 sizeof(((scudo_error_info *)nullptr)->reports[0]);
1476 auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
1480 *Data = &Memory[Addr - MemoryAddr];
1482 MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
1500 ChunkBegin - Chunk::getHeaderSize());
1505 if (Header->SizeOrUnusedBytes == 0)
1526 auto *R = &ErrorInfo->reports[NextErrorReport++];
1527 R->error_type =
1529 R->allocation_address = ChunkAddr;
1530 R->allocation_size = Header.SizeOrUnusedBytes;
1532 collectTraceMaybe(Depot, R->allocation_trace,
1535 R->allocation_tid = Data[MemTagAllocationTidIndex];
1544 CheckOOB(Info.BlockBegin - I * Info.BlockSize))
1559 uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
1561 for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
1563 --I) {
1565 uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
1570 uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
1571 u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
1572 u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
1573 u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
1574 u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);
1577 // For UAF we only consider in-bounds fault addresses because
1578 // out-of-bounds UAF is rare and attempting to detect it is very likely
1587 if (FaultAddr < EntryPtr - getPageSizeCached() ||
1597 if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
1606 auto *R = &ErrorInfo->reports[NextErrorReport++];
1608 R->error_type = USE_AFTER_FREE;
1610 R->error_type = BUFFER_UNDERFLOW;
1612 R->error_type = BUFFER_OVERFLOW;
1614 R->allocation_address = UntaggedEntryPtr;
1615 R->allocation_size = EntrySize;
1616 collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
1617 R->allocation_tid = AllocationTid;
1618 collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
1619 R->deallocation_tid = DeallocationTid;
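
The getErrorInfo fragments walk the ring buffer backwards from the most recent entry, reload each entry's fields, discard entries whose recorded allocation is not within roughly a page of the fault address, and classify the survivors as use-after-free, buffer underflow, or buffer overflow. A compact sketch of the backwards iteration and the in-bounds filter only; classification and deduplication are left out:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Entry {
  std::atomic<uintptr_t> Ptr{0};
  std::atomic<uint64_t> Size{0};
};

constexpr uintptr_t PageSize = 4096;

// Visit the Elements most recent slots, newest first, and keep the indexes
// of entries whose allocation lies within a page of the faulting address.
std::vector<size_t> findCandidates(const Entry *Entries, uint64_t Elements,
                                   uint64_t Pos, uintptr_t FaultAddr) {
  std::vector<size_t> Hits;
  for (uint64_t I = Pos - 1; I != Pos - 1 - Elements; --I) {
    const Entry &E = Entries[I % Elements];
    const uintptr_t EntryPtr = E.Ptr.load(std::memory_order_relaxed);
    if (EntryPtr == 0)
      continue; // empty slot, or one that is currently being rewritten
    const uint64_t Size = E.Size.load(std::memory_order_relaxed);
    if (FaultAddr < EntryPtr - PageSize ||
        FaultAddr >= EntryPtr + Size + PageSize)
      continue; // too far from this allocation to be related
    Hits.push_back(static_cast<size_t>(I % Elements));
  }
  return Hits;
}

int main() {
  Entry Ring[4];
  Ring[2].Ptr.store(0x10000, std::memory_order_relaxed);
  Ring[2].Size.store(64, std::memory_order_relaxed);
  // A fault just past the allocation should match the recorded entry.
  return findCandidates(Ring, 4, /*Pos=*/7, /*FaultAddr=*/0x10040).empty()
             ? 1
             : 0;
}
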
1624 Primary.getStats(Str);
1628 return Str->length();
1651 int ring_buffer_size = getFlags()->allocation_ring_buffer_size;
1684 Depot->init(RingSize, TabSize);
1693 RB->RawRingBufferMap = MemMap;
1694 RB->RingBufferElements = AllocationRingBufferSize;
1695 RB->Depot = Depot;
1696 RB->StackDepotSize = StackDepotSize;
1697 RB->RawStackDepotMap = DepotMap;
1709 RB->RawStackDepotMap.unmap(RB->RawStackDepotMap.getBase(),
1710 RB->RawStackDepotMap.getCapacity());
1711 // Note that the `RB->RawRingBufferMap` is stored on the pages managed by
1714 MemMapT RawRingBufferMap = RB->RawRingBufferMap;
1729 return (Bytes - sizeof(AllocationRingBuffer)) /
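
The last fragment converts a mapped byte size back into a ring buffer element count by subtracting the fixed header and dividing by the per-entry size. A tiny round-trip sketch of the two conversions, with illustrative struct sizes rather than scudo's real layout:

#include <cassert>
#include <cstddef>

struct Header { // fixed prefix before the entry array (illustrative)
  unsigned long long Pos;
  void *Depot;
};
struct Entry { // one ring buffer record (illustrative size)
  unsigned long long Fields[6];
};

constexpr size_t ringBufferSizeInBytes(size_t Elements) {
  return sizeof(Header) + Elements * sizeof(Entry);
}

constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
  return (Bytes - sizeof(Header)) / sizeof(Entry);
}

int main() {
  // The two conversions invert each other for any element count.
  for (size_t N = 1; N <= 1024; N *= 2)
    assert(ringBufferElementsFromBytes(ringBufferSizeInBytes(N)) == N);
  return 0;
}
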