Lines Matching +full:use-guard-pages

1 //===-- combined.h ----------------------------------------------*- C++ -*-===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
42 // header files. We only declare/use it when targeting the platform.
130 reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
158 if (getFlags()->may_return_null)
160 if (getFlags()->zero_contents)
162 else if (getFlags()->pattern_fill_contents)
164 if (getFlags()->dealloc_type_mismatch)
166 if (getFlags()->delete_size_mismatch)
173 static_cast<u32>(getFlags()->quarantine_max_chunk_size);
177 // the PrimaryConfig and CacheConfig, consider deprecating the use of
179 const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
183 static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
184 static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
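
The two lines above show how the kilobyte-valued quarantine flags are turned into byte counts before being handed to the quarantine. A minimal standalone sketch of that conversion, with made-up flag values standing in for getFlags():

#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical flag values; the real ones come from getFlags().
  int QuarantineSizeKb = 256;            // quarantine_size_kb
  int ThreadLocalQuarantineSizeKb = 64;  // thread_local_quarantine_size_kb
  int QuarantineMaxChunkSize = 2048;     // quarantine_max_chunk_size

  // KB-valued flags become byte counts via a 10-bit shift, matching the
  // `<< 10` visible in the lines above.
  size_t QuarantineSizeBytes = static_cast<size_t>(QuarantineSizeKb) << 10;
  size_t ThreadLocalQuarantineSizeBytes =
      static_cast<size_t>(ThreadLocalQuarantineSizeKb) << 10;

  printf("global quarantine: %zu bytes, per-thread: %zu bytes, "
         "max quarantined chunk: %d bytes\n",
         QuarantineSizeBytes, ThreadLocalQuarantineSizeBytes,
         QuarantineMaxChunkSize);
  return 0;
}
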
190 RB->Depot->enable();
198 RB->Depot->disable();
201 // Initialize the embedded GWP-ASan instance. Requires the main allocator to
206 Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
208 getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
209 Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
210 Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
211 Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
212 // Embedded GWP-ASan is locked through the Scudo atfork handler (via
213 // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
227 GuardedAlloc.getAllocatorState()->maximumAllocationSize();
253 if (getFlags()->GWP_ASAN_InstallSignalHandlers)
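
The GWP_ASAN_* lines above forward Scudo's flags into the embedded GWP-ASan instance's option block before it is initialized. A hedged sketch of that hand-off, using stand-in Flags and Options structs rather than the real gwp_asan::options::Options:

#include <cstdio>

// Illustrative stand-in for the GWP-ASan options block; field names follow
// the flags visible in the excerpts above.
struct GwpAsanOptions {
  bool Enabled = false;
  int MaxSimultaneousAllocations = 0;
  int SampleRate = 0;
  bool InstallSignalHandlers = false;
  bool Recoverable = false;
};

// Hypothetical flag source standing in for Scudo's getFlags().
struct Flags {
  bool GWP_ASAN_Enabled = true;
  int GWP_ASAN_MaxSimultaneousAllocations = 32;
  int GWP_ASAN_SampleRate = 5000;
  bool GWP_ASAN_InstallSignalHandlers = true;
  bool GWP_ASAN_Recoverable = false;
};

int main() {
  Flags F;
  GwpAsanOptions Opt;
  Opt.Enabled = F.GWP_ASAN_Enabled;
  Opt.MaxSimultaneousAllocations = F.GWP_ASAN_MaxSimultaneousAllocations;
  Opt.SampleRate = F.GWP_ASAN_SampleRate;
  Opt.InstallSignalHandlers = F.GWP_ASAN_InstallSignalHandlers;
  Opt.Recoverable = F.GWP_ASAN_Recoverable;
  // Per the comment above, atfork locking is handled by the outer allocator,
  // not by a separate GWP-ASan atfork handler.
  printf("GWP-ASan sampled 1/%d, up to %d live guarded allocations\n",
         Opt.SampleRate, Opt.MaxSimultaneousAllocations);
  return 0;
}
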
262 // The Cache must be provided zero-initialized.
263 void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
266 // - draining the local quarantine cache to the global quarantine;
267 // - releasing the cached pointers back to the Primary;
268 // - unlinking the local stats from the global ones (destroying the cache does
271 TSD->assertLocked(/*BypassCheck=*/true);
272 Quarantine.drain(&TSD->getQuarantineCache(),
273 QuarantineCallback(*this, TSD->getCache()));
274 TSD->getCache().destroy(&Stats);
278 TSD->assertLocked(/*BypassCheck=*/true);
279 Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
280 QuarantineCallback(*this, TSD->getCache()));
281 TSD->getCache().drain();
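
The commitBack/drainCache lines above perform thread-local teardown in a fixed order: drain the local quarantine into the global one first, then drain or destroy the size-class cache. A simplified sketch of that ordering with illustrative stand-in types (not Scudo's TSD, quarantine, or cache classes):

#include <cstdio>
#include <vector>

// Step 1: hand everything held locally to the global quarantine.
struct LocalQuarantine {
  std::vector<void *> Batches;
  void drainTo(std::vector<void *> &Global) {
    Global.insert(Global.end(), Batches.begin(), Batches.end());
    Batches.clear();
  }
};

// Step 2: give cached blocks back to the primary and unlink local stats.
struct LocalCache {
  std::vector<void *> FreeBlocks;
  void destroy() { FreeBlocks.clear(); }
};

int main() {
  std::vector<void *> GlobalQuarantine;
  LocalQuarantine Q;
  LocalCache C;
  // Quarantine first, then the cache, mirroring the order in the excerpts.
  Q.drainTo(GlobalQuarantine);
  C.destroy();
  printf("global quarantine now holds %zu batches\n", GlobalQuarantine.size());
  return 0;
}
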
315 return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
330 // can use to determine which tag mask to use.
375 static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
390 Block = TSD->getCache().allocate(ClassId);
396 Block = TSD->getCache().allocate(++ClassId);
416 ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size);
524 // Pointer has to be allocated with a malloc-type function. Some
544 OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
551 if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
561 OldSize - NewSize);
567 : BlockEnd -
587 // allow for potential further in-place realloc. The gains of such a trick
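
The reallocate() lines above decide whether a resize can reuse the existing block: growth that still fits, or a shrink of less than one page, keeps the chunk in place, while a larger shrink falls through to a fresh allocation so memory can actually be reclaimed. A hedged sketch of that decision, with getPageSizeCached() replaced by an assumed constant:

#include <cstddef>
#include <cstdio>

// Illustrative only: reuse the current block when the new size fits and the
// size change is small; a shrink of a page or more is worth moving.
static bool canReuseInPlace(size_t OldSize, size_t NewSize, size_t BlockRoom,
                            size_t PageSize) {
  if (NewSize > BlockRoom)
    return false;  // does not fit in the current block
  return NewSize > OldSize || (OldSize - NewSize) < PageSize;
}

int main() {
  const size_t PageSize = 4096;  // assumed; the real value is queried at runtime
  printf("%d\n", canReuseInPlace(1 << 20, (1 << 20) - 128, 1 << 20, PageSize));   // 1
  printf("%d\n", canReuseInPlace(1 << 20, (1 << 20) - 8192, 1 << 20, PageSize));  // 0
  return 0;
}
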
597 // TODO(kostyak): disable() is currently best-effort. There are some small
638 Buffer[Size - 1] = '\0';
665 // within the provided memory range. Said callback must not use this allocator
720 // Enabling odd/even tags involves a tradeoff between use-after-free
724 // use-after-free is less likely to be detected because the tag space for
725 // any particular chunk is cut in half. Therefore we use this tuning
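
The comment above describes the odd/even-tag tuning knob: adjacent blocks get tags of opposite parity so a linear overflow into a neighbour is always detected, at the cost of halving the tag space available for catching use-after-free. The sketch below illustrates the idea only; the mask values and block-index computation are hypothetical, not the allocator's actual mask computation:

#include <cstdint>
#include <cstdio>

// Blocks at even positions draw tags from one half of the tag space and
// blocks at odd positions from the other half, so neighbours always differ.
static uint16_t excludedTagMask(uintptr_t BlockAddr, uintptr_t BlockSize) {
  bool OddBlock = (BlockAddr / BlockSize) & 1;
  // Exclude even tags for odd blocks and odd tags for even blocks.
  return OddBlock ? 0x5555 : 0xAAAA;
}

int main() {
  printf("%#x\n", static_cast<unsigned>(excludedTagMask(0x1000, 0x100)));  // even block
  printf("%#x\n", static_cast<unsigned>(excludedTagMask(0x1100, 0x100)));  // odd block
  return 0;
}
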
733 // We leave it to the various sub-components to decide whether or not they
734 // want to handle the option, but we do not want to short-circuit
804 // may end up calling the allocator (via pthread_atfork, via the post-init
816 if (getFlags()->allocation_ring_buffer_size <= 0) {
844 return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr;
850 return RB ? RB->StackDepotSize : 0;
869 return RB && RB->RingBufferElements
870 ? ringBufferSizeInBytes(RB->RingBufferElements)
879 if (!Depot->find(Hash, &RingPos, &Size))
882 Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
907 if (!Depot->isValid(DepotSize))
951 // These are indexes into an "array" of 32-bit values that store information
954 // negative indexes may be used. The smallest index that may be used is -2,
958 static const sptr MemTagAllocationTraceIndex = -2;
959 static const sptr MemTagAllocationTidIndex = -1;
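
The MemTagAllocationTraceIndex/MemTagAllocationTidIndex lines above treat the memory just before the user pointer as an array of 32-bit slots addressed with negative indices. A small illustration of that addressing using an ordinary malloc'd buffer; the 16-byte header reserve and the stored values are made up:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
  const int TraceIndex = -2, TidIndex = -1;  // same indices as above
  void *Raw = malloc(64);
  // Pretend the first 16 bytes are header space and user data starts after it.
  uint32_t *User = reinterpret_cast<uint32_t *>(
      reinterpret_cast<char *>(Raw) + 16);
  User[TraceIndex] = 0xDEADBEEF;  // packed stack-trace handle (hypothetical)
  User[TidIndex] = 1234;          // allocating thread id (hypothetical)
  printf("trace=%#x tid=%u\n", User[TraceIndex], User[TidIndex]);
  free(Raw);
  return 0;
}
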
1023 SizeClassMap::MaxSize - MinAlignment);
1025 (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
1034 // The following is an over-approximation that works for our needs.
1035 const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
1049 reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
1050 (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
1055 const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
1056 if (LIKELY(Header->ClassId))
1060 return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
1061 reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
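
The getSize() lines above rely on a dual encoding of the header field: chunks served by the primary (ClassId != 0) store the requested size directly, while secondary-backed chunks store the unused tail so the size is recovered from the block end. A minimal sketch of that recovery with hypothetical addresses:

#include <cstddef>
#include <cstdint>
#include <cstdio>

static uintptr_t getSize(uintptr_t UserPtr, uintptr_t BlockEnd,
                         unsigned ClassId, uintptr_t SizeOrUnusedBytes) {
  if (ClassId != 0)
    return SizeOrUnusedBytes;  // primary: the field is the size itself
  // Secondary: the field is the number of unused bytes at the block's tail.
  return BlockEnd - UserPtr - SizeOrUnusedBytes;
}

int main() {
  uintptr_t User = 0x100000, End = 0x200000;  // hypothetical chunk bounds
  printf("%zu\n", static_cast<size_t>(getSize(User, End, 0, 64)));    // secondary
  printf("%zu\n", static_cast<size_t>(getSize(User, End, 3, 4096)));  // primary
  return 0;
}
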
1085 const uptr Offset = UserPtr - DefaultAlignedPtr;
1126 // faulting potentially large numbers of pages for large secondary
1127 // allocations. We assume that guard pages are enough to protect these
1162 // reclaimed (since we never use zero as the chunk tag), or that the
1168 // means that all following pages may need to be retagged. On the other
1169 // hand, if it is nonzero, we can assume that all following pages are
1170 // still tagged, according to the logic that if any of the pages
1188 // UAF tag. But if tagging was disabled per-thread when the memory
1192 Min(Size, roundUp(PrevEnd - TaggedUserPtr,
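
The roundUp(PrevEnd - TaggedUserPtr, ...) line above rounds the region that needs retagging up to the memory-tagging granule. A tiny sketch of that rounding, assuming the 16-byte MTE granule instead of querying archMemoryTagGranuleSize():

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Round X up to the next multiple of Boundary; Boundary must be a power of two.
static uintptr_t roundUp(uintptr_t X, uintptr_t Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

int main() {
  const uintptr_t Granule = 16;  // assumed MTE granule size
  printf("%zu\n", static_cast<size_t>(roundUp(100, Granule)));  // 112
  printf("%zu\n", static_cast<size_t>(roundUp(112, Granule)));  // 112
  return 0;
}
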
1217 const uptr Offset = UserPtr - DefaultAlignedPtr;
1245 ((Size - 1) >= QuarantineMaxChunkSize) ||
1246 !Header->ClassId;
1248 Header->State = Chunk::State::Available;
1250 Header->State = Chunk::State::Quarantined;
1254 Header->OriginOrWasZeroed = 0U;
1259 Header->OriginOrWasZeroed =
1260 Header->ClassId && !TSDRegistry.getDisableMemInit();
1268 const uptr ClassId = Header->ClassId;
1273 CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin);
1276 // implies that we may have the chance to release some pages as well.
1286 Quarantine.put(&TSD->getQuarantineCache(),
1287 QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
1298 if (Header->ClassId && !TSDRegistry.getDisableMemInit()) {
1302 Header->ClassId);
1303 // Exclude the previous tag so that immediate use after free is
1311 if (BypassQuarantine && !Header->ClassId) {
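
The lines above compute whether a freed chunk bypasses the quarantine: it does when quarantining is effectively disabled, when the chunk exceeds quarantine_max_chunk_size, or when it came from the secondary (ClassId == 0). A hedged sketch of that predicate; the first condition (a zero quarantine cache size) is an assumption from context rather than visible in the excerpt:

#include <cstddef>
#include <cstdio>

static bool bypassQuarantine(size_t QuarantineCacheSize, size_t Size,
                             size_t QuarantineMaxChunkSize, unsigned ClassId) {
  return QuarantineCacheSize == 0 ||
         (Size - 1) >= QuarantineMaxChunkSize ||  // also catches Size == 0
         ClassId == 0;                            // secondary-backed chunk
}

int main() {
  printf("%d\n", bypassQuarantine(1 << 18, 128, 2048, 7));   // 0: quarantined
  printf("%d\n", bypassQuarantine(1 << 18, 4096, 2048, 7));  // 1: too large
  printf("%d\n", bypassQuarantine(1 << 18, 128, 2048, 0));   // 1: secondary
  return 0;
}
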
1360 // allocation, the chunk may already have a non-zero tag from the previous
1362 storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
1395 Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot);
1403 uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed);
1405 getRingBufferEntry(RB, Pos % RB->RingBufferElements);
1411 atomic_store_relaxed(&Entry->Ptr, 0);
1414 atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
1415 atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
1416 atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
1417 atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
1418 atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
1421 atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
1431 u32 Trace = collectStackTrace(RB->Depot);
1452 u32 DeallocationTrace = collectStackTrace(RB->Depot);
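
The storeRingBufferEntry() lines above follow a simple lock-free publish protocol: claim a slot with a relaxed fetch_add on Pos, clear the pointer so readers treat the slot as in-flight, fill the payload fields, then write the pointer last. A self-contained sketch of that protocol with a fixed-size, illustrative entry type:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Entry {
  std::atomic<uintptr_t> Ptr{0};
  std::atomic<uint32_t> AllocationTrace{0}, AllocationTid{0};
  std::atomic<uint32_t> AllocationSize{0};
};

static constexpr size_t NumEntries = 8;  // hypothetical buffer size
static Entry Ring[NumEntries];
static std::atomic<uintptr_t> Pos{0};

static void store(uintptr_t Ptr, uint32_t Trace, uint32_t Tid, uint32_t Size) {
  uintptr_t P = Pos.fetch_add(1, std::memory_order_relaxed);  // claim a slot
  Entry &E = Ring[P % NumEntries];
  E.Ptr.store(0, std::memory_order_relaxed);  // mark the slot as in-flight
  E.AllocationTrace.store(Trace, std::memory_order_relaxed);
  E.AllocationTid.store(Tid, std::memory_order_relaxed);
  E.AllocationSize.store(Size, std::memory_order_relaxed);
  E.Ptr.store(Ptr, std::memory_order_relaxed);  // publish by writing Ptr last
}

int main() {
  store(0x1000, 0xABCD, 42, 64);
  printf("slot 0 ptr=%#zx\n",
         static_cast<size_t>(Ring[0].Ptr.load(std::memory_order_relaxed)));
  return 0;
}
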
1461 sizeof(((scudo_error_info *)nullptr)->reports) /
1462 sizeof(((scudo_error_info *)nullptr)->reports[0]);
1476 auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
1480 *Data = &Memory[Addr - MemoryAddr];
1482 MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
1500 ChunkBegin - Chunk::getHeaderSize());
1505 if (Header->SizeOrUnusedBytes == 0)
1526 auto *R = &ErrorInfo->reports[NextErrorReport++];
1527 R->error_type =
1529 R->allocation_address = ChunkAddr;
1530 R->allocation_size = Header.SizeOrUnusedBytes;
1532 collectTraceMaybe(Depot, R->allocation_trace,
1535 R->allocation_tid = Data[MemTagAllocationTidIndex];
1544 CheckOOB(Info.BlockBegin - I * Info.BlockSize))
1559 uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
1561 for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
1563 --I) {
1565 uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
1570 uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
1571 u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
1572 u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
1573 u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
1574 u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);
1577 // For UAF we only consider in-bounds fault addresses because
1578 // out-of-bounds UAF is rare and attempting to detect it is very likely
1584 // case we are guaranteed a guard region of at least a page on either
1585 // side of the allocation (guard page on the right, guard page + tagged
1587 if (FaultAddr < EntryPtr - getPageSizeCached() ||
1597 if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
1606 auto *R = &ErrorInfo->reports[NextErrorReport++];
1608 R->error_type = USE_AFTER_FREE;
1610 R->error_type = BUFFER_UNDERFLOW;
1612 R->error_type = BUFFER_OVERFLOW;
1614 R->allocation_address = UntaggedEntryPtr;
1615 R->allocation_size = EntrySize;
1616 collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
1617 R->allocation_tid = AllocationTid;
1618 collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
1619 R->deallocation_tid = DeallocationTid;
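
The loop bounds above walk the ring buffer from the most recent write backwards over at most RingBufferElements entries, relying on unsigned arithmetic for the termination check. A small sketch of just that iteration order, with hypothetical counts:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t RingBufferElements = 4;
  uintptr_t Pos = 11;  // total number of writes so far (hypothetical)
  // Visit the RingBufferElements most recent writes, newest first.
  for (uintptr_t I = Pos - 1; I != Pos - 1 - RingBufferElements; --I) {
    uintptr_t Slot = I % RingBufferElements;
    printf("inspect write #%zu in slot %zu\n", static_cast<size_t>(I),
           static_cast<size_t>(Slot));
  }
  return 0;
}
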
1628 return Str->length();
1651 int ring_buffer_size = getFlags()->allocation_ring_buffer_size;
1684 Depot->init(RingSize, TabSize);
1693 RB->RawRingBufferMap = MemMap;
1694 RB->RingBufferElements = AllocationRingBufferSize;
1695 RB->Depot = Depot;
1696 RB->StackDepotSize = StackDepotSize;
1697 RB->RawStackDepotMap = DepotMap;
1709 RB->RawStackDepotMap.unmap(RB->RawStackDepotMap.getBase(),
1710 RB->RawStackDepotMap.getCapacity());
1711 // Note that the `RB->RawRingBufferMap` is stored on the pages managed by
1713 // operation along with unmap() won't touch inaccessible pages.
1714 MemMapT RawRingBufferMap = RB->RawRingBufferMap;
1729 return (Bytes - sizeof(AllocationRingBuffer)) /
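
The last line converts a byte budget back into an element count by subtracting the fixed ring-buffer header and dividing by the entry size, the inverse of ringBufferSizeInBytes(). A sketch of that pair of conversions with stand-in struct layouts:

#include <cstddef>
#include <cstdio>

// Illustrative layouts: a fixed header followed by an array of fixed-size entries.
struct Header { size_t Pos; size_t RingBufferElements; };
struct Entry { size_t Ptr, Trace, Tid, Size; };

static size_t ringBufferSizeInBytes(size_t Elements) {
  return sizeof(Header) + Elements * sizeof(Entry);
}
static size_t ringBufferElements(size_t Bytes) {
  return (Bytes - sizeof(Header)) / sizeof(Entry);
}

int main() {
  size_t Bytes = ringBufferSizeInBytes(1024);
  printf("%zu bytes -> %zu elements\n", Bytes, ringBufferElements(Bytes));
  return 0;
}
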