
//===-- Implementation header for a block of memory -------------*- C++ -*-===//
/// Blocks store their offsets to the previous and next blocks; the next
/// offset is also the block's size.
///
/// All blocks have their usable space aligned to max_align_t.
/// This also implies that block outer sizes are aligned to max_align_t.
/// As an example, the diagram below represents two contiguous `Block`s
/// (schematic; each block is a header holding the prev and next offsets,
/// followed by usable space):
///
/// Block 1: [ prev | next | usable space ]
/// Block 2: [ prev | next | usable space ]
///
/// As a space optimization, when a block is allocated, it consumes the prev
/// field of the following block:
///
/// Block 1 (used): [ prev | next | usable space + next block's prev field ]
/// Block 2:        [ next | usable space ]
/// The next offset of a block matches the previous offset of its next block.
/// The first block in a list is denoted by having a previous offset of `0`.
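///
/// For example, with made-up offsets: if a free block `b1` has a next offset
/// of 0x230, its successor `b2` begins 0x230 bytes past `b1`, and `b2`'s
/// previous offset is also 0x230, so `b2` can locate `b1` by subtracting
/// 0x230 from its own address.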
class Block {
  Block(const Block &other) = delete;
  Block &operator=(const Block &other) = delete;
  /// Initializes a given memory region into a first block and a sentinel last
  /// block. Returns the first block, which has its usable space aligned to
  /// max_align_t.
  static optional<Block *> init(ByteSpan region);
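  // A hedged usage sketch (hypothetical buffer size; assumes the region is
  // large enough for a first block plus the sentinel last block):
  //
  //   alignas(max_align_t) static cpp::byte buf[1024];
  //   optional<Block *> first = Block::init(ByteSpan(buf, sizeof(buf)));
  //   if (first) {
  //     void *p = (*first)->usable_space(); // aligned to max_align_t
  //   }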
  /// @returns A pointer to a `Block`, given a pointer to the start of the
  /// usable space inside the block.
  LIBC_INLINE static Block *from_usable_space(void *usable_space) {
    auto *bytes = reinterpret_cast<cpp::byte *>(usable_space);
    return reinterpret_cast<Block *>(bytes - sizeof(Block));
  }
  LIBC_INLINE static const Block *from_usable_space(const void *usable_space) {
    const auto *bytes = reinterpret_cast<const cpp::byte *>(usable_space);
    return reinterpret_cast<const Block *>(bytes - sizeof(Block));
  }
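  // Round-trip sketch: for any valid `Block *b`, recovering the block from
  // its usable space yields the original pointer:
  //
  //   LIBC_ASSERT(Block::from_usable_space(b->usable_space()) == b &&
  //               "from_usable_space inverts usable_space");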
  /// @returns The total size of the block in bytes, including the header.
  size_t outer_size() const;

  LIBC_INLINE static size_t outer_size(size_t inner_size) {
    // The usable region includes the prev_ field of the next block.
    return inner_size - sizeof(prev_) + sizeof(Block);
  }

  /// @returns The number of usable bytes inside the block were it to be
  /// allocated.
  size_t inner_size() const;

  /// @returns The number of usable bytes inside a block with the given outer
  /// size were it to be allocated.
  LIBC_INLINE static size_t inner_size(size_t outer_size) {
    // The usable region includes the prev_ field of the next block.
    return outer_size - sizeof(Block) + sizeof(prev_);
  }

  /// @returns The number of usable bytes inside the block if it remains free.
  size_t inner_size_free() const;

  /// @returns The number of usable bytes inside a block with the given outer
  /// size if it remains free.
  LIBC_INLINE static size_t inner_size_free(size_t outer_size) {
    return outer_size - sizeof(Block);
  }
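  // Worked example with illustrative field sizes (say sizeof(Block) == 8 and
  // sizeof(prev_) == 4; the real values are target-dependent): an allocated
  // block with inner size 0x230 has outer size 0x230 - 4 + 8 = 0x234, while a
  // free block with that outer size has inner size 0x234 - 8 = 0x22C.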
  /// @returns A pointer to the usable space inside this block.
  LIBC_INLINE cpp::byte *usable_space() {
    auto *s = reinterpret_cast<cpp::byte *>(this) + sizeof(Block);
    return s;
  }
  LIBC_INLINE const cpp::byte *usable_space() const {
    const auto *s = reinterpret_cast<const cpp::byte *>(this) + sizeof(Block);
    return s;
  }
  /// @returns The region of memory the block manages, including the header.
  /// Attempts to split this block.
  ///
  /// If successful, the block will have an inner size of at least
  /// `new_inner_size`. The remaining space will be returned as a new block,
  /// with its usable space aligned to `usable_space_alignment`. Note that the
  /// prev_ field of the next block counts as part of the inner size of the
  /// block.
  optional<Block *> split(size_t new_inner_size,
                          size_t usable_space_alignment = alignof(max_align_t));
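  // A hedged usage sketch (assumes `b` is a free block with plenty of room):
  //
  //   if (optional<Block *> rest = b->split(128)) {
  //     // b's inner size is now at least 128, and *rest manages what is
  //     // left; the remainder immediately follows b, so b->next() == *rest.
  //   }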
  /// Merges this block with the one that comes after it.
  bool merge_next();

  /// @returns The block immediately after this one, or a null pointer if this
  /// is the last block.
  LIBC_INLINE Block *next() const {
    // ... (returns nullptr when the `last` flag is set)
    return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) +
                                     outer_size());
  }
  /// @returns The free block immediately before this one, otherwise nullptr.
  LIBC_INLINE Block *prev_free() const {
    // ... (returns nullptr unless the `prev_free` flag is set)
    return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) - prev_);
  }
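  // A hedged traversal sketch (assumes `first` came from Block::init): the
  // sentinel last block's next() is nullptr, so the loop visits every block
  // in the region, sentinel included:
  //
  //   for (Block *b = first; b != nullptr; b = b->next()) {
  //     // inspect b->outer_size(), b->prev_free(), ...
  //   }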
  /// @returns Whether the block is unavailable for allocation.
  bool used() const;

  /// Marks this block as in use.
  LIBC_INLINE void mark_used() {
    LIBC_ASSERT(next() && "last block is always considered used");
    // ...
  }

  /// Marks this block as free.
  LIBC_INLINE void mark_free() {
    LIBC_ASSERT(next() && "last block is always considered used");
    // ...
    // The next block's prev_ field becomes alive, as it is no longer part of
    // this block's used space.
    // ...
  }
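  // A hedged sketch of the invariant mark_free() establishes (assumes `b` is
  // a used block other than the sentinel):
  //
  //   b->mark_free();
  //   LIBC_ASSERT(b->next()->prev_free() == b &&
  //               "the next block now sees b as its free predecessor");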
  LIBC_INLINE Block(size_t outer_size, bool is_last) : next_(outer_size) {
    // Last blocks are not usable, so their sizes need not be aligned to
    // max_align_t; they need only be aligned to Block.
    LIBC_ASSERT(
        outer_size % (is_last ? alignof(Block) : alignof(max_align_t)) == 0 &&
        "block sizes must be aligned");
    // ...
  }
  // Returns the minimum inner size necessary for a block of that size to
  // always be able to satisfy an allocation of the given size and alignment.
  // ...

    // We must create a new block inside this one (splitting). This requires a
    // block header in addition to the requested size.
    if (add_overflow(size, sizeof(Block), size))
      // ...

    // Beyond that, padding space may need to remain in this block to ensure
    // that the usable space of the next block is aligned.
    // ...

    // This block's usable space is aligned to max_align_t >= Block. With zero
    // padding, the next block's usable space is sizeof(Block) past it, which
    // is a point aligned to Block. Thus the max padding needed is alignment -
    // alignof(Block).
    if (add_overflow(size, alignment - alignof(Block), size))
      // ...
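    // Worked example with illustrative numbers: if alignof(Block) == 4 and
    // the requested alignment is 64, the next block's usable space already
    // falls on a 4-aligned address, so at most 64 - 4 = 60 padding bytes are
    // ever needed to reach a 64-aligned address.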
  // This is the return type for `allocate` which can split one block into up
  // to three blocks.
  struct BlockInfo {
    // This is the newly aligned block. It will have the alignment requested
    // by a call to `allocate`.
    Block *block;

    // If the usable_space in the new block was not aligned according to the
    // `alignment` parameter, we will need to split into this block and the
    // `block` to ensure `block` is properly aligned. In this case, `prev` will
    // be a pointer to this new "padding" block. `prev` will be nullptr if no
    // new block was created or we were able to merge the block before the
    // original block with the "padding" block.
    Block *prev;

    // This is the remainder of the next block after splitting the `block`
    // to the requested size. This will be nullptr if the entire block was
    // used by `block`.
    Block *next;
  };
  // Divide a block into up to 3 blocks according to `BlockInfo`. Behavior is
  // undefined if allocation is not possible for the given size and alignment.
  static BlockInfo allocate(Block *block, size_t alignment, size_t size);
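  // A hedged usage sketch (hypothetical sizes; `block` must be free and large
  // enough per the contract above):
  //
  //   Block::BlockInfo info =
  //       Block::allocate(block, /*alignment=*/32, /*size=*/200);
  //   void *p = info.block->usable_space(); // 32-byte aligned
  //   // info.prev and info.next, when non-null, are leftover free blocks
  //   // that the caller should track.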
  // ...
    return align_up(ptr + sizeof(Block), usable_space_alignment) -
           sizeof(Block);

  // ...
    return align_down(ptr, usable_space_alignment) - sizeof(Block);
  /// Construct a block to represent a span of bytes. Overwrites only enough
  /// memory for the block header; the rest of the span is left alone.
  LIBC_INLINE static Block *as_block(ByteSpan bytes) {
    LIBC_ASSERT(reinterpret_cast<uintptr_t>(bytes.data()) % alignof(Block) ==
                    0 &&
                "block start must be suitably aligned");
    return ::new (bytes.data()) Block(bytes.size(), /*is_last=*/false);
  }
  // ...
    LIBC_ASSERT(reinterpret_cast<uintptr_t>(start) % alignof(Block) == 0 &&
                "block start must be suitably aligned");
    ::new (start) Block(sizeof(Block), /*is_last=*/true);
  /// Offset from this block to the previous block. 0 if this is the first
  /// block. This field is only alive when the previous block is free;
  /// otherwise, its memory is reused as part of the previous block's usable
  /// space.
  size_t prev_ = 0;

  /// Offset from this block to the next block. Valid even if this is the last
  /// block, since it equals the size of the block.
  size_t next_ = 0;
  /// Information about the current state of the block is stored in the two
  /// low-order bits of the `next_` value; block sizes are aligned to at least
  /// 4, so these bits are otherwise always zero.
  ///
  /// * If the `prev_free` flag is set, the block isn't the first and the
  ///   previous block is free.
  /// * If the `last` flag is set, the block is the sentinel last block. It is
  ///   summarily considered used and has no next block.
};

static_assert(alignof(Block) >= 4,
              "at least 2 bits must be available in block sizes for flags");
optional<Block *> Block::init(ByteSpan region) {
  // ...
  if (block_start + sizeof(Block) > last_start)
    // ...

  Block *block = /* ... */;
  block->mark_free();
  return block;
}
Block::BlockInfo Block::allocate(Block *block, size_t alignment, size_t size) {
  // ...
  BlockInfo info{block, /*prev=*/nullptr, /*next=*/nullptr};

  if (!info.block->is_usable_space_aligned(alignment)) {
    Block *original = info.block;
    // The padding block has no minimum size requirement.
    optional<Block *> maybe_aligned_block = original->split(0, alignment);
    // ...

    if (Block *prev = original->prev_free()) {
      // If there is a free block before this, we can merge the current one
      // with the newly created padding block.
      // ...
    }

    Block *aligned_block = *maybe_aligned_block;
    LIBC_ASSERT(aligned_block->is_usable_space_aligned(alignment) &&
                "The aligned block isn't aligned somehow.");
    info.block = aligned_block;
  }

  // Now get a block for the requested size.
  if (optional<Block *> next = info.block->split(size))
    info.next = *next;

  return info;
}
optional<Block *> Block::split(size_t new_inner_size,
                               size_t usable_space_alignment) {
  // ...
  // Compute the minimum outer size that produces a block of at least
  // `new_inner_size`.
  // ...
  if (/* ... */
      outer_size() - new_outer_size < sizeof(Block))
    // ...

  Block *new_block = as_block(new_region);
  mark_free(); // Free status for this block is now stored in new_block.
  // ...
}
bool Block::merge_next() {
  // ...
}