/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_internal.h"
#include "../common/portability_macros.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Tables and "aligned" allocations are aligned on 64-byte boundaries */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_aligned_init_once,
    ZSTD_cwksp_alloc_aligned,
    ZSTD_cwksp_alloc_buffers
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't be, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                           ]
 * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Init once: these buffers must be initialized at least once before
 *   use. They should be used when we want to skip memory initialization
 *   while not triggering memory checkers (like Valgrind) when reading
 *   from this memory without writing to it first.
 *   These buffers should be used carefully as they might contain data
 *   from previous compressions.
 *   Buffers are aligned to 64 bytes.
 *
 * - Aligned: these buffers don't require any initialization before they're
 *   used. The user of the buffer should make sure they write into a buffer
 *   location before reading from it.
 *   Buffers are aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Init once / Tables
 * 3. Aligned / Tables
 * 4. Buffers / Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 * See the illustrative sketch after the struct definition below.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;
    void* initOnceStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
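
/*
 * Illustrative usage sketch (not part of this header; the buffer and sizes
 * are hypothetical). Reservations must follow the phase order documented
 * above: objects first, then init-once, aligned, and buffers; tables may be
 * reserved in any phase after the objects:
 *
 *   ZSTD_cwksp ws;
 *   ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);
 *   {   void* const obj   = ZSTD_cwksp_reserve_object(&ws, 64);
 *       U32*  const table = (U32*)ZSTD_cwksp_reserve_table(&ws, 1024);
 *       void* const algn  = ZSTD_cwksp_reserve_aligned(&ws, 256);
 *       BYTE* const buf   = ZSTD_cwksp_reserve_buffer(&ws, 100);
 *       (reserving another object here would fail: the object phase is over)
 *       if (ZSTD_cwksp_reserve_failed(&ws)) { ... handle exhaustion ... }
 *   }
 */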

/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
MEM_STATIC void*  ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
    assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
    assert(ws->workspace <= ws->initOnceStart);
#if ZSTD_MEMORY_SANITIZER
    {
        intptr_t const offset = __msan_test_shadow(ws->initOnceStart,
            (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart);
        (void)offset;
#if defined(ZSTD_MSAN_PRINT)
        if(offset!=-1) {
            __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32);
        }
#endif
        assert(offset==-1);
    };
#endif
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
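
/*
 * For example (values illustrative): ZSTD_cwksp_align(13, 8) == 16 and
 * ZSTD_cwksp_align(16, 8) == 16. Adding (align - 1) and then masking off the
 * low bits rounds up to the next multiple only when size isn't already one.
 */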

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else should be sized through this function, though.
 *
 * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}

/**
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned" allocation.
 */
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}
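
/*
 * For example, ZSTD_cwksp_aligned_alloc_size(100) returns 128 in a normal
 * build; in an ASAN build with workspace poisoning enabled, the result also
 * includes 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE bytes for the redzones.
 */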

/**
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
     * bytes to align the beginning of the tables section and the end of the buffers section.
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
    return slackSpace;
}


/**
 * Returns the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert((alignBytes & alignBytesMask) == 0);
    assert(bytes < alignBytes);
    return bytes;
}
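
/*
 * For example, with alignBytes == 64: a pointer whose low six bits are 40
 * needs (64 - 40) & 63 == 24 additional bytes, while an already-aligned
 * pointer needs (64 - 0) & 63 == 0.
 */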

/**
 * Returns the initial value for allocStart which is used to determine the position from
 * which we can allocate from the end of the workspace.
 */
MEM_STATIC void*  ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) {
    return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1));
}

/**
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
 * which counts from the end of the wksp (as opposed to the object/table segment).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    /* the area is reserved from the end of wksp.
     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/**
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * cwksp initialization must necessarily go through each phase in order.
 * Returns 0 on success, or a zstd error code.
 */
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating initOnce / tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
            phase >= ZSTD_cwksp_alloc_aligned_init_once) {
            ws->tableValidEnd = ws->objectEnd;
            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);

            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                void *const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void *const objectEnd = (BYTE *) alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment additional space: %zu", bytesToAlign);
                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = objectEnd;
                ws->tableEnd = objectEnd;  /* table area starts being empty */
                if (ws->tableValidEnd < ws->tableEnd) {
                    ws->tableValidEnd = ws->tableEnd;
                }
            }
        }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    if (alloc) {
        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
            /* We need to keep the redzone poisoned while unpoisoning the bytes that
             * are actually allocated. */
            __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
        }
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 * This memory has been initialized at least once in the past.
 * This doesn't mean it has been initialized this time, and it might contain data from previous
 * operations.
 * The main usage is for algorithms that might need read access into uninitialized memory.
 * The algorithm must maintain safety under these conditions and must make sure it doesn't
 * leak any of the past data (directly or in side channels).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
    void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    if(ptr && ptr < ws->initOnceStart) {
        /* We assume the memory following the current allocation is either:
         * 1. Not usable as initOnce memory (end of workspace)
         * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
         * 3. An ASAN redzone, in which case we don't want to write on it
         * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
         * Note that we assume here that MSAN and ASAN cannot run at the same time. */
        ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
        ws->initOnceStart = ptr;
    }
#if ZSTD_MEMORY_SANITIZER
    assert(__msan_test_shadow(ptr, bytes) == -1);
#endif
    return ptr;
}
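
/*
 * Illustrative sketch (the caller, name, and size are hypothetical): an
 * algorithm that can safely read stale data might reserve its state this way,
 * relying on the one-time zeroing above instead of a memset() on every reuse:
 *
 *   U32* const stats = (U32*)ZSTD_cwksp_reserve_aligned_init_once(ws, 256 * sizeof(U32));
 *   if (stats == NULL) { ... handle allocation failure ... }
 *
 * stats is zeroed the first time this region is handed out; later compressions
 * may observe values written by previous runs.
 */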

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
{
    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                            ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return ptr;
}

/**
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to reuse them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
    void* alloc;
    void* end;
    void* top;

    /* We can only start allocating tables after we are done reserving space for objects at the
     * start of the workspace */
    if(ws->phase < phase) {
        if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
            return NULL;
        }
    }
    alloc = ws->tableEnd;
    end = (BYTE *)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return alloc;
}
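
/*
 * Illustrative sketch (hashLog is a hypothetical cparam value): a hash table
 * of 2^hashLog U32 entries satisfies the size asserts above for hashLog >= 4,
 * since its byte size is then a multiple of 64:
 *
 *   size_t const hSize = (size_t)1 << hashLog;
 *   U32* const hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
 */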

/**
 * Aligned on sizeof(void*).
 * Note : should happen only once, at workspace first initialization
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(4,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(3, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}
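
/*
 * Illustrative sketch (simplified; it mirrors what the static-CCtx path does,
 * but details here are assumptions): an enclosing context can live inside its
 * own workspace by being reserved as the first object, then taking ownership
 * of the cwksp:
 *
 *   ZSTD_cwksp ws;
 *   ZSTD_CCtx* cctx;
 *   ZSTD_cwksp_init(&ws, buffer, bufferSize, ZSTD_cwksp_static_alloc);
 *   cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
 *   if (cctx == NULL) return NULL;
 *   ZSTD_cwksp_move(&cctx->workspace, &ws);
 */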

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table reuse logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty.
     * Since tableValidEnd space and initOnce space may overlap, we don't poison
     * the initOnce portion, as that would break its promise. This means that this
     * poisoning check isn't always applied in full. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            __msan_poison(ws->objectEnd, size);
        } else {
            assert(ws->initOnceStart >= ws->objectEnd);
            __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd);
        }
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context reuse logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace except for the areas in which we expect memory reuse
     * without initialization (objects, valid tables area and init once
     * memory). */
    {
        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd;
            __msan_poison(ws->tableValidEnd, size);
        }
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
        ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}
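
/*
 * Illustrative sketch (arena name and size hypothetical): wrapping a
 * caller-provided, pointer-aligned buffer; the cwksp performs no allocation
 * of its own in this mode:
 *
 *   static size_t arena[(1 << 20) / sizeof(size_t)];
 *   ZSTD_cwksp ws;
 *   ZSTD_cwksp_init(&ws, arena, sizeof(arena), ZSTD_cwksp_static_alloc);
 */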

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE)
    if (ptr != NULL && customMem.customFree != NULL) {
        __msan_unpoison(ptr, ZSTD_cwksp_sizeof(ws));
    }
#endif
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

/* ZSTD_alignmentSpaceWithinBounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable limit of the
 * actual amount of space used.
 */
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) {
    /* We have alignment space between objects and tables, and between tables and buffers,
     * so the estimate can exceed actual usage by up to twice the alignment bytes. */
    return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
           ZSTD_cwksp_used(ws) <= estimatedSpace;
}


MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */