/* ******************************************************************
 * FSE : Finite State Entropy decoder
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */


/* **************************************************************
*  Includes
****************************************************************/
#include "debug.h"        /* assert */
#include "bitstream.h"
#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"
#include "zstd_deps.h"    /* ZSTD_memcpy */
#include "bits.h"         /* ZSTD_highbit32 */


/* **************************************************************
*  Error Management
****************************************************************/
#define FSE_isError ERR_isError
#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */


/* **************************************************************
*  Templates
****************************************************************/
/*
  designed to be included
  for type-specific functions (template emulation in C)
  Objective is to write these functions only once, for improved maintenance
*/

/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
#  error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
#  error "FSE_FUNCTION_TYPE must be defined"
#endif

/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)

static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
    U16* symbolNext = (U16*)workSpace;
    BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);

    U32 const maxSV1 = maxSymbolValue + 1;
    U32 const tableSize = 1 << tableLog;
    U32 highThreshold = tableSize-1;

    /* Sanity Checks */
    if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);

    /* Init, lay down lowprob symbols */
    {   FSE_DTableHeader DTableH;
        DTableH.tableLog = (U16)tableLog;
        DTableH.fastMode = 1;
        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
            U32 s;
            for (s=0; s<maxSV1; s++) {
                if (normalizedCounter[s]==-1) {
                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
                    symbolNext[s] = 1;
                } else {
                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
                    symbolNext[s] = (U16)normalizedCounter[s];
        }   }   }
        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
    }
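
    /* Note : at this point, every symbol with a normalized count of -1
     * ("probability less than 1") has been parked in the cells above
     * highThreshold, with its symbolNext entry starting at 1, so it will
     * always consume a full tableLog bits when decoded. The remaining cells
     * are filled by spreading the regular symbols below. */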

    /* Spread symbols */
    if (highThreshold == tableSize - 1) {
        size_t const tableMask = tableSize-1;
        size_t const step = FSE_TABLESTEP(tableSize);
        /* First lay down the symbols in order.
         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
         * misses since small blocks generally have small table logs, so nearly
         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
         * our buffer to handle the over-write.
         */
        {   U64 const add = 0x0101010101010101ull;
            size_t pos = 0;
            U64 sv = 0;
            U32 s;
            for (s=0; s<maxSV1; ++s, sv += add) {
                int i;
                int const n = normalizedCounter[s];
                MEM_write64(spread + pos, sv);
                for (i = 8; i < n; i += 8) {
                    MEM_write64(spread + pos + i, sv);
                }
                pos += (size_t)n;
        }   }
        /* Now we spread those positions across the table.
         * The benefit of doing it in two stages is that we avoid the
         * variable size inner loop, which caused lots of branch misses.
         * Now we can run through all the positions without any branch misses.
         * We unroll the loop twice, since that is what empirically worked best.
         */
        {
            size_t position = 0;
            size_t s;
            size_t const unroll = 2;
            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
            for (s = 0; s < (size_t)tableSize; s += unroll) {
                size_t u;
                for (u = 0; u < unroll; ++u) {
                    size_t const uPosition = (position + (u * step)) & tableMask;
                    tableDecode[uPosition].symbol = spread[s + u];
                }
                position = (position + (unroll * step)) & tableMask;
            }
            assert(position == 0);
        }
    } else {
        U32 const tableMask = tableSize-1;
        U32 const step = FSE_TABLESTEP(tableSize);
        U32 s, position = 0;
        for (s=0; s<maxSV1; s++) {
            int i;
            for (i=0; i<normalizedCounter[s]; i++) {
                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
                position = (position + step) & tableMask;
                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
        }   }
        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
    }

    /* Build Decoding table */
    {   U32 u;
        for (u=0; u<tableSize; u++) {
            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
            U32 const nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (BYTE) (tableLog - ZSTD_highbit32(nextState) );
            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
    }   }

    return 0;
}

size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
    return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
}


#ifndef FSE_COMMONDEFS_ONLY

/*-*******************************************************
*  Decompression (Byte symbols)
*********************************************************/

FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
          void* dst, size_t maxDstSize,
    const void* cSrc, size_t cSrcSize,
    const FSE_DTable* dt, const unsigned fast)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const omax = op + maxDstSize;
    BYTE* const olimit = omax-3;

    BIT_DStream_t bitD;
    FSE_DState_t state1;
    FSE_DState_t state2;

    /* Init */
    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));

    FSE_initDState(&state1, &bitD, dt);
    FSE_initDState(&state2, &bitD, dt);
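
    /* Note : two FSE states are tracked in parallel over the same bitstream;
     * interleaving their updates hides the serial dependency between
     * consecutive symbols of a single state and improves pipeline usage. */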

#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)

    /* 4 symbols per loop */
    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
        op[0] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[1] = FSE_GETSYMBOL(&state2);

        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }

        op[2] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[3] = FSE_GETSYMBOL(&state2);
    }

    /* tail */
    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
    while (1) {
        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state1);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state2);
            break;
        }

        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state2);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state1);
            break;
    }   }

    assert(op >= ostart);
    return (size_t)(op-ostart);
}

typedef struct {
    short ncount[FSE_MAX_SYMBOL_VALUE + 1];
} FSE_DecompressWksp;


FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
        void* dst, size_t dstCapacity,
        const void* cSrc, size_t cSrcSize,
        unsigned maxLog, void* workSpace, size_t wkspSize,
        int bmi2)
{
    const BYTE* const istart = (const BYTE*)cSrc;
    const BYTE* ip = istart;
    unsigned tableLog;
    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
    FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
    size_t const dtablePos = sizeof(FSE_DecompressWksp) / sizeof(FSE_DTable);
    FSE_DTable* const dtable = (FSE_DTable*)workSpace + dtablePos;

    FSE_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
    if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);

    /* correct offset to dtable depends on this property */
    FSE_STATIC_ASSERT(sizeof(FSE_DecompressWksp) % sizeof(FSE_DTable) == 0);

    /* normal FSE decoding mode */
    {   size_t const NCountLength =
            FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
        if (FSE_isError(NCountLength)) return NCountLength;
        if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
        assert(NCountLength <= cSrcSize);
        ip += NCountLength;
        cSrcSize -= NCountLength;
    }

    if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
    assert(sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog) <= wkspSize);
    workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
    wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);

    CHECK_F( FSE_buildDTable_internal(dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );

    {
        const void* ptr = dtable;
        const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
        const U32 fastMode = DTableH->fastMode;

        /* select fast mode (static) */
        if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1);
        return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0);
    }
}
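
/* Note : FSE_decompress_wksp_body() is force-inlined into the two thin
 * wrappers below, producing one specialization without BMI2 and, when
 * DYNAMIC_BMI2 is enabled, one compiled with BMI2_TARGET_ATTRIBUTE;
 * FSE_decompress_wksp_bmi2() then dispatches between them at runtime. */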

/* Avoids the FORCE_INLINE of the _body() function. */
static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
}

#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
}
#endif

size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
        return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
    }
#endif
    (void)bmi2;
    return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
}

#endif /* FSE_COMMONDEFS_ONLY */
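
/* Illustrative usage sketch (comment only, not compiled) : a caller sizes a
 * scratch workspace with FSE_DECOMPRESS_WKSP_SIZE_U32() from fse.h and lets
 * FSE_decompress_wksp_bmi2() parse the normalized-count header at the start
 * of the compressed input. Buffer names below are hypothetical.
 *
 *     U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
 *     size_t const dSize = FSE_decompress_wksp_bmi2(dstBuffer, dstCapacity,
 *                                                   srcBuffer, srcSize,
 *                                                   FSE_MAX_TABLELOG,
 *                                                   wksp, sizeof(wksp), 0);
 *     if (FSE_isError(dSize)) return dSize;
 */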