Lines Matching full:table
36 // table size is a power of two) to visit every group of elements exactly once.
64 // about special sizes when checking the fullness of the table.
94 // How many entries are there in the table.
97 // How many entries can we store in the table before resizing.
184 // Nor does the routine check if the table is full.
186 // into a new table. Hence, the requirements are naturally satisfied.
214 // - the new table has enough capacity to hold all the entries
215 // - there is no duplicate key in the old table
222 LIBC_INLINE static ENTRY *insert(HashTable *&table, ENTRY item,
224 auto index = table->find(item.key, primary);
225 auto slot = &table->entry(index);
232 // if the table is full, we try to grow the table
233 if (table->is_full()) {
234 HashTable *new_table = table->grow();
238 // resized successfully: clean up the old table and use the new one
239 deallocate(table);
240 table = new_table;
242 return table->unsafe_insert(item);
245 table->set_ctrl(index, secondary_hash(primary));
248 table->available_slots--;
253 LIBC_INLINE static void deallocate(HashTable *table) {
254 if (table) {
256 reinterpret_cast<uint8_t *>(table) - table->offset_from_entries();
279 HashTable *table = reinterpret_cast<HashTable *>(
282 table->entries_mask = entries - 1u;
283 table->available_slots = entries / 8 * 7;
284 table->state = HashState{randomness};
285 memset(&table->control(0), 0x80, ctrl_sizes);
286 memset(mem, 0, table->offset_from_entries());
288 return table;
295 const HashTable &table;
298 // - this comparison only happens with the same table
315 return table.entry(
317 table.entries_mask);
326 Group::load_aligned(&table.control(current_offset)).occupied();
347 LIBC_INLINE static ENTRY *insert(HashTable *&table, ENTRY item) {
348 uint64_t primary = table->oneshot_hash(item.key);
349 return insert(table, item, primary);