/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdalign.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"
#include "rte_lru.h"

#include "table_log.h"

#define KEY_SIZE 32

#define KEYS_PER_BUCKET 4

#define RTE_BUCKET_ENTRY_VALID 0x1LLU

#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val)

#endif

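/*
 * Bucket for 32-byte keys, spanning four cache lines on 64-byte cache line
 * targets. Each of the four entries stores the hash signature of its key,
 * whose LSB (RTE_BUCKET_ENTRY_VALID) doubles as the entry-valid flag. The
 * fifth signature slot is a sentinel that is never written and therefore
 * stays zero, so the miss position (4) produced by lookup_key32_cmp() below
 * always reads back as invalid. The lru_list word packs the LRU ordering of
 * the four entries; next/next_valid chain extension buckets in the
 * extendible table variant.
 */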
#ifdef RTE_ARCH_64
struct rte_bucket_4_32 {
	/* Cache line 0 */
	uint64_t signature[4 + 1];
	uint64_t lru_list;
	struct rte_bucket_4_32 *next;
	uint64_t next_valid;

	/* Cache lines 1 and 2 */
	uint64_t key[4][4];

	/* Cache line 3 */
	uint8_t data[];
};
#else
struct rte_bucket_4_32 {
	/* Cache line 0 */
	uint64_t signature[4 + 1];
	uint64_t lru_list;
	struct rte_bucket_4_32 *next;
	uint32_t pad;
	uint64_t next_valid;

	/* Cache lines 1 and 2 */
	uint64_t key[4][4];

	/* Cache line 3 */
	uint8_t data[];
};
#endif

struct rte_table_hash {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t n_buckets;
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t bucket_size;
	uint32_t key_offset;
	uint64_t key_mask[4];
	rte_table_hash_op_hash f_hash;
	uint64_t seed;

	/* Extendible buckets */
	uint32_t n_buckets_ext;
	uint32_t stack_pos;
	uint32_t *stack;

	/* Lookup table */
	alignas(RTE_CACHE_LINE_SIZE) uint8_t memory[];
};

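/*
 * Keys are handled as four 64-bit words. Stored keys are masked once by
 * keycpy() at insertion time, so keycmp() compares the raw stored key (a)
 * against the candidate key (b) masked on the fly; it returns non-zero on
 * mismatch, zero on match.
 */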
static int
keycmp(void *a, void *b, void *b_mask)
{
	uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;

	return (a64[0] != (b64[0] & b_mask64[0])) ||
		(a64[1] != (b64[1] & b_mask64[1])) ||
		(a64[2] != (b64[2] & b_mask64[2])) ||
		(a64[3] != (b64[3] & b_mask64[3]));
}

static void
keycpy(void *dst, void *src, void *src_mask)
{
	uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;

	dst64[0] = src64[0] & src_mask64[0];
	dst64[1] = src64[1] & src_mask64[1];
	dst64[2] = src64[2] & src_mask64[2];
	dst64[3] = src64[3] & src_mask64[3];
}

static int
check_params_create(struct rte_table_hash_params *params)
{
	/* name */
	if (params->name == NULL) {
		TABLE_LOG(ERR, "%s: name invalid value", __func__);
		return -EINVAL;
	}

	/* key_size */
	if (params->key_size != KEY_SIZE) {
		TABLE_LOG(ERR, "%s: key_size invalid value", __func__);
		return -EINVAL;
	}

	/* n_keys */
	if (params->n_keys == 0) {
		TABLE_LOG(ERR, "%s: n_keys is zero", __func__);
		return -EINVAL;
	}

	/* n_buckets */
	if ((params->n_buckets == 0) ||
		(!rte_is_power_of_2(params->n_buckets))) {
		TABLE_LOG(ERR, "%s: n_buckets invalid value", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		TABLE_LOG(ERR, "%s: f_hash function pointer is NULL",
			__func__);
		return -EINVAL;
	}

	return 0;
}

static void *
rte_table_hash_create_key32_lru(void *params,
	int socket_id,
	uint32_t entry_size)
{
	struct rte_table_hash_params *p = params;
	struct rte_table_hash *f;
	uint64_t bucket_size, total_size;
	uint32_t n_buckets, i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_32) % 64) != 0))
		return NULL;

	/*
	 * Table dimensioning
	 *
	 * Objective: Pick the number of buckets (n_buckets) so that there is a
	 * chance to store n_keys keys in the table.
	 *
	 * Note: Since the buckets do not get extended, it is not possible to
	 * guarantee that n_keys keys can be stored in the table at any time.
	 * In the worst case scenario, when all the n_keys fall into the same
	 * bucket, only a maximum of KEYS_PER_BUCKET keys will be stored in the
	 * table. This case defeats the purpose of the hash table. It indicates
	 * an unsuitable f_hash or n_keys to n_buckets ratio.
	 *
	 * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
	 */
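	/*
	 * Worked example (illustrative values): for n_keys = 1000000,
	 * MIN(n_buckets) = (1000000 + 3) / 4 = 250000, which
	 * rte_align32pow2() rounds up to 262144 (2^18), unless the caller
	 * requested even more buckets via p->n_buckets.
	 */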
	n_buckets = rte_align32pow2(
		(p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
	n_buckets = RTE_MAX(n_buckets, p->n_buckets);

	/* Memory allocation */
	bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
		KEYS_PER_BUCKET * entry_size);
	total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;
	if (total_size > SIZE_MAX) {
		TABLE_LOG(ERR, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s",
			__func__, total_size, p->name);
		return NULL;
	}

	f = rte_zmalloc_socket(p->name,
		(size_t)total_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (f == NULL) {
		TABLE_LOG(ERR, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s",
			__func__, total_size, p->name);
		return NULL;
	}
	TABLE_LOG(INFO,
		"%s: Hash table %s memory footprint "
		"is %" PRIu64 " bytes",
		__func__, p->name, total_size);

	/* Memory initialization */
	f->n_buckets = n_buckets;
	f->key_size = KEY_SIZE;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

	if (p->key_mask != NULL) {
		f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
		f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
		f->key_mask[2] = ((uint64_t *)p->key_mask)[2];
		f->key_mask[3] = ((uint64_t *)p->key_mask)[3];
	} else {
		f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
	}

	for (i = 0; i < n_buckets; i++) {
		struct rte_bucket_4_32 *bucket;

		bucket = (struct rte_bucket_4_32 *) &f->memory[i *
			f->bucket_size];
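		/*
		 * As maintained by rte_lru.h, lru_list packs the four entry
		 * indices into one 64-bit word, with the replacement victim
		 * kept in the least significant 16 bits (read by lru_pos()),
		 * i.e. the initial order is 0, 1, 2, 3 with entry 3 as the
		 * first victim.
		 */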
		bucket->lru_list = 0x0000000100020003LLU;
	}

	return f;
}

static int
rte_table_hash_free_key32_lru(void *table)
{
	struct rte_table_hash *f = table;

	/* Check input parameters */
	if (f == NULL) {
		TABLE_LOG(ERR, "%s: table parameter is NULL", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}

static int
rte_table_hash_entry_add_key32_lru(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket;
	uint64_t signature, pos;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

		if ((bucket_signature == signature) &&
			(keycmp(bucket_key, key, f->key_mask) == 0)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 1;
			*entry_ptr = (void *) bucket_data;
			return 0;
		}
	}

	/* Key is not present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

		if (bucket_signature == 0) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature[i] = signature;
			keycpy(bucket_key, key, f->key_mask);
			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 0;
			*entry_ptr = (void *) bucket_data;

			return 0;
		}
	}

	/* Bucket full: replace LRU entry */
	pos = lru_pos(bucket);
	bucket->signature[pos] = signature;
	keycpy(&bucket->key[pos], key, f->key_mask);
	memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
	lru_update(bucket, pos);
	*key_found = 0;
	*entry_ptr = (void *) &bucket->data[pos * f->entry_size];

	return 0;
}

static int
rte_table_hash_entry_delete_key32_lru(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

		if ((bucket_signature == signature) &&
			(keycmp(bucket_key, key, f->key_mask) == 0)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature[i] = 0;
			*key_found = 1;
			if (entry)
				memcpy(entry, bucket_data, f->entry_size);

			return 0;
		}
	}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}

static void *
rte_table_hash_create_key32_ext(void *params,
	int socket_id,
	uint32_t entry_size)
{
	struct rte_table_hash_params *p = params;
	struct rte_table_hash *f;
	uint64_t bucket_size, stack_size, total_size;
	uint32_t n_buckets_ext, i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_32) % 64) != 0))
		return NULL;

	/*
	 * Table dimensioning
	 *
	 * Objective: Pick the number of bucket extensions (n_buckets_ext) so
	 * that it is guaranteed that n_keys keys can be stored in the table
	 * at any time.
	 *
	 * The worst case scenario takes place when all the n_keys keys fall
	 * into the same bucket. Actually, due to the KEYS_PER_BUCKET scheme,
	 * the worst case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys
	 * fall into the same bucket, while the remaining (KEYS_PER_BUCKET - 1)
	 * keys each fall into a different bucket. This case defeats the
	 * purpose of the hash table. It indicates an unsuitable f_hash or
	 * n_keys to n_buckets ratio.
	 *
	 * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
	 */
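	/*
	 * Worked example (illustrative values): for n_keys = 1000000,
	 * n_buckets_ext = 1000000 / 4 + 3 = 250003 extension buckets are
	 * reserved on top of the p->n_buckets first-level buckets.
	 */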
	n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;

	/* Memory allocation */
	bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
		KEYS_PER_BUCKET * entry_size);
	stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
	total_size = sizeof(struct rte_table_hash) +
		(p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
	if (total_size > SIZE_MAX) {
		TABLE_LOG(ERR, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s",
			__func__, total_size, p->name);
		return NULL;
	}

	f = rte_zmalloc_socket(p->name,
		(size_t)total_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (f == NULL) {
		TABLE_LOG(ERR, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s",
			__func__, total_size, p->name);
		return NULL;
	}
	TABLE_LOG(INFO,
		"%s: Hash table %s memory footprint "
		"is %" PRIu64 " bytes",
		__func__, p->name, total_size);

	/* Memory initialization */
	f->n_buckets = p->n_buckets;
	f->key_size = KEY_SIZE;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

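	/*
	 * LIFO stack of free extension buckets: stack[0 .. stack_pos - 1]
	 * hold the indices of the currently free extension buckets. An index
	 * is popped when a full bucket needs to be extended and pushed back
	 * when an emptied extension bucket is unlinked on delete.
	 */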
	f->n_buckets_ext = n_buckets_ext;
	f->stack_pos = n_buckets_ext;
	f->stack = (uint32_t *)
		&f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];

	if (p->key_mask != NULL) {
		f->key_mask[0] = (((uint64_t *)p->key_mask)[0]);
		f->key_mask[1] = (((uint64_t *)p->key_mask)[1]);
		f->key_mask[2] = (((uint64_t *)p->key_mask)[2]);
		f->key_mask[3] = (((uint64_t *)p->key_mask)[3]);
	} else {
		f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
	}

	for (i = 0; i < n_buckets_ext; i++)
		f->stack[i] = i;

	return f;
}

static int
rte_table_hash_free_key32_ext(void *table)
{
	struct rte_table_hash *f = table;

	/* Check input parameters */
	if (f == NULL) {
		TABLE_LOG(ERR, "%s: table parameter is NULL", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}

static int
rte_table_hash_entry_add_key32_ext(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

			if ((bucket_signature == signature) &&
				(keycmp(bucket_key, key, f->key_mask) == 0)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 1;
				*entry_ptr = (void *) bucket_data;

				return 0;
			}
		}
	}

	/* Key is not present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
		bucket_prev = bucket, bucket = bucket->next)
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

			if (bucket_signature == 0) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature[i] = signature;
				keycpy(bucket_key, key, f->key_mask);
				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 0;
				*entry_ptr = (void *) bucket_data;

				return 0;
			}
		}

	/* Bucket full: extend bucket */
	if (f->stack_pos > 0) {
		bucket_index = f->stack[--f->stack_pos];

		bucket = (struct rte_bucket_4_32 *)
			&f->memory[(f->n_buckets + bucket_index) *
			f->bucket_size];
		bucket_prev->next = bucket;
		bucket_prev->next_valid = 1;

		bucket->signature[0] = signature;
		keycpy(&bucket->key[0], key, f->key_mask);
		memcpy(&bucket->data[0], entry, f->entry_size);
		*key_found = 0;
		*entry_ptr = (void *) &bucket->data[0];
		return 0;
	}

	return -ENOSPC;
}

static int
rte_table_hash_entry_delete_key32_ext(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
		bucket_prev = bucket, bucket = bucket->next)
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

			if ((bucket_signature == signature) &&
				(keycmp(bucket_key, key, f->key_mask) == 0)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature[i] = 0;
				*key_found = 1;
				if (entry)
					memcpy(entry, bucket_data,
						f->entry_size);

				if ((bucket->signature[0] == 0) &&
					(bucket->signature[1] == 0) &&
					(bucket->signature[2] == 0) &&
					(bucket->signature[3] == 0) &&
					(bucket_prev != NULL)) {
					bucket_prev->next = bucket->next;
					bucket_prev->next_valid =
						bucket->next_valid;

					memset(bucket, 0,
						sizeof(struct rte_bucket_4_32));
					bucket_index = (((uint8_t *)bucket -
						(uint8_t *)f->memory) /
						f->bucket_size) - f->n_buckets;
					f->stack[f->stack_pos++] = bucket_index;
				}

				return 0;
			}
		}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}

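/*
 * Branchless compare of a (masked) 32-byte key against all four entries of a
 * bucket: or[i] is zero only when entry i is valid and its key matches, so
 * pos is set to the matching entry, or to 4 when there is no match.
 * Inverting the valid bit into signature[i] forces a mismatch for empty
 * slots, and the zero sentinel in signature[4] makes the pos == 4 case read
 * back as a miss in the stage 2 macros below.
 */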
#define lookup_key32_cmp(key_in, bucket, pos, f) \
{ \
	uint64_t xor[4][4], or[4], signature[4], k[4]; \
\
	k[0] = key_in[0] & f->key_mask[0]; \
	k[1] = key_in[1] & f->key_mask[1]; \
	k[2] = key_in[2] & f->key_mask[2]; \
	k[3] = key_in[3] & f->key_mask[3]; \
\
	signature[0] = ((~bucket->signature[0]) & 1); \
	signature[1] = ((~bucket->signature[1]) & 1); \
	signature[2] = ((~bucket->signature[2]) & 1); \
	signature[3] = ((~bucket->signature[3]) & 1); \
\
	xor[0][0] = k[0] ^ bucket->key[0][0]; \
	xor[0][1] = k[1] ^ bucket->key[0][1]; \
	xor[0][2] = k[2] ^ bucket->key[0][2]; \
	xor[0][3] = k[3] ^ bucket->key[0][3]; \
\
	xor[1][0] = k[0] ^ bucket->key[1][0]; \
	xor[1][1] = k[1] ^ bucket->key[1][1]; \
	xor[1][2] = k[2] ^ bucket->key[1][2]; \
	xor[1][3] = k[3] ^ bucket->key[1][3]; \
\
	xor[2][0] = k[0] ^ bucket->key[2][0]; \
	xor[2][1] = k[1] ^ bucket->key[2][1]; \
	xor[2][2] = k[2] ^ bucket->key[2][2]; \
	xor[2][3] = k[3] ^ bucket->key[2][3]; \
\
	xor[3][0] = k[0] ^ bucket->key[3][0]; \
	xor[3][1] = k[1] ^ bucket->key[3][1]; \
	xor[3][2] = k[2] ^ bucket->key[3][2]; \
	xor[3][3] = k[3] ^ bucket->key[3][3]; \
\
	or[0] = xor[0][0] | xor[0][1] | xor[0][2] | xor[0][3] | signature[0]; \
	or[1] = xor[1][0] | xor[1][1] | xor[1][2] | xor[1][3] | signature[1]; \
	or[2] = xor[2][0] | xor[2][1] | xor[2][2] | xor[2][3] | signature[2]; \
	or[3] = xor[3][0] | xor[3][1] | xor[3][2] | xor[3][3] | signature[3]; \
\
	pos = 4; \
	if (or[0] == 0) \
		pos = 0; \
	if (or[1] == 0) \
		pos = 1; \
	if (or[2] == 0) \
		pos = 2; \
	if (or[3] == 0) \
		pos = 3; \
}

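/*
 * Single-packet and dual-packet lookup pipeline stages:
 * - stage 0: select the next packet(s) from pkts_mask and prefetch the key
 *   held in the mbuf metadata at key_offset;
 * - stage 1: hash the key and prefetch the three cache lines of the bucket;
 * - stage 2: compare the key against the four bucket entries and record the
 *   hit bit and the entry pointer (the LRU variant also updates the LRU
 *   order, the extendible variant schedules any chained bucket for the
 *   grinder below).
 */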
#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask, f) \
{ \
	uint64_t pkt_mask; \
	uint32_t key_offset = f->key_offset; \
\
	pkt0_index = rte_ctz64(pkts_mask); \
	pkt_mask = 1LLU << pkt0_index; \
	pkts_mask &= ~pkt_mask; \
\
	mbuf0 = pkts[pkt0_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, key_offset)); \
}

#define lookup1_stage1(mbuf1, bucket1, f) \
{ \
	uint64_t *key; \
	uint64_t signature; \
	uint32_t bucket_index; \
\
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset); \
	signature = f->f_hash(key, f->key_mask, KEY_SIZE, f->seed); \
\
	bucket_index = signature & (f->n_buckets - 1); \
	bucket1 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket_index * f->bucket_size]; \
	rte_prefetch0(bucket1); \
	rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE)); \
	rte_prefetch0((void *)(((uintptr_t) bucket1) + 2 * RTE_CACHE_LINE_SIZE)); \
}

#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
	pkts_mask_out, entries, f) \
{ \
	void *a; \
	uint64_t pkt_mask; \
	uint64_t *key; \
	uint32_t pos; \
\
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset); \
	lookup_key32_cmp(key, bucket2, pos, f); \
\
	pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index; \
	pkts_mask_out |= pkt_mask; \
\
	a = (void *) &bucket2->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt2_index] = a; \
	lru_update(bucket2, pos); \
}

#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out, \
	entries, buckets_mask, buckets, keys, f) \
{ \
	struct rte_bucket_4_32 *bucket_next; \
	void *a; \
	uint64_t pkt_mask, bucket_mask; \
	uint64_t *key; \
	uint32_t pos; \
\
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset); \
	lookup_key32_cmp(key, bucket2, pos, f); \
\
	pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index; \
	pkts_mask_out |= pkt_mask; \
\
	a = (void *) &bucket2->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt2_index] = a; \
\
	bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index); \
	buckets_mask |= bucket_mask; \
	bucket_next = bucket2->next; \
	buckets[pkt2_index] = bucket_next; \
	keys[pkt2_index] = key; \
}

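/*
 * Advance one packet one level down its bucket chain: compare against the
 * current extension bucket and, on a miss with a further chained bucket
 * present, prefetch that bucket and re-arm the packet in buckets_mask for
 * the next grinding round.
 */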
#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, \
	entries, buckets_mask, f) \
{ \
	struct rte_bucket_4_32 *bucket, *bucket_next; \
	void *a; \
	uint64_t pkt_mask, bucket_mask; \
	uint64_t *key; \
	uint32_t pos; \
\
	bucket = buckets[pkt_index]; \
	key = keys[pkt_index]; \
\
	lookup_key32_cmp(key, bucket, pos, f); \
\
	pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index; \
	pkts_mask_out |= pkt_mask; \
\
	a = (void *) &bucket->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt_index] = a; \
\
	bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index); \
	buckets_mask |= bucket_mask; \
	bucket_next = bucket->next; \
	rte_prefetch0(bucket_next); \
	rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE)); \
	rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
		2 * RTE_CACHE_LINE_SIZE)); \
	buckets[pkt_index] = bucket_next; \
	keys[pkt_index] = key; \
}

#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, \
	pkts, pkts_mask, f) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
	uint32_t key_offset = f->key_offset; \
\
	pkt00_index = rte_ctz64(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
\
	mbuf00 = pkts[pkt00_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
\
	pkt01_index = rte_ctz64(pkts_mask); \
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
\
	mbuf01 = pkts[pkt01_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
}

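/*
 * Same as lookup2_stage0, except that when only one packet is left in
 * pkts_mask the second lane duplicates the first packet: the duplicate
 * flows through the pipeline and harmlessly rewrites the same entries[]
 * slot with the same result.
 */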
#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index, \
	mbuf00, mbuf01, pkts, pkts_mask, f) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
	uint32_t key_offset = f->key_offset; \
\
	pkt00_index = rte_ctz64(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
\
	mbuf00 = pkts[pkt00_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
\
	pkt01_index = rte_ctz64(pkts_mask); \
	if (pkts_mask == 0) \
		pkt01_index = pkt00_index; \
\
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
\
	mbuf01 = pkts[pkt01_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
}

#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
{ \
	uint64_t *key10, *key11; \
	uint64_t signature10, signature11; \
	uint32_t bucket10_index, bucket11_index; \
\
	key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, f->key_offset); \
	signature10 = f->f_hash(key10, f->key_mask, KEY_SIZE, f->seed); \
\
	bucket10_index = signature10 & (f->n_buckets - 1); \
	bucket10 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket10_index * f->bucket_size]; \
	rte_prefetch0(bucket10); \
	rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE)); \
	rte_prefetch0((void *)(((uintptr_t) bucket10) + 2 * RTE_CACHE_LINE_SIZE)); \
\
	key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, f->key_offset); \
	signature11 = f->f_hash(key11, f->key_mask, KEY_SIZE, f->seed); \
\
	bucket11_index = signature11 & (f->n_buckets - 1); \
	bucket11 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket11_index * f->bucket_size]; \
	rte_prefetch0(bucket11); \
	rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE)); \
	rte_prefetch0((void *)(((uintptr_t) bucket11) + 2 * RTE_CACHE_LINE_SIZE)); \
}

#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21, \
	bucket20, bucket21, pkts_mask_out, entries, f) \
{ \
	void *a20, *a21; \
	uint64_t pkt20_mask, pkt21_mask; \
	uint64_t *key20, *key21; \
	uint32_t pos20, pos21; \
\
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset); \
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset); \
\
	lookup_key32_cmp(key20, bucket20, pos20, f); \
	lookup_key32_cmp(key21, bucket21, pos21, f); \
\
	pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index; \
	pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index; \
	pkts_mask_out |= pkt20_mask | pkt21_mask; \
\
	a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
	a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
	rte_prefetch0(a20); \
	rte_prefetch0(a21); \
	entries[pkt20_index] = a20; \
	entries[pkt21_index] = a21; \
	lru_update(bucket20, pos20); \
	lru_update(bucket21, pos21); \
}

#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
	bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f) \
{ \
	struct rte_bucket_4_32 *bucket20_next, *bucket21_next; \
	void *a20, *a21; \
	uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask; \
	uint64_t *key20, *key21; \
	uint32_t pos20, pos21; \
\
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset); \
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset); \
\
	lookup_key32_cmp(key20, bucket20, pos20, f); \
	lookup_key32_cmp(key21, bucket21, pos21, f); \
\
	pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index; \
	pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index; \
	pkts_mask_out |= pkt20_mask | pkt21_mask; \
\
	a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
	a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
	rte_prefetch0(a20); \
	rte_prefetch0(a21); \
	entries[pkt20_index] = a20; \
	entries[pkt21_index] = a21; \
\
	bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index); \
	bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index); \
	buckets_mask |= bucket20_mask | bucket21_mask; \
	bucket20_next = bucket20->next; \
	bucket21_next = bucket21->next; \
	buckets[pkt20_index] = bucket20_next; \
	buckets[pkt21_index] = bucket21_next; \
	keys[pkt20_index] = key20; \
	keys[pkt21_index] = key21; \
}

static int
rte_table_hash_lookup_key32_lru(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0;

	__rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask);
	RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);

	/* Cannot run the pipeline with less than 5 packets */
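	/*
	 * The two-lane pipeline consumes four packets during its fill phase,
	 * so smaller bursts take the single-packet path below.
	 */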
	if (rte_popcount64(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_32 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_lru(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, f);
		}

		*lookup_hit_mask = pkts_mask_out;
		RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f,
			n_pkts_in - rte_popcount64(pkts_mask_out));
		return 0;
	}

	/*
	 * Pipeline fill
	 *
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 *
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask, f);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_lru(pkt20_index, pkt21_index,
			mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out,
			entries, f);
	}

	/*
	 * Pipeline flush
	 *
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index,
		mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index,
		mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f,
		n_pkts_in - rte_popcount64(pkts_mask_out));
	return 0;
} /* rte_table_hash_lookup_key32_lru() */

static int
rte_table_hash_lookup_key32_ext(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0, buckets_mask = 0;
	struct rte_bucket_4_32 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
	uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];

	__rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask);
	RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);

	/* Cannot run the pipeline with less than 5 packets */
	if (rte_popcount64(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_32 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_ext(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, buckets_mask, buckets,
				keys, f);
		}

		goto grind_next_buckets;
	}

	/*
	 * Pipeline fill
	 *
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 *
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask, f);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries,
			buckets_mask, buckets, keys, f);
	}

	/*
	 * Pipeline flush
	 *
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

grind_next_buckets:
	/* Grind next buckets */
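	/*
	 * Packets flagged in buckets_mask missed in their first bucket but
	 * have chained extension buckets; keep grinding them one chain level
	 * per round until no packet has a further bucket to visit.
	 */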
	for ( ; buckets_mask; ) {
		uint64_t buckets_mask_next = 0;

		for ( ; buckets_mask; ) {
			uint64_t pkt_mask;
			uint32_t pkt_index;

			pkt_index = rte_ctz64(buckets_mask);
			pkt_mask = 1LLU << pkt_index;
			buckets_mask &= ~pkt_mask;

			lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
				entries, buckets_mask_next, f);
		}

		buckets_mask = buckets_mask_next;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f,
		n_pkts_in - rte_popcount64(pkts_mask_out));
	return 0;
} /* rte_table_hash_lookup_key32_ext() */

static int
rte_table_hash_key32_stats_read(void *table, struct rte_table_stats *stats,
	int clear)
{
	struct rte_table_hash *t = table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}

struct rte_table_ops rte_table_hash_key32_lru_ops = {
	.f_create = rte_table_hash_create_key32_lru,
	.f_free = rte_table_hash_free_key32_lru,
	.f_add = rte_table_hash_entry_add_key32_lru,
	.f_delete = rte_table_hash_entry_delete_key32_lru,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_lookup_key32_lru,
	.f_stats = rte_table_hash_key32_stats_read,
};

struct rte_table_ops rte_table_hash_key32_ext_ops = {
	.f_create = rte_table_hash_create_key32_ext,
	.f_free = rte_table_hash_free_key32_ext,
	.f_add = rte_table_hash_entry_add_key32_ext,
	.f_delete = rte_table_hash_entry_delete_key32_ext,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_lookup_key32_ext,
	.f_stats = rte_table_hash_key32_stats_read,
};

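/*
 * Usage sketch (illustrative only, not part of the library): creating a
 * key32 LRU table through the ops interface. The hash callback (my_hash)
 * and the entry_size value are assumptions made up for the example; any
 * rte_table_hash_op_hash implementation works.
 *
 *	struct rte_table_hash_params params = {
 *		.name = "key32_table",
 *		.key_size = 32,
 *		.key_offset = 0,
 *		.key_mask = NULL,	(NULL: match all 32 key bytes)
 *		.n_keys = 1 << 16,
 *		.n_buckets = 1 << 14,	(must be a power of 2)
 *		.f_hash = my_hash,
 *		.seed = 0,
 *	};
 *	void *table = rte_table_hash_key32_lru_ops.f_create(&params,
 *		rte_socket_id(), entry_size);
 */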