/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdalign.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"

#include "table_log.h"

#define KEYS_PER_BUCKET	4

struct bucket {
	union {
		uintptr_t next;
		uint64_t lru_list;
	};
	uint16_t sig[KEYS_PER_BUCKET];
	uint32_t key_pos[KEYS_PER_BUCKET];
};

#define BUCKET_NEXT(bucket) \
	((void *) ((bucket)->next & (~1LU)))

#define BUCKET_NEXT_VALID(bucket) \
	((bucket)->next & 1LU)

#define BUCKET_NEXT_SET(bucket, bucket_next) \
do \
	(bucket)->next = (((uintptr_t) ((void *) (bucket_next))) | 1LU); \
while (0)

#define BUCKET_NEXT_SET_NULL(bucket) \
do \
	(bucket)->next = 0; \
while (0)

#define BUCKET_NEXT_COPY(bucket, bucket2) \
do \
	(bucket)->next = (bucket2)->next; \
while (0)
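
/*
 * Extension buckets are chained through the *next* field: bit 0 is used as a
 * "chain valid" flag, so BUCKET_NEXT() masks it off to recover the pointer and
 * BUCKET_NEXT_VALID() tests it; a *next* value of 0 means the bucket has no
 * extension. The *lru_list* member of the union is not used by this extendable
 * bucket variant; it belongs to the LRU variant of the hash table.
 */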

#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(table, val)

#endif

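/*
 * Per-packet state carried between the lookup pipeline stages below: the
 * bucket selected in stage 1, the packet signature, the signature match bit
 * computed in stage 2 and the index of the candidate key compared against
 * the packet key in stage 3.
 */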
struct grinder {
	struct bucket *bkt;
	uint64_t sig;
	uint64_t match;
	uint32_t key_index;
};

struct rte_table_hash {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t n_keys;
	uint32_t n_buckets;
	uint32_t n_buckets_ext;
	rte_table_hash_op_hash f_hash;
	uint64_t seed;
	uint32_t key_offset;

	/* Internal */
	uint64_t bucket_mask;
	uint32_t key_size_shl;
	uint32_t data_size_shl;
	uint32_t key_stack_tos;
	uint32_t bkt_ext_stack_tos;

	/* Grinder */
	struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];

	/* Tables */
	uint64_t *key_mask;
	struct bucket *buckets;
	struct bucket *buckets_ext;
	uint8_t *key_mem;
	uint8_t *data_mem;
	uint32_t *key_stack;
	uint32_t *bkt_ext_stack;

	/* Table memory */
	alignas(RTE_CACHE_LINE_SIZE) uint8_t memory[];
};

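/*
 * Compare stored key *a* (already masked at insertion time) against lookup key
 * *b* under mask *b_mask*, 64 bits at a time. n_bytes is the table key size,
 * which is a power of two of at least 8 bytes (enforced by
 * check_params_create()), so no tail handling is needed.
 * Returns 0 on match, 1 otherwise.
 */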
static int
keycmp(void *a, void *b, void *b_mask, uint32_t n_bytes)
{
	uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
	uint32_t i;

	for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
		if (a64[i] != (b64[i] & b_mask64[i]))
			return 1;

	return 0;
}

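/* Copy n_bytes of *src* into *dst*, applying *src_mask*, 64 bits at a time. */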
static void
keycpy(void *dst, void *src, void *src_mask, uint32_t n_bytes)
{
	uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
	uint32_t i;

	for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
		dst64[i] = src64[i] & src_mask64[i];
}

static int
check_params_create(struct rte_table_hash_params *params)
{
	/* name */
	if (params->name == NULL) {
		TABLE_LOG(ERR, "%s: name invalid value", __func__);
		return -EINVAL;
	}

	/* key_size */
	if ((params->key_size < sizeof(uint64_t)) ||
		(!rte_is_power_of_2(params->key_size))) {
		TABLE_LOG(ERR, "%s: key_size invalid value", __func__);
		return -EINVAL;
	}

	/* n_keys */
	if (params->n_keys == 0) {
		TABLE_LOG(ERR, "%s: n_keys invalid value", __func__);
		return -EINVAL;
	}

	/* n_buckets */
	if ((params->n_buckets == 0) ||
		(!rte_is_power_of_2(params->n_buckets))) {
		TABLE_LOG(ERR, "%s: n_buckets invalid value", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		TABLE_LOG(ERR, "%s: f_hash invalid value", __func__);
		return -EINVAL;
	}

	return 0;
}

static void *
rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_hash_params *p = params;
	struct rte_table_hash *t;
	uint64_t table_meta_sz, key_mask_sz, bucket_sz, bucket_ext_sz, key_sz;
	uint64_t key_stack_sz, bkt_ext_stack_sz, data_sz, total_size;
	uint64_t key_mask_offset, bucket_offset, bucket_ext_offset, key_offset;
	uint64_t key_stack_offset, bkt_ext_stack_offset, data_offset;
	uint32_t n_buckets_ext, i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		(!rte_is_power_of_2(entry_size)) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		(sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2)))
		return NULL;

	/*
	 * Table dimensioning
	 *
	 * Objective: Pick the number of bucket extensions (n_buckets_ext) so
	 * that it is guaranteed that n_keys keys can be stored in the table at
	 * any time.
	 *
	 * The worst case scenario takes place when all the n_keys keys fall
	 * into the same bucket. Actually, due to the KEYS_PER_BUCKET scheme,
	 * the worst case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys
	 * fall into the same bucket, while the remaining (KEYS_PER_BUCKET - 1)
	 * keys each fall into a different bucket. This case defeats the purpose
	 * of the hash table; it indicates an unsuitable choice of f_hash or of
	 * the n_keys to n_buckets ratio.
	 *
	 * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
	 */
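	/*
	 * For example (illustration only): with n_keys = 1024 and
	 * KEYS_PER_BUCKET = 4, this provisions 1024 / 4 + 4 - 1 = 259
	 * extension buckets, enough to absorb the worst case described above.
	 */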
	n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;

	/* Memory allocation */
	table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
	key_mask_sz = RTE_CACHE_LINE_ROUNDUP(p->key_size);
	bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
	bucket_ext_sz =
		RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(struct bucket));
	key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
	key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
	bkt_ext_stack_sz =
		RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
	data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
	total_size = table_meta_sz + key_mask_sz + bucket_sz + bucket_ext_sz +
		key_sz + key_stack_sz + bkt_ext_stack_sz + data_sz;

	if (total_size > SIZE_MAX) {
		TABLE_LOG(ERR, "%s: Cannot allocate %" PRIu64 " bytes"
			" for hash table %s",
			__func__, total_size, p->name);
		return NULL;
	}

	t = rte_zmalloc_socket(p->name,
		(size_t)total_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (t == NULL) {
		TABLE_LOG(ERR, "%s: Cannot allocate %" PRIu64 " bytes"
			" for hash table %s",
			__func__, total_size, p->name);
		return NULL;
	}
	TABLE_LOG(INFO, "%s (%u-byte key): Hash table %s memory "
		"footprint is %" PRIu64 " bytes",
		__func__, p->key_size, p->name, total_size);

	/* Memory initialization */
	t->key_size = p->key_size;
	t->entry_size = entry_size;
	t->n_keys = p->n_keys;
	t->n_buckets = p->n_buckets;
	t->n_buckets_ext = n_buckets_ext;
	t->f_hash = p->f_hash;
	t->seed = p->seed;
	t->key_offset = p->key_offset;

	/* Internal */
	t->bucket_mask = t->n_buckets - 1;
	t->key_size_shl = rte_ctz32(p->key_size);
	t->data_size_shl = rte_ctz32(entry_size);

	/* Tables */
	key_mask_offset = 0;
	bucket_offset = key_mask_offset + key_mask_sz;
	bucket_ext_offset = bucket_offset + bucket_sz;
	key_offset = bucket_ext_offset + bucket_ext_sz;
	key_stack_offset = key_offset + key_sz;
	bkt_ext_stack_offset = key_stack_offset + key_stack_sz;
	data_offset = bkt_ext_stack_offset + bkt_ext_stack_sz;

	t->key_mask = (uint64_t *) &t->memory[key_mask_offset];
	t->buckets = (struct bucket *) &t->memory[bucket_offset];
	t->buckets_ext = (struct bucket *) &t->memory[bucket_ext_offset];
	t->key_mem = &t->memory[key_offset];
	t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
	t->bkt_ext_stack = (uint32_t *) &t->memory[bkt_ext_stack_offset];
	t->data_mem = &t->memory[data_offset];

	/* Key mask */
	if (p->key_mask == NULL)
		memset(t->key_mask, 0xFF, p->key_size);
	else
		memcpy(t->key_mask, p->key_mask, p->key_size);

	/* Key stack */
	for (i = 0; i < t->n_keys; i++)
		t->key_stack[i] = t->n_keys - 1 - i;
	t->key_stack_tos = t->n_keys;

	/* Bucket ext stack */
	for (i = 0; i < t->n_buckets_ext; i++)
		t->bkt_ext_stack[i] = t->n_buckets_ext - 1 - i;
	t->bkt_ext_stack_tos = t->n_buckets_ext;

	return t;
}

static int
rte_table_hash_ext_free(void *table)
{
	struct rte_table_hash *t = table;

	/* Check input parameters */
	if (t == NULL)
		return -EINVAL;

	rte_free(t);
	return 0;
}

static int
rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
	int *key_found, void **entry_ptr)
{
	struct rte_table_hash *t = table;
	struct bucket *bkt0, *bkt, *bkt_prev;
	uint64_t sig;
	uint32_t bkt_index, i;

	sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
	bkt_index = sig & t->bucket_mask;
	bkt0 = &t->buckets[bkt_index];
	sig = (sig >> 16) | 1LLU;

	/* Key is present in the bucket */
	for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];
			uint32_t bkt_key_index = bkt->key_pos[i];
			uint8_t *bkt_key =
				&t->key_mem[bkt_key_index << t->key_size_shl];

			if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
				t->key_size) == 0)) {
				uint8_t *data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				memcpy(data, entry, t->entry_size);
				*key_found = 1;
				*entry_ptr = (void *) data;
				return 0;
			}
		}

	/* Key is not present in the bucket */
	for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
		bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];

			if (bkt_sig == 0) {
				uint32_t bkt_key_index;
				uint8_t *bkt_key, *data;

				/* Allocate new key */
				if (t->key_stack_tos == 0) /* No free keys */
					return -ENOSPC;

				bkt_key_index = t->key_stack[
					--t->key_stack_tos];

				/* Install new key */
				bkt_key = &t->key_mem[bkt_key_index <<
					t->key_size_shl];
				data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				bkt->sig[i] = (uint16_t) sig;
				bkt->key_pos[i] = bkt_key_index;
				keycpy(bkt_key, key, t->key_mask, t->key_size);
				memcpy(data, entry, t->entry_size);

				*key_found = 0;
				*entry_ptr = (void *) data;
				return 0;
			}
		}

	/* Bucket full: extend bucket */
	if ((t->bkt_ext_stack_tos > 0) && (t->key_stack_tos > 0)) {
		uint32_t bkt_key_index;
		uint8_t *bkt_key, *data;

		/* Allocate new bucket ext */
		bkt_index = t->bkt_ext_stack[--t->bkt_ext_stack_tos];
		bkt = &t->buckets_ext[bkt_index];

		/* Chain the new bucket ext */
		BUCKET_NEXT_SET(bkt_prev, bkt);
		BUCKET_NEXT_SET_NULL(bkt);

		/* Allocate new key */
		bkt_key_index = t->key_stack[--t->key_stack_tos];
		bkt_key = &t->key_mem[bkt_key_index << t->key_size_shl];

		data = &t->data_mem[bkt_key_index << t->data_size_shl];

		/* Install new key into bucket */
		bkt->sig[0] = (uint16_t) sig;
		bkt->key_pos[0] = bkt_key_index;
		keycpy(bkt_key, key, t->key_mask, t->key_size);
		memcpy(data, entry, t->entry_size);

		*key_found = 0;
		*entry_ptr = (void *) data;
		return 0;
	}

	return -ENOSPC;
}

static int
rte_table_hash_ext_entry_delete(void *table, void *key, int *key_found,
	void *entry)
{
	struct rte_table_hash *t = table;
	struct bucket *bkt0, *bkt, *bkt_prev;
	uint64_t sig;
	uint32_t bkt_index, i;

	sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
	bkt_index = sig & t->bucket_mask;
	bkt0 = &t->buckets[bkt_index];
	sig = (sig >> 16) | 1LLU;

	/* Key is present in the bucket */
	for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
		bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];
			uint32_t bkt_key_index = bkt->key_pos[i];
			uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
				t->key_size_shl];

			if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
				t->key_size) == 0)) {
				uint8_t *data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				/* Uninstall key from bucket */
				bkt->sig[i] = 0;
				*key_found = 1;
				if (entry)
					memcpy(entry, data, t->entry_size);

				/* Free key */
				t->key_stack[t->key_stack_tos++] =
					bkt_key_index;

				/* Check if bucket is unused */
				if ((bkt_prev != NULL) &&
					(bkt->sig[0] == 0) && (bkt->sig[1] == 0) &&
					(bkt->sig[2] == 0) && (bkt->sig[3] == 0)) {
					/* Unchain bucket */
					BUCKET_NEXT_COPY(bkt_prev, bkt);

					/* Clear bucket */
					memset(bkt, 0, sizeof(struct bucket));

					/* Free bucket back to buckets ext */
					bkt_index = bkt - t->buckets_ext;
					t->bkt_ext_stack[t->bkt_ext_stack_tos++]
						= bkt_index;
				}

				return 0;
			}
		}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}

static int rte_table_hash_ext_lookup_unoptimized(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *t = (struct rte_table_hash *) table;
	uint64_t pkts_mask_out = 0;

	__rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask);

	for ( ; pkts_mask; ) {
		struct bucket *bkt0, *bkt;
		struct rte_mbuf *pkt;
		uint8_t *key;
		uint64_t pkt_mask, sig;
		uint32_t pkt_index, bkt_index, i;

		pkt_index = rte_ctz64(pkts_mask);
		pkt_mask = 1LLU << pkt_index;
		pkts_mask &= ~pkt_mask;

		pkt = pkts[pkt_index];
		key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
		sig = (uint64_t) t->f_hash(key, t->key_mask, t->key_size, t->seed);

		bkt_index = sig & t->bucket_mask;
		bkt0 = &t->buckets[bkt_index];
		sig = (sig >> 16) | 1LLU;

		/* Key is present in the bucket */
		for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
			for (i = 0; i < KEYS_PER_BUCKET; i++) {
				uint64_t bkt_sig = (uint64_t) bkt->sig[i];
				uint32_t bkt_key_index = bkt->key_pos[i];
				uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
					t->key_size_shl];

				if ((sig == bkt_sig) && (keycmp(bkt_key, key,
					t->key_mask, t->key_size) == 0)) {
					uint8_t *data = &t->data_mem[
						bkt_key_index << t->data_size_shl];

					pkts_mask_out |= pkt_mask;
					entries[pkt_index] = (void *) data;
					break;
				}
			}
	}

	*lookup_hit_mask = pkts_mask_out;
	return 0;
}

/*
 * mask = match bitmask
 * match = at least one match
 * match_many = more than one match
 * match_pos = position of first match
 *
 *----------------------------------------
 * mask		match	match_many	match_pos
 *----------------------------------------
 * 0000		0	0		00
 * 0001		1	0		00
 * 0010		1	0		01
 * 0011		1	1		00
 *----------------------------------------
 * 0100		1	0		10
 * 0101		1	1		00
 * 0110		1	1		01
 * 0111		1	1		00
 *----------------------------------------
 * 1000		1	0		11
 * 1001		1	1		00
 * 1010		1	1		01
 * 1011		1	1		00
 *----------------------------------------
 * 1100		1	1		10
 * 1101		1	1		00
 * 1110		1	1		01
 * 1111		1	1		00
 *----------------------------------------
 *
 * match = 1111_1111_1111_1110
 * match_many = 1111_1110_1110_1000
 * match_pos = 0001_0010_0001_0011__0001_0010_0001_0000
 *
 * match = 0xFFFELLU
 * match_many = 0xFEE8LLU
 * match_pos = 0x12131210LLU
 */

#define LUT_MATCH	0xFFFELLU
#define LUT_MATCH_MANY	0xFEE8LLU
#define LUT_MATCH_POS	0x12131210LLU
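
/*
 * Example of reading the lookup tables above: if the signature matches in
 * bucket positions 1 and 2, mask = 0110b = 6, so match = (0xFFFE >> 6) & 1 = 1,
 * match_many = (0xFEE8 >> 6) & 1 = 1 and match_pos = (0x12131210 >> 12) & 3 = 1,
 * i.e. the first matching position is position 1.
 */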

#define lookup_cmp_sig(mbuf_sig, bucket, match, match_many, match_pos) \
{ \
	uint64_t bucket_sig[4], mask[4], mask_all; \
\
	bucket_sig[0] = bucket->sig[0]; \
	bucket_sig[1] = bucket->sig[1]; \
	bucket_sig[2] = bucket->sig[2]; \
	bucket_sig[3] = bucket->sig[3]; \
\
	bucket_sig[0] ^= mbuf_sig; \
	bucket_sig[1] ^= mbuf_sig; \
	bucket_sig[2] ^= mbuf_sig; \
	bucket_sig[3] ^= mbuf_sig; \
\
	mask[0] = 0; \
	mask[1] = 0; \
	mask[2] = 0; \
	mask[3] = 0; \
\
	if (bucket_sig[0] == 0) \
		mask[0] = 1; \
	if (bucket_sig[1] == 0) \
		mask[1] = 2; \
	if (bucket_sig[2] == 0) \
		mask[2] = 4; \
	if (bucket_sig[3] == 0) \
		mask[3] = 8; \
\
	mask_all = (mask[0] | mask[1]) | (mask[2] | mask[3]); \
\
	match = (LUT_MATCH >> mask_all) & 1; \
	match_many = (LUT_MATCH_MANY >> mask_all) & 1; \
	match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3; \
}

#define lookup_cmp_key(mbuf, key, match_key, f) \
{ \
	uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset); \
	uint64_t *bkt_key = (uint64_t *) key; \
	uint64_t *key_mask = f->key_mask; \
\
	switch (f->key_size) { \
	case 8: \
	{ \
		uint64_t xor = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
		match_key = 0; \
		if (xor == 0) \
			match_key = 1; \
	} \
	break; \
\
	case 16: \
	{ \
		uint64_t xor[2], or; \
\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1]; \
		or = xor[0] | xor[1]; \
		match_key = 0; \
		if (or == 0) \
			match_key = 1; \
	} \
	break; \
\
	case 32: \
	{ \
		uint64_t xor[4], or; \
\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1]; \
		xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2]; \
		xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3]; \
		or = xor[0] | xor[1] | xor[2] | xor[3]; \
		match_key = 0; \
		if (or == 0) \
			match_key = 1; \
	} \
	break; \
\
	case 64: \
	{ \
		uint64_t xor[8], or; \
\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1]; \
		xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2]; \
		xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3]; \
		xor[4] = (pkt_key[4] & key_mask[4]) ^ bkt_key[4]; \
		xor[5] = (pkt_key[5] & key_mask[5]) ^ bkt_key[5]; \
		xor[6] = (pkt_key[6] & key_mask[6]) ^ bkt_key[6]; \
		xor[7] = (pkt_key[7] & key_mask[7]) ^ bkt_key[7]; \
		or = xor[0] | xor[1] | xor[2] | xor[3] | \
			xor[4] | xor[5] | xor[6] | xor[7]; \
		match_key = 0; \
		if (or == 0) \
			match_key = 1; \
	} \
	break; \
\
	default: \
		match_key = 0; \
		if (keycmp(bkt_key, pkt_key, key_mask, f->key_size) == 0) \
			match_key = 1; \
	} \
}

#define lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
	struct rte_mbuf *mbuf00, *mbuf01; \
	uint32_t key_offset = t->key_offset; \
\
	pkt00_index = rte_ctz64(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
	mbuf00 = pkts[pkt00_index]; \
\
	pkt01_index = rte_ctz64(pkts_mask); \
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
	mbuf01 = pkts[pkt01_index]; \
\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
}

#define lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask, pkt00_index, \
	pkt01_index) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
	struct rte_mbuf *mbuf00, *mbuf01; \
	uint32_t key_offset = t->key_offset; \
\
	pkt00_index = rte_ctz64(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
	mbuf00 = pkts[pkt00_index]; \
\
	pkt01_index = rte_ctz64(pkts_mask); \
	if (pkts_mask == 0) \
		pkt01_index = pkt00_index; \
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
	mbuf01 = pkts[pkt01_index]; \
\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
}

#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index) \
{ \
	struct grinder *g10, *g11; \
	uint64_t sig10, sig11, bkt10_index, bkt11_index; \
	struct rte_mbuf *mbuf10, *mbuf11; \
	struct bucket *bkt10, *bkt11, *buckets = t->buckets; \
	uint8_t *key10, *key11; \
	uint64_t bucket_mask = t->bucket_mask; \
	rte_table_hash_op_hash f_hash = t->f_hash; \
	uint64_t seed = t->seed; \
	uint32_t key_size = t->key_size; \
	uint32_t key_offset = t->key_offset; \
\
	mbuf10 = pkts[pkt10_index]; \
	key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset); \
	sig10 = (uint64_t) f_hash(key10, t->key_mask, key_size, seed); \
	bkt10_index = sig10 & bucket_mask; \
	bkt10 = &buckets[bkt10_index]; \
\
	mbuf11 = pkts[pkt11_index]; \
	key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset); \
	sig11 = (uint64_t) f_hash(key11, t->key_mask, key_size, seed); \
	bkt11_index = sig11 & bucket_mask; \
	bkt11 = &buckets[bkt11_index]; \
\
	rte_prefetch0(bkt10); \
	rte_prefetch0(bkt11); \
\
	g10 = &g[pkt10_index]; \
	g10->sig = sig10; \
	g10->bkt = bkt10; \
\
	g11 = &g[pkt11_index]; \
	g11->sig = sig11; \
	g11->bkt = bkt11; \
}

#define lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many) \
{ \
	struct grinder *g20, *g21; \
	uint64_t sig20, sig21; \
	struct bucket *bkt20, *bkt21; \
	uint8_t *key20, *key21, *key_mem = t->key_mem; \
	uint64_t match20, match21, match_many20, match_many21; \
	uint64_t match_pos20, match_pos21; \
	uint32_t key20_index, key21_index, key_size_shl = t->key_size_shl; \
\
	g20 = &g[pkt20_index]; \
	sig20 = g20->sig; \
	bkt20 = g20->bkt; \
	sig20 = (sig20 >> 16) | 1LLU; \
	lookup_cmp_sig(sig20, bkt20, match20, match_many20, match_pos20); \
	match20 <<= pkt20_index; \
	match_many20 |= BUCKET_NEXT_VALID(bkt20); \
	match_many20 <<= pkt20_index; \
	key20_index = bkt20->key_pos[match_pos20]; \
	key20 = &key_mem[key20_index << key_size_shl]; \
\
	g21 = &g[pkt21_index]; \
	sig21 = g21->sig; \
	bkt21 = g21->bkt; \
	sig21 = (sig21 >> 16) | 1LLU; \
	lookup_cmp_sig(sig21, bkt21, match21, match_many21, match_pos21); \
	match21 <<= pkt21_index; \
	match_many21 |= BUCKET_NEXT_VALID(bkt21); \
	match_many21 <<= pkt21_index; \
	key21_index = bkt21->key_pos[match_pos21]; \
	key21 = &key_mem[key21_index << key_size_shl]; \
\
	rte_prefetch0(key20); \
	rte_prefetch0(key21); \
\
	pkts_mask_match_many |= match_many20 | match_many21; \
\
	g20->match = match20; \
	g20->key_index = key20_index; \
\
	g21->match = match21; \
	g21->key_index = key21_index; \
}

#define lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out, \
	entries) \
{ \
	struct grinder *g30, *g31; \
	struct rte_mbuf *mbuf30, *mbuf31; \
	uint8_t *key30, *key31, *key_mem = t->key_mem; \
	uint8_t *data30, *data31, *data_mem = t->data_mem; \
	uint64_t match30, match31, match_key30, match_key31, match_keys; \
	uint32_t key30_index, key31_index; \
	uint32_t key_size_shl = t->key_size_shl; \
	uint32_t data_size_shl = t->data_size_shl; \
\
	mbuf30 = pkts[pkt30_index]; \
	g30 = &g[pkt30_index]; \
	match30 = g30->match; \
	key30_index = g30->key_index; \
	key30 = &key_mem[key30_index << key_size_shl]; \
	lookup_cmp_key(mbuf30, key30, match_key30, t); \
	match_key30 <<= pkt30_index; \
	match_key30 &= match30; \
	data30 = &data_mem[key30_index << data_size_shl]; \
	entries[pkt30_index] = data30; \
\
	mbuf31 = pkts[pkt31_index]; \
	g31 = &g[pkt31_index]; \
	match31 = g31->match; \
	key31_index = g31->key_index; \
	key31 = &key_mem[key31_index << key_size_shl]; \
	lookup_cmp_key(mbuf31, key31, match_key31, t); \
	match_key31 <<= pkt31_index; \
	match_key31 &= match31; \
	data31 = &data_mem[key31_index << data_size_shl]; \
	entries[pkt31_index] = data31; \
\
	rte_prefetch0(data30); \
	rte_prefetch0(data31); \
\
	match_keys = match_key30 | match_key31; \
	pkts_mask_out |= match_keys; \
}

/*
 * The lookup function implements a 4-stage pipeline, with each stage processing
 * two different packets. The purpose of the pipelined implementation is to hide
 * the latency of prefetching the data structures and to loosen the data
 * dependency between instructions.
 *
 *  p00  _______   p10  _______   p20  _______   p30  _______
 * ----->|       |----->|       |----->|       |----->|       |----->
 *       |   0   |      |   1   |      |   2   |      |   3   |
 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
 *  p01            p11            p21            p31
 *
 * The naming convention is:
 *	pXY = packet Y of stage X, X = 0 .. 3, Y = 0 .. 1
 */
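/*
 * The prologue below runs stage 0 three times (and stages 1 and 2 as their
 * inputs become available) to fill the pipeline, the main loop then retires one
 * pair of packets per iteration, and the epilogue drains stages 1 to 3 for the
 * last three packet pairs. Packets that hit a bucket with more than one
 * signature match, or with chained extension buckets, are deferred to the
 * unoptimized lookup at the end.
 */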
static int rte_table_hash_ext_lookup(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *t = (struct rte_table_hash *) table;
	struct grinder *g = t->grinders;
	uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
	uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
	uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
	int status = 0;

	__rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask);
	RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(t, n_pkts_in);

	/* Cannot run the pipeline with less than 7 packets */
	if (rte_popcount64(pkts_mask) < 7) {
		status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
			pkts_mask, lookup_hit_mask, entries);
		RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
			rte_popcount64(*lookup_hit_mask));
		return status;
	}

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline feed */
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline feed */
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline run */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		pkt30_index = pkt20_index;
		pkt31_index = pkt21_index;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
			pkt00_index, pkt01_index);

		/* Pipeline stage 1 */
		lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

		/* Pipeline stage 2 */
		lookup2_stage2(t, g, pkt20_index, pkt21_index,
			pkts_mask_match_many);

		/* Pipeline stage 3 */
		lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
			pkts_mask_out, entries);
	}

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Slow path */
	pkts_mask_match_many &= ~pkts_mask_out;
	if (pkts_mask_match_many) {
		uint64_t pkts_mask_out_slow = 0;

		status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
			pkts_mask_match_many, &pkts_mask_out_slow, entries);
		pkts_mask_out |= pkts_mask_out_slow;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
		rte_popcount64(pkts_mask_out));
	return status;
}

static int
rte_table_hash_ext_stats_read(void *table, struct rte_table_stats *stats,
	int clear)
{
	struct rte_table_hash *t = table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}

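/*
 * Minimal usage sketch (illustrative only; the 8-byte key, the hash callback
 * app_hash_func, the metadata offset APP_METADATA_OFFSET and the 64-byte entry
 * size are application-specific assumptions, not part of this library):
 *
 *	struct rte_table_hash_params params = {
 *		.name = "TABLE",
 *		.key_size = 8,
 *		.key_offset = APP_METADATA_OFFSET(0),
 *		.key_mask = NULL,
 *		.n_keys = 1 << 10,
 *		.n_buckets = 1 << 8,
 *		.f_hash = app_hash_func,
 *		.seed = 0,
 *	};
 *	void *table = rte_table_hash_ext_ops.f_create(&params,
 *		rte_socket_id(), 64);
 *
 * The entry size passed to f_create() must be a power of 2.
 */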
struct rte_table_ops rte_table_hash_ext_ops = {
	.f_create = rte_table_hash_ext_create,
	.f_free = rte_table_hash_ext_free,
	.f_add = rte_table_hash_ext_entry_add,
	.f_delete = rte_table_hash_ext_entry_delete,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_ext_lookup,
	.f_stats = rte_table_hash_ext_stats_read,
};