/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdalign.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"
#include "rte_lru.h"

#include "table_log.h"

#define KEYS_PER_BUCKET	4

#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(table, val)

#endif

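/*
 * Bucket layout. Each bucket holds up to KEYS_PER_BUCKET (4) entries and is
 * sized to exactly half a cache line (checked at table creation time). A slot
 * is free when its 16-bit signature is 0; stored signatures always have their
 * least significant bit set, so they never collide with the free marker. The
 * lru_list word encodes the recently-used order of the 4 slots and is
 * maintained through the lru_init()/lru_update()/lru_pos() helpers used below.
 */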
struct bucket {
	union {
		struct bucket *next;
		uint64_t lru_list;
	};
	uint16_t sig[KEYS_PER_BUCKET];
	uint32_t key_pos[KEYS_PER_BUCKET];
};

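/*
 * Per-packet lookup state carried between the stages of the pipelined lookup
 * (one grinder per packet slot in the input burst).
 */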
struct grinder {
	struct bucket *bkt;
	uint64_t sig;
	uint64_t match;
	uint64_t match_pos;
	uint32_t key_index;
};

struct rte_table_hash {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t n_keys;
	uint32_t n_buckets;
	rte_table_hash_op_hash f_hash;
	uint64_t seed;
	uint32_t key_offset;

	/* Internal */
	uint64_t bucket_mask;
	uint32_t key_size_shl;
	uint32_t data_size_shl;
	uint32_t key_stack_tos;

	/* Grinder */
	struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];

	/* Tables */
	uint64_t *key_mask;
	struct bucket *buckets;
	uint8_t *key_mem;
	uint8_t *data_mem;
	uint32_t *key_stack;

	/* Table memory */
	alignas(RTE_CACHE_LINE_SIZE) uint8_t memory[0];
};

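/*
 * Compare a stored key (already masked at insertion time) against a candidate
 * key under the table key mask. Keys are processed 8 bytes at a time, which is
 * safe because key_size is validated to be a power of 2 of at least
 * sizeof(uint64_t). Returns 0 on match, 1 otherwise.
 */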
static int
keycmp(void *a, void *b, void *b_mask, uint32_t n_bytes)
{
	uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
	uint32_t i;

	for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
		if (a64[i] != (b64[i] & b_mask64[i]))
			return 1;

	return 0;
}

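/* Copy a key into the table, applying the key mask on the way in. */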
static void
keycpy(void *dst, void *src, void *src_mask, uint32_t n_bytes)
{
	uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
	uint32_t i;

	for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
		dst64[i] = src64[i] & src_mask64[i];
}

static int
check_params_create(struct rte_table_hash_params *params)
{
	/* name */
	if (params->name == NULL) {
		TABLE_LOG(ERR, "%s: name invalid value", __func__);
		return -EINVAL;
	}

	/* key_size */
	if ((params->key_size < sizeof(uint64_t)) ||
		(!rte_is_power_of_2(params->key_size))) {
		TABLE_LOG(ERR, "%s: key_size invalid value", __func__);
		return -EINVAL;
	}

	/* n_keys */
	if (params->n_keys == 0) {
		TABLE_LOG(ERR, "%s: n_keys invalid value", __func__);
		return -EINVAL;
	}

	/* n_buckets */
	if ((params->n_buckets == 0) ||
		(!rte_is_power_of_2(params->n_buckets))) {
		TABLE_LOG(ERR, "%s: n_buckets invalid value", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		TABLE_LOG(ERR, "%s: f_hash invalid value", __func__);
		return -EINVAL;
	}

	return 0;
}

static void *
rte_table_hash_lru_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_hash_params *p = params;
	struct rte_table_hash *t;
	uint64_t table_meta_sz, key_mask_sz, bucket_sz, key_sz, key_stack_sz;
	uint64_t data_sz, total_size;
	uint64_t key_mask_offset, bucket_offset, key_offset, key_stack_offset;
	uint64_t data_offset;
	uint32_t n_buckets, i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		(!rte_is_power_of_2(entry_size)) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		(sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2))) {
		return NULL;
	}

	/*
	 * Table dimensioning
	 *
	 * Objective: Pick the number of buckets (n_buckets) so that there is a
	 * chance to store n_keys keys in the table.
	 *
	 * Note: Since the buckets do not get extended, it is not possible to
	 * guarantee that n_keys keys can be stored in the table at any time. In the
	 * worst case scenario, when all the n_keys fall into the same bucket, only
	 * a maximum of KEYS_PER_BUCKET keys will be stored in the table. This case
	 * defeats the purpose of the hash table and indicates an unsuitable f_hash
	 * or n_keys to n_buckets ratio.
	 *
	 * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
	 */
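	/*
	 * Illustrative example: for n_keys = 1000 and KEYS_PER_BUCKET = 4, the
	 * minimum is (1000 + 3) / 4 = 250 buckets, which rte_align32pow2()
	 * rounds up to 256; the result is then raised to p->n_buckets if the
	 * user asked for more.
	 */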
	n_buckets = rte_align32pow2(
		(p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
	n_buckets = RTE_MAX(n_buckets, p->n_buckets);

	/* Memory allocation */
	table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
	key_mask_sz = RTE_CACHE_LINE_ROUNDUP(p->key_size);
	bucket_sz = RTE_CACHE_LINE_ROUNDUP(n_buckets * sizeof(struct bucket));
	key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
	key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
	data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
	total_size = table_meta_sz + key_mask_sz + bucket_sz + key_sz +
		key_stack_sz + data_sz;

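	/*
	 * total_size is computed in 64 bits; this check only rejects
	 * allocations on builds where size_t is narrower than uint64_t
	 * (e.g. 32-bit targets).
	 */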
	if (total_size > SIZE_MAX) {
		TABLE_LOG(ERR,
			"%s: Cannot allocate %" PRIu64 " bytes for hash "
			"table %s",
			__func__, total_size, p->name);
		return NULL;
	}

	t = rte_zmalloc_socket(p->name,
		(size_t)total_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (t == NULL) {
		TABLE_LOG(ERR,
			"%s: Cannot allocate %" PRIu64 " bytes for hash "
			"table %s",
			__func__, total_size, p->name);
		return NULL;
	}
	TABLE_LOG(INFO, "%s (%u-byte key): Hash table %s memory footprint"
		" is %" PRIu64 " bytes",
		__func__, p->key_size, p->name, total_size);

	/* Memory initialization */
	t->key_size = p->key_size;
	t->entry_size = entry_size;
	t->n_keys = p->n_keys;
	t->n_buckets = n_buckets;
	t->f_hash = p->f_hash;
	t->seed = p->seed;
	t->key_offset = p->key_offset;

	/* Internal */
	t->bucket_mask = t->n_buckets - 1;
	t->key_size_shl = rte_ctz32(p->key_size);
	t->data_size_shl = rte_ctz32(entry_size);

	/* Tables */
	key_mask_offset = 0;
	bucket_offset = key_mask_offset + key_mask_sz;
	key_offset = bucket_offset + bucket_sz;
	key_stack_offset = key_offset + key_sz;
	data_offset = key_stack_offset + key_stack_sz;

	t->key_mask = (uint64_t *) &t->memory[key_mask_offset];
	t->buckets = (struct bucket *) &t->memory[bucket_offset];
	t->key_mem = &t->memory[key_offset];
	t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
	t->data_mem = &t->memory[data_offset];

	/* Key mask */
	if (p->key_mask == NULL)
		memset(t->key_mask, 0xFF, p->key_size);
	else
		memcpy(t->key_mask, p->key_mask, p->key_size);

	/* Key stack: LIFO free list of key indices, initially full */
	for (i = 0; i < t->n_keys; i++)
		t->key_stack[i] = t->n_keys - 1 - i;
	t->key_stack_tos = t->n_keys;

	/* LRU */
	for (i = 0; i < t->n_buckets; i++) {
		struct bucket *bkt = &t->buckets[i];

		lru_init(bkt);
	}

	return t;
}

static int
rte_table_hash_lru_free(void *table)
{
	struct rte_table_hash *t = table;

	/* Check input parameters */
	if (t == NULL)
		return -EINVAL;

	rte_free(t);
	return 0;
}

static int
rte_table_hash_lru_entry_add(void *table, void *key, void *entry,
	int *key_found, void **entry_ptr)
{
	struct rte_table_hash *t = table;
	struct bucket *bkt;
	uint64_t sig;
	uint32_t bkt_index, i;

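	/*
	 * The low bits of the hash select the bucket; bits 16 and up, with
	 * the least significant bit forced to 1, provide the per-slot
	 * signature (a stored signature is therefore never 0, which is the
	 * free-slot marker).
	 */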
	sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
	bkt_index = sig & t->bucket_mask;
	bkt = &t->buckets[bkt_index];
	sig = (sig >> 16) | 1LLU;

	/* Key is present in the bucket */
	for (i = 0; i < KEYS_PER_BUCKET; i++) {
		uint64_t bkt_sig = (uint64_t) bkt->sig[i];
		uint32_t bkt_key_index = bkt->key_pos[i];
		uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
			t->key_size_shl];

		if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
			t->key_size) == 0)) {
			uint8_t *data = &t->data_mem[bkt_key_index <<
				t->data_size_shl];

			memcpy(data, entry, t->entry_size);
			lru_update(bkt, i);
			*key_found = 1;
			*entry_ptr = (void *) data;
			return 0;
		}
	}

	/* Key is not present in the bucket */
	for (i = 0; i < KEYS_PER_BUCKET; i++) {
		uint64_t bkt_sig = (uint64_t) bkt->sig[i];

		if (bkt_sig == 0) {
			uint32_t bkt_key_index;
			uint8_t *bkt_key, *data;

			/* Allocate new key */
			if (t->key_stack_tos == 0) {
				/* No keys available */
				return -ENOSPC;
			}
			bkt_key_index = t->key_stack[--t->key_stack_tos];

			/* Install new key */
			bkt_key = &t->key_mem[bkt_key_index << t->key_size_shl];
			data = &t->data_mem[bkt_key_index << t->data_size_shl];

			bkt->sig[i] = (uint16_t) sig;
			bkt->key_pos[i] = bkt_key_index;
			keycpy(bkt_key, key, t->key_mask, t->key_size);
			memcpy(data, entry, t->entry_size);
			lru_update(bkt, i);

			*key_found = 0;
			*entry_ptr = (void *) data;
			return 0;
		}
	}

	/* Bucket full: evict the least recently used slot and reuse its key */
	{
		uint64_t pos = lru_pos(bkt);
		uint32_t bkt_key_index = bkt->key_pos[pos];
		uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
			t->key_size_shl];
		uint8_t *data = &t->data_mem[bkt_key_index << t->data_size_shl];

		bkt->sig[pos] = (uint16_t) sig;
		keycpy(bkt_key, key, t->key_mask, t->key_size);
		memcpy(data, entry, t->entry_size);
		lru_update(bkt, pos);

		*key_found = 0;
		*entry_ptr = (void *) data;
		return 0;
	}
}

static int
rte_table_hash_lru_entry_delete(void *table, void *key, int *key_found,
	void *entry)
{
	struct rte_table_hash *t = table;
	struct bucket *bkt;
	uint64_t sig;
	uint32_t bkt_index, i;

	sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
	bkt_index = sig & t->bucket_mask;
	bkt = &t->buckets[bkt_index];
	sig = (sig >> 16) | 1LLU;

	/* Key is present in the bucket */
	for (i = 0; i < KEYS_PER_BUCKET; i++) {
		uint64_t bkt_sig = (uint64_t) bkt->sig[i];
		uint32_t bkt_key_index = bkt->key_pos[i];
		uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
			t->key_size_shl];

		if ((sig == bkt_sig) &&
			(keycmp(bkt_key, key, t->key_mask, t->key_size) == 0)) {
			uint8_t *data = &t->data_mem[bkt_key_index <<
				t->data_size_shl];

			bkt->sig[i] = 0;
			t->key_stack[t->key_stack_tos++] = bkt_key_index;
			*key_found = 1;
			if (entry)
				memcpy(entry, data, t->entry_size);
			return 0;
		}
	}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}

399 
400 static int rte_table_hash_lru_lookup_unoptimized(
401 	void *table,
402 	struct rte_mbuf **pkts,
403 	uint64_t pkts_mask,
404 	uint64_t *lookup_hit_mask,
405 	void **entries)
406 {
407 	struct rte_table_hash *t = (struct rte_table_hash *) table;
408 	uint64_t pkts_mask_out = 0;
409 
410 	__rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask);
411 	RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(t, n_pkts_in);
412 
413 	for ( ; pkts_mask; ) {
414 		struct bucket *bkt;
415 		struct rte_mbuf *pkt;
416 		uint8_t *key;
417 		uint64_t pkt_mask, sig;
418 		uint32_t pkt_index, bkt_index, i;
419 
420 		pkt_index = rte_ctz64(pkts_mask);
421 		pkt_mask = 1LLU << pkt_index;
422 		pkts_mask &= ~pkt_mask;
423 
424 		pkt = pkts[pkt_index];
425 		key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
426 		sig = (uint64_t) t->f_hash(key, t->key_mask, t->key_size, t->seed);
427 
428 		bkt_index = sig & t->bucket_mask;
429 		bkt = &t->buckets[bkt_index];
430 		sig = (sig >> 16) | 1LLU;
431 
432 		/* Key is present in the bucket */
433 		for (i = 0; i < KEYS_PER_BUCKET; i++) {
434 			uint64_t bkt_sig = (uint64_t) bkt->sig[i];
435 			uint32_t bkt_key_index = bkt->key_pos[i];
436 			uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
437 				t->key_size_shl];
438 
439 			if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
440 				t->key_size) == 0)) {
441 				uint8_t *data = &t->data_mem[bkt_key_index <<
442 					t->data_size_shl];
443 
444 				lru_update(bkt, i);
445 				pkts_mask_out |= pkt_mask;
446 				entries[pkt_index] = (void *) data;
447 				break;
448 			}
449 		}
450 	}
451 
452 	*lookup_hit_mask = pkts_mask_out;
453 	RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - rte_popcount64(pkts_mask_out));
454 	return 0;
455 }
456 
457 /*
458  * mask = match bitmask
459  * match = at least one match
460  * match_many = more than one match
461  * match_pos = position of first match
462  *
463  * ----------------------------------------
464  * mask		 match	 match_many	  match_pos
465  * ----------------------------------------
466  * 0000		 0		 0			  00
467  * 0001		 1		 0			  00
468  * 0010		 1		 0			  01
469  * 0011		 1		 1			  00
470  * ----------------------------------------
471  * 0100		 1		 0			  10
472  * 0101		 1		 1			  00
473  * 0110		 1		 1			  01
474  * 0111		 1		 1			  00
475  * ----------------------------------------
476  * 1000		 1		 0			  11
477  * 1001		 1		 1			  00
478  * 1010		 1		 1			  01
479  * 1011		 1		 1			  00
480  * ----------------------------------------
481  * 1100		 1		 1			  10
482  * 1101		 1		 1			  00
483  * 1110		 1		 1			  01
484  * 1111		 1		 1			  00
485  * ----------------------------------------
486  *
487  * match = 1111_1111_1111_1110
488  * match_many = 1111_1110_1110_1000
489  * match_pos = 0001_0010_0001_0011__0001_0010_0001_0000
490  *
491  * match = 0xFFFELLU
492  * match_many = 0xFEE8LLU
493  * match_pos = 0x12131210LLU
494  */
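/*
 * Example read-out: for mask = 0110 (slots 1 and 2 match), mask_all = 6, so
 * match = (0xFFFE >> 6) & 1 = 1, match_many = (0xFEE8 >> 6) & 1 = 1, and
 * match_pos = (0x12131210 >> 12) & 3 = 01, i.e. the first matching slot is 1.
 */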

#define LUT_MATCH						0xFFFELLU
#define LUT_MATCH_MANY						0xFEE8LLU
#define LUT_MATCH_POS						0x12131210LLU

#define lookup_cmp_sig(mbuf_sig, bucket, match, match_many, match_pos)\
{								\
	uint64_t bucket_sig[4], mask[4], mask_all;		\
								\
	bucket_sig[0] = bucket->sig[0];				\
	bucket_sig[1] = bucket->sig[1];				\
	bucket_sig[2] = bucket->sig[2];				\
	bucket_sig[3] = bucket->sig[3];				\
								\
	bucket_sig[0] ^= mbuf_sig;				\
	bucket_sig[1] ^= mbuf_sig;				\
	bucket_sig[2] ^= mbuf_sig;				\
	bucket_sig[3] ^= mbuf_sig;				\
								\
	mask[0] = 0;						\
	mask[1] = 0;						\
	mask[2] = 0;						\
	mask[3] = 0;						\
								\
	if (bucket_sig[0] == 0)					\
		mask[0] = 1;					\
	if (bucket_sig[1] == 0)					\
		mask[1] = 2;					\
	if (bucket_sig[2] == 0)					\
		mask[2] = 4;					\
	if (bucket_sig[3] == 0)					\
		mask[3] = 8;					\
								\
	mask_all = (mask[0] | mask[1]) | (mask[2] | mask[3]);	\
								\
	match = (LUT_MATCH >> mask_all) & 1;			\
	match_many = (LUT_MATCH_MANY >> mask_all) & 1;		\
	match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3;	\
}

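/*
 * Key comparison under mask, unrolled for the common power-of-2 key sizes of
 * 8, 16, 32 and 64 bytes; any other size falls back to the generic keycmp().
 * Sets match_key to 1 when the packet key equals the stored bucket key.
 */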
#define lookup_cmp_key(mbuf, key, match_key, f)				\
{									\
	uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset);\
	uint64_t *bkt_key = (uint64_t *) key;				\
	uint64_t *key_mask = f->key_mask;				\
									\
	switch (f->key_size) {						\
	case 8:								\
	{								\
		uint64_t xor = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		match_key = 0;						\
		if (xor == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 16:							\
	{								\
		uint64_t xor[2], or;					\
									\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];	\
		or = xor[0] | xor[1];					\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 32:							\
	{								\
		uint64_t xor[4], or;					\
									\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];	\
		xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2];	\
		xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3];	\
		or = xor[0] | xor[1] | xor[2] | xor[3];			\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 64:							\
	{								\
		uint64_t xor[8], or;					\
									\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];	\
		xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2];	\
		xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3];	\
		xor[4] = (pkt_key[4] & key_mask[4]) ^ bkt_key[4];	\
		xor[5] = (pkt_key[5] & key_mask[5]) ^ bkt_key[5];	\
		xor[6] = (pkt_key[6] & key_mask[6]) ^ bkt_key[6];	\
		xor[7] = (pkt_key[7] & key_mask[7]) ^ bkt_key[7];	\
		or = xor[0] | xor[1] | xor[2] | xor[3] |		\
			xor[4] | xor[5] | xor[6] | xor[7];		\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	default:							\
		match_key = 0;						\
		if (keycmp(bkt_key, pkt_key, key_mask, f->key_size) == 0)	\
			match_key = 1;					\
	}								\
}

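/*
 * Two-packet pipeline stages used by rte_table_hash_lru_lookup():
 *   stage 0: pick the next two packets from pkts_mask and prefetch their keys
 *   stage 1: hash the keys and prefetch the corresponding buckets
 *   stage 2: compare signatures (lookup_cmp_sig) and prefetch the stored keys
 *   stage 3: compare full keys (lookup_cmp_key), prefetch the data entries,
 *            update the bucket LRU order and record the hits
 * The _with_odd_support variant of stage 0 duplicates the last packet when
 * the burst runs out, so the pipeline can drain an odd-sized burst.
 */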
#define lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index)\
{								\
	uint64_t pkt00_mask, pkt01_mask;			\
	struct rte_mbuf *mbuf00, *mbuf01;			\
	uint32_t key_offset = t->key_offset;			\
								\
	pkt00_index = rte_ctz64(pkts_mask);			\
	pkt00_mask = 1LLU << pkt00_index;			\
	pkts_mask &= ~pkt00_mask;				\
	mbuf00 = pkts[pkt00_index];				\
								\
	pkt01_index = rte_ctz64(pkts_mask);			\
	pkt01_mask = 1LLU << pkt01_index;			\
	pkts_mask &= ~pkt01_mask;				\
	mbuf01 = pkts[pkt01_index];				\
								\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
}

#define lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask, pkt00_index, \
	pkt01_index)						\
{								\
	uint64_t pkt00_mask, pkt01_mask;			\
	struct rte_mbuf *mbuf00, *mbuf01;			\
	uint32_t key_offset = t->key_offset;			\
								\
	pkt00_index = rte_ctz64(pkts_mask);			\
	pkt00_mask = 1LLU << pkt00_index;			\
	pkts_mask &= ~pkt00_mask;				\
	mbuf00 = pkts[pkt00_index];				\
								\
	pkt01_index = rte_ctz64(pkts_mask);			\
	if (pkts_mask == 0)					\
		pkt01_index = pkt00_index;			\
								\
	pkt01_mask = 1LLU << pkt01_index;			\
	pkts_mask &= ~pkt01_mask;				\
	mbuf01 = pkts[pkt01_index];				\
								\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
}

#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index)\
{								\
	struct grinder *g10, *g11;				\
	uint64_t sig10, sig11, bkt10_index, bkt11_index;	\
	struct rte_mbuf *mbuf10, *mbuf11;			\
	struct bucket *bkt10, *bkt11, *buckets = t->buckets;	\
	uint8_t *key10, *key11;					\
	uint64_t bucket_mask = t->bucket_mask;			\
	rte_table_hash_op_hash f_hash = t->f_hash;		\
	uint64_t seed = t->seed;				\
	uint32_t key_size = t->key_size;			\
	uint32_t key_offset = t->key_offset;			\
								\
	mbuf10 = pkts[pkt10_index];				\
	key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset);\
	sig10 = (uint64_t) f_hash(key10, t->key_mask, key_size, seed);\
	bkt10_index = sig10 & bucket_mask;			\
	bkt10 = &buckets[bkt10_index];				\
								\
	mbuf11 = pkts[pkt11_index];				\
	key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset);\
	sig11 = (uint64_t) f_hash(key11, t->key_mask, key_size, seed);\
	bkt11_index = sig11 & bucket_mask;			\
	bkt11 = &buckets[bkt11_index];				\
								\
	rte_prefetch0(bkt10);					\
	rte_prefetch0(bkt11);					\
								\
	g10 = &g[pkt10_index];					\
	g10->sig = sig10;					\
	g10->bkt = bkt10;					\
								\
	g11 = &g[pkt11_index];					\
	g11->sig = sig11;					\
	g11->bkt = bkt11;					\
}

#define lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many)\
{								\
	struct grinder *g20, *g21;				\
	uint64_t sig20, sig21;					\
	struct bucket *bkt20, *bkt21;				\
	uint8_t *key20, *key21, *key_mem = t->key_mem;		\
	uint64_t match20, match21, match_many20, match_many21;	\
	uint64_t match_pos20, match_pos21;			\
	uint32_t key20_index, key21_index, key_size_shl = t->key_size_shl;\
								\
	g20 = &g[pkt20_index];					\
	sig20 = g20->sig;					\
	bkt20 = g20->bkt;					\
	sig20 = (sig20 >> 16) | 1LLU;				\
	lookup_cmp_sig(sig20, bkt20, match20, match_many20, match_pos20);\
	match20 <<= pkt20_index;				\
	match_many20 <<= pkt20_index;				\
	key20_index = bkt20->key_pos[match_pos20];		\
	key20 = &key_mem[key20_index << key_size_shl];		\
								\
	g21 = &g[pkt21_index];					\
	sig21 = g21->sig;					\
	bkt21 = g21->bkt;					\
	sig21 = (sig21 >> 16) | 1LLU;				\
	lookup_cmp_sig(sig21, bkt21, match21, match_many21, match_pos21);\
	match21 <<= pkt21_index;				\
	match_many21 <<= pkt21_index;				\
	key21_index = bkt21->key_pos[match_pos21];		\
	key21 = &key_mem[key21_index << key_size_shl];		\
								\
	rte_prefetch0(key20);					\
	rte_prefetch0(key21);					\
								\
	pkts_mask_match_many |= match_many20 | match_many21;	\
								\
	g20->match = match20;					\
	g20->match_pos = match_pos20;				\
	g20->key_index = key20_index;				\
								\
	g21->match = match21;					\
	g21->match_pos = match_pos21;				\
	g21->key_index = key21_index;				\
}

#define lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out, \
	entries)						\
{								\
	struct grinder *g30, *g31;				\
	struct rte_mbuf *mbuf30, *mbuf31;			\
	struct bucket *bkt30, *bkt31;				\
	uint8_t *key30, *key31, *key_mem = t->key_mem;		\
	uint8_t *data30, *data31, *data_mem = t->data_mem;	\
	uint64_t match30, match31, match_pos30, match_pos31;	\
	uint64_t match_key30, match_key31, match_keys;		\
	uint32_t key30_index, key31_index;			\
	uint32_t key_size_shl = t->key_size_shl;		\
	uint32_t data_size_shl = t->data_size_shl;		\
								\
	mbuf30 = pkts[pkt30_index];				\
	g30 = &g[pkt30_index];					\
	bkt30 = g30->bkt;					\
	match30 = g30->match;					\
	match_pos30 = g30->match_pos;				\
	key30_index = g30->key_index;				\
	key30 = &key_mem[key30_index << key_size_shl];		\
	lookup_cmp_key(mbuf30, key30, match_key30, t);		\
	match_key30 <<= pkt30_index;				\
	match_key30 &= match30;					\
	data30 = &data_mem[key30_index << data_size_shl];	\
	entries[pkt30_index] = data30;				\
								\
	mbuf31 = pkts[pkt31_index];				\
	g31 = &g[pkt31_index];					\
	bkt31 = g31->bkt;					\
	match31 = g31->match;					\
	match_pos31 = g31->match_pos;				\
	key31_index = g31->key_index;				\
	key31 = &key_mem[key31_index << key_size_shl];		\
	lookup_cmp_key(mbuf31, key31, match_key31, t);		\
	match_key31 <<= pkt31_index;				\
	match_key31 &= match31;					\
	data31 = &data_mem[key31_index << data_size_shl];	\
	entries[pkt31_index] = data31;				\
								\
	rte_prefetch0(data30);					\
	rte_prefetch0(data31);					\
								\
	match_keys = match_key30 | match_key31;			\
	pkts_mask_out |= match_keys;				\
								\
	if (match_key30 == 0)					\
		match_pos30 = 4;				\
	lru_update(bkt30, match_pos30);				\
								\
	if (match_key31 == 0)					\
		match_pos31 = 4;				\
	lru_update(bkt31, match_pos31);				\
}

/*
 * The lookup function implements a 4-stage pipeline, with each stage processing
 * two different packets. The purpose of the pipelined implementation is to hide
 * the latency of prefetching the data structures and loosen the data dependency
 * between instructions.
 *
 *   p00  _______   p10  _______   p20  _______   p30  _______
 * ----->|       |----->|       |----->|       |----->|       |----->
 *       |   0   |      |   1   |      |   2   |      |   3   |
 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
 *   p01            p11            p21            p31
 *
 * The naming convention is:
 *	  pXY = packet Y of stage X, X = 0 .. 3, Y = 0 .. 1
 */
static int rte_table_hash_lru_lookup(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *t = (struct rte_table_hash *) table;
	struct grinder *g = t->grinders;
	uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
	uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
	uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
	int status = 0;

	__rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask);
	RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(t, n_pkts_in);

	/* Cannot run the pipeline with less than 7 packets */
	if (rte_popcount64(pkts_mask) < 7)
		return rte_table_hash_lru_lookup_unoptimized(table, pkts,
			pkts_mask, lookup_hit_mask, entries);

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline feed */
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline feed */
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline run */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		pkt30_index = pkt20_index;
		pkt31_index = pkt21_index;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
			pkt00_index, pkt01_index);

		/* Pipeline stage 1 */
		lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

		/* Pipeline stage 2 */
		lookup2_stage2(t, g, pkt20_index, pkt21_index,
			pkts_mask_match_many);

		/* Pipeline stage 3 */
		lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
			pkts_mask_out, entries);
	}

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/*
	 * Slow path: buckets where more than one signature matched are
	 * re-examined key by key, since the fast path only compares the key
	 * at the first matching position.
	 */
	pkts_mask_match_many &= ~pkts_mask_out;
	if (pkts_mask_match_many) {
		uint64_t pkts_mask_out_slow = 0;

		status = rte_table_hash_lru_lookup_unoptimized(table, pkts,
			pkts_mask_match_many, &pkts_mask_out_slow, entries);
		pkts_mask_out |= pkts_mask_out_slow;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - rte_popcount64(pkts_mask_out));
	return status;
}

static int
rte_table_hash_lru_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
	struct rte_table_hash *t = table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}

struct rte_table_ops rte_table_hash_lru_ops = {
	.f_create = rte_table_hash_lru_create,
	.f_free = rte_table_hash_lru_free,
	.f_add = rte_table_hash_lru_entry_add,
	.f_delete = rte_table_hash_lru_entry_delete,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_lru_lookup,
	.f_stats = rte_table_hash_lru_stats_read,
};
958