/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
#include <string.h>
#include <stdio.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_prefetch.h>
#include <rte_jhash.h>
#include <rte_hash_crc.h>

#include "rte_swx_keycmp.h"
#include "rte_swx_table_em.h"

#define CHECK(condition, err_code) \
do { \
	if (!(condition)) \
		return -(err_code); \
} while (0)

#ifndef RTE_SWX_TABLE_EM_USE_HUGE_PAGES
#define RTE_SWX_TABLE_EM_USE_HUGE_PAGES 1
#endif

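/* Memory back-end selection: when RTE_SWX_TABLE_EM_USE_HUGE_PAGES is set (the
 * default), the table memory comes from the DPDK heap (rte_zmalloc_socket),
 * which is typically backed by huge pages; otherwise, libnuma is used to
 * allocate regular pages on the requested NUMA node.
 */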
#if RTE_SWX_TABLE_EM_USE_HUGE_PAGES

#include <rte_malloc.h>

static void *
env_malloc(size_t size, size_t alignment, int numa_node)
{
	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
}

static void
env_free(void *start, size_t size __rte_unused)
{
	rte_free(start);
}

#else

#include <numa.h>

static void *
env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
{
	return numa_alloc_onnode(size, numa_node);
}

static void
env_free(void *start, size_t size)
{
	numa_free(start, size);
}

#endif

static void
keycpy(void *dst, void *src, uint32_t n_bytes)
{
	memcpy(dst, src, n_bytes);
}

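/* Each bucket holds up to 4 keys. A key slot stores a 16-bit signature (0 means
 * the slot is empty, hence stored signatures always have their lowest bit set)
 * and the ID of the key in the key/data arrays. Buckets that overflow are
 * chained to bucket extensions through the next pointer.
 */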
#define KEYS_PER_BUCKET 4

struct bucket_extension {
	struct bucket_extension *next;
	uint16_t sig[KEYS_PER_BUCKET];
	uint32_t key_id[KEYS_PER_BUCKET];
};

struct table {
	/* Input parameters */
	struct rte_swx_table_params params;

	/* Internal. */
	uint32_t key_size_shl;
	uint32_t data_size_shl;
	uint32_t n_buckets;
	uint32_t n_buckets_ext;
	uint32_t key_stack_tos;
	uint32_t bkt_ext_stack_tos;
	uint64_t total_size;
	rte_swx_keycmp_func_t keycmp_func;

	/* Memory arrays. */
	struct bucket_extension *buckets;
	struct bucket_extension *buckets_ext;
	uint8_t *keys;
	uint32_t *key_stack;
	uint32_t *bkt_ext_stack;
	uint8_t *data;
};

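/* Key and data entry sizes are rounded up to powers of two at table creation
 * time, so entry addresses can be computed with a shift instead of a multiply.
 */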
static inline uint8_t *
table_key(struct table *t, uint32_t key_id)
{
	return &t->keys[(uint64_t)key_id << t->key_size_shl];
}

static inline uint64_t *
table_key_data(struct table *t, uint32_t key_id)
{
	return (uint64_t *)&t->data[(uint64_t)key_id << t->data_size_shl];
}

static inline int
bkt_is_empty(struct bucket_extension *bkt)
{
	return (!bkt->sig[0] && !bkt->sig[1] && !bkt->sig[2] && !bkt->sig[3]) ? 1 : 0;
}

/* Return:
 * 0 = Bucket key position is NOT empty;
 * 1 = Bucket key position is empty.
 */
static inline int
bkt_key_is_empty(struct bucket_extension *bkt, uint32_t bkt_pos)
{
	return bkt->sig[bkt_pos] ? 0 : 1;
}

/* Return: 0 = Keys are NOT equal; 1 = Keys are equal. */
static inline int
bkt_keycmp(struct table *t,
	   struct bucket_extension *bkt,
	   uint8_t *input_key,
	   uint32_t bkt_pos,
	   uint32_t input_sig)
{
	uint32_t bkt_key_id;
	uint8_t *bkt_key;

	/* Key signature comparison. */
	if (input_sig != bkt->sig[bkt_pos])
		return 0;

	/* Key comparison. */
	bkt_key_id = bkt->key_id[bkt_pos];
	bkt_key = table_key(t, bkt_key_id);
	return t->keycmp_func(bkt_key, input_key, t->params.key_size);
}

static inline void
bkt_key_install(struct table *t,
		struct bucket_extension *bkt,
		struct rte_swx_table_entry *input,
		uint32_t bkt_pos,
		uint32_t bkt_key_id,
		uint32_t input_sig)
{
	uint8_t *bkt_key;
	uint64_t *bkt_data;

	/* Key signature. */
	bkt->sig[bkt_pos] = (uint16_t)input_sig;

	/* Key. */
	bkt->key_id[bkt_pos] = bkt_key_id;
	bkt_key = table_key(t, bkt_key_id);
	keycpy(bkt_key, input->key, t->params.key_size);

	/* Key data. */
	bkt_data = table_key_data(t, bkt_key_id);
	bkt_data[0] = input->action_id;
	if (t->params.action_data_size && input->action_data)
		memcpy(&bkt_data[1], input->action_data, t->params.action_data_size);
}

static inline void
bkt_key_data_update(struct table *t,
		    struct bucket_extension *bkt,
		    struct rte_swx_table_entry *input,
		    uint32_t bkt_pos)
{
	uint32_t bkt_key_id;
	uint64_t *bkt_data;

	/* Key. */
	bkt_key_id = bkt->key_id[bkt_pos];

	/* Key data. */
	bkt_data = table_key_data(t, bkt_key_id);
	bkt_data[0] = input->action_id;
	if (t->params.action_data_size && input->action_data)
		memcpy(&bkt_data[1], input->action_data, t->params.action_data_size);
}

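/* The table is created as a single memory block: the table meta-data is
 * followed by the bucket array, the bucket extension array, the key array, the
 * key free-list stack, the bucket extension free-list stack and the key data
 * array, with each region rounded up to a cache line boundary.
 */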
#define CL RTE_CACHE_LINE_ROUNDUP

static int
__table_create(struct table **table,
	       uint64_t *memory_footprint,
	       struct rte_swx_table_params *params,
	       const char *args __rte_unused,
	       int numa_node)
{
	struct table *t;
	uint8_t *memory;
	size_t table_meta_sz, bucket_sz, bucket_ext_sz, key_sz,
		key_stack_sz, bkt_ext_stack_sz, data_sz, total_size;
	size_t bucket_offset, bucket_ext_offset, key_offset,
		key_stack_offset, bkt_ext_stack_offset, data_offset;
	uint32_t key_size, key_data_size, n_buckets, n_buckets_ext, i;

	/* Check input arguments. */
	CHECK(params, EINVAL);
	CHECK(params->match_type == RTE_SWX_TABLE_MATCH_EXACT, EINVAL);
	CHECK(params->key_size, EINVAL);

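	/* For exact match, a key mask (when provided) must select every key
	 * bit, i.e. all of its bytes must be 0xFF.
	 */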
	if (params->key_mask0) {
		for (i = 0; i < params->key_size; i++)
			if (params->key_mask0[i] != 0xFF)
				break;

		CHECK(i == params->key_size, EINVAL);
	}

	CHECK(params->n_keys_max, EINVAL);

	/* Memory allocation. */
	key_size = rte_align64pow2(params->key_size);
	key_data_size = rte_align64pow2(params->action_data_size + 8);
	n_buckets = rte_align64pow2((params->n_keys_max + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
	n_buckets_ext = n_buckets;

	table_meta_sz = CL(sizeof(struct table));
	bucket_sz = CL(n_buckets * sizeof(struct bucket_extension));
	bucket_ext_sz = CL(n_buckets_ext * sizeof(struct bucket_extension));
	key_sz = CL(params->n_keys_max * key_size);
	key_stack_sz = CL(params->n_keys_max * sizeof(uint32_t));
	bkt_ext_stack_sz = CL(n_buckets_ext * sizeof(uint32_t));
	data_sz = CL(params->n_keys_max * key_data_size);
	total_size = table_meta_sz + bucket_sz + bucket_ext_sz +
		     key_sz + key_stack_sz + bkt_ext_stack_sz + data_sz;

	bucket_offset = table_meta_sz;
	bucket_ext_offset = bucket_offset + bucket_sz;
	key_offset = bucket_ext_offset + bucket_ext_sz;
	key_stack_offset = key_offset + key_sz;
	bkt_ext_stack_offset = key_stack_offset + key_stack_sz;
	data_offset = bkt_ext_stack_offset + bkt_ext_stack_sz;

	if (!table) {
		if (memory_footprint)
			*memory_footprint = total_size;
		return 0;
	}

	memory = env_malloc(total_size, RTE_CACHE_LINE_SIZE, numa_node);
	CHECK(memory, ENOMEM);
	memset(memory, 0, total_size);

	/* Initialization. */
	t = (struct table *)memory;
	memcpy(&t->params, params, sizeof(*params));
	t->params.key_mask0 = NULL;
	if (!params->hash_func)
		t->params.hash_func = rte_hash_crc;

	t->key_size_shl = rte_ctz32(key_size);
	t->data_size_shl = rte_ctz32(key_data_size);
	t->n_buckets = n_buckets;
	t->n_buckets_ext = n_buckets_ext;
	t->total_size = total_size;
	t->keycmp_func = rte_swx_keycmp_func_get(params->key_size);

	t->buckets = (struct bucket_extension *)&memory[bucket_offset];
	t->buckets_ext = (struct bucket_extension *)&memory[bucket_ext_offset];
	t->keys = &memory[key_offset];
	t->key_stack = (uint32_t *)&memory[key_stack_offset];
	t->bkt_ext_stack = (uint32_t *)&memory[bkt_ext_stack_offset];
	t->data = &memory[data_offset];

	for (i = 0; i < t->params.n_keys_max; i++)
		t->key_stack[i] = t->params.n_keys_max - 1 - i;
	t->key_stack_tos = t->params.n_keys_max;

	for (i = 0; i < n_buckets_ext; i++)
		t->bkt_ext_stack[i] = n_buckets_ext - 1 - i;
	t->bkt_ext_stack_tos = n_buckets_ext;

	*table = t;
	return 0;
}

static void
table_free(void *table)
{
	struct table *t = table;

	if (!t)
		return;

	env_free(t, t->total_size);
}

static int
table_add(void *table, struct rte_swx_table_entry *entry)
{
	struct table *t = table;
	struct bucket_extension *bkt0, *bkt, *bkt_prev;
	uint32_t input_sig, bkt_id, i;

	CHECK(t, EINVAL);
	CHECK(entry, EINVAL);
	CHECK(entry->key, EINVAL);

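	/* The lower bits of the hash select the bucket; the upper 16 bits, with
	 * the lowest bit forced to 1 so the value can never be 0 (the "empty
	 * slot" marker), are kept as the key signature.
	 */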
	input_sig = t->params.hash_func(entry->key, t->params.key_size, 0);
	bkt_id = input_sig & (t->n_buckets - 1);
	bkt0 = &t->buckets[bkt_id];
	input_sig = (input_sig >> 16) | 1;

	/* Key is present in the bucket. */
	for (bkt = bkt0; bkt; bkt = bkt->next)
		for (i = 0; i < KEYS_PER_BUCKET; i++)
			if (bkt_keycmp(t, bkt, entry->key, i, input_sig)) {
				bkt_key_data_update(t, bkt, entry, i);
				return 0;
			}

	/* Key is not present in the bucket. Bucket not full. */
	for (bkt = bkt0, bkt_prev = NULL; bkt; bkt_prev = bkt, bkt = bkt->next)
		for (i = 0; i < KEYS_PER_BUCKET; i++)
			if (bkt_key_is_empty(bkt, i)) {
				uint32_t new_bkt_key_id;

				/* Allocate new key & install. */
				CHECK(t->key_stack_tos, ENOSPC);
				new_bkt_key_id = t->key_stack[--t->key_stack_tos];
				bkt_key_install(t, bkt, entry, i, new_bkt_key_id, input_sig);
				return 0;
			}

	/* Bucket full: extend bucket. */
	if (t->bkt_ext_stack_tos && t->key_stack_tos) {
		struct bucket_extension *new_bkt;
		uint32_t new_bkt_id, new_bkt_key_id;

		/* Allocate new bucket extension & install. */
		new_bkt_id = t->bkt_ext_stack[--t->bkt_ext_stack_tos];
		new_bkt = &t->buckets_ext[new_bkt_id];
		memset(new_bkt, 0, sizeof(*new_bkt));
		bkt_prev->next = new_bkt;

		/* Allocate new key & install. */
		new_bkt_key_id = t->key_stack[--t->key_stack_tos];
		bkt_key_install(t, new_bkt, entry, 0, new_bkt_key_id, input_sig);
		return 0;
	}

	CHECK(0, ENOSPC);
}

static int
table_del(void *table, struct rte_swx_table_entry *entry)
{
	struct table *t = table;
	struct bucket_extension *bkt0, *bkt, *bkt_prev;
	uint32_t input_sig, bkt_id, i;

	CHECK(t, EINVAL);
	CHECK(entry, EINVAL);
	CHECK(entry->key, EINVAL);

	input_sig = t->params.hash_func(entry->key, t->params.key_size, 0);
	bkt_id = input_sig & (t->n_buckets - 1);
	bkt0 = &t->buckets[bkt_id];
	input_sig = (input_sig >> 16) | 1;

	/* Key is present in the bucket. */
	for (bkt = bkt0, bkt_prev = NULL; bkt; bkt_prev = bkt, bkt = bkt->next)
		for (i = 0; i < KEYS_PER_BUCKET; i++)
			if (bkt_keycmp(t, bkt, entry->key, i, input_sig)) {
				/* Key free. */
				bkt->sig[i] = 0;
				t->key_stack[t->key_stack_tos++] = bkt->key_id[i];

				/* Bucket extension free if empty and not the 1st in bucket. */
				if (bkt_prev && bkt_is_empty(bkt)) {
					bkt_prev->next = bkt->next;
					bkt_id = bkt - t->buckets_ext;
					t->bkt_ext_stack[t->bkt_ext_stack_tos++] = bkt_id;
				}

				return 0;
			}

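	/* Key not found: deleting a non-existent key is a no-op, not an error. */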
	return 0;
}

static uint64_t
table_mailbox_size_get_unoptimized(void)
{
	return 0;
}

static int
table_lookup_unoptimized(void *table,
			 void *mailbox __rte_unused,
			 uint8_t **key,
			 uint64_t *action_id,
			 uint8_t **action_data,
			 size_t *entry_id,
			 int *hit)
{
	struct table *t = table;
	struct bucket_extension *bkt0, *bkt;
	uint8_t *input_key;
	uint32_t input_sig, bkt_id, i;

	input_key = &(*key)[t->params.key_offset];

	input_sig = t->params.hash_func(input_key, t->params.key_size, 0);
	bkt_id = input_sig & (t->n_buckets - 1);
	bkt0 = &t->buckets[bkt_id];
	input_sig = (input_sig >> 16) | 1;

	/* Key is present in the bucket. */
	for (bkt = bkt0; bkt; bkt = bkt->next)
		for (i = 0; i < KEYS_PER_BUCKET; i++)
			if (bkt_keycmp(t, bkt, input_key, i, input_sig)) {
				uint32_t bkt_key_id;
				uint64_t *bkt_data;

				/* Key. */
				bkt_key_id = bkt->key_id[i];

				/* Key data. */
				bkt_data = table_key_data(t, bkt_key_id);
				*action_id = bkt_data[0];
				*action_data = (uint8_t *)&bkt_data[1];
				*entry_id = bkt_key_id;
				*hit = 1;
				return 1;
			}

	*hit = 0;
	return 1;
}

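/* Per-lookup state preserved across the three steps of the optimized lookup
 * state machine below: hash & bucket prefetch, signature match & key prefetch,
 * full key comparison.
 */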
struct mailbox {
	struct bucket_extension *bkt;
	uint32_t input_sig;
	uint32_t bkt_key_id;
	uint32_t sig_match;
	uint32_t sig_match_many;
	int state;
};

static uint64_t
table_mailbox_size_get(void)
{
	return sizeof(struct mailbox);
}

/*
 * mask = match bitmask
 * match = at least one match
 * match_many = more than one match
 * match_pos = position of first match
 *
 * +------+-------+------------+-----------+
 * | mask | match | match_many | match_pos |
 * +------+-------+------------+-----------+
 * | 0000 |   0   |     0      |    00     |
 * | 0001 |   1   |     0      |    00     |
 * | 0010 |   1   |     0      |    01     |
 * | 0011 |   1   |     1      |    00     |
 * +------+-------+------------+-----------+
 * | 0100 |   1   |     0      |    10     |
 * | 0101 |   1   |     1      |    00     |
 * | 0110 |   1   |     1      |    01     |
 * | 0111 |   1   |     1      |    00     |
 * +------+-------+------------+-----------+
 * | 1000 |   1   |     0      |    11     |
 * | 1001 |   1   |     1      |    00     |
 * | 1010 |   1   |     1      |    01     |
 * | 1011 |   1   |     1      |    00     |
 * +------+-------+------------+-----------+
 * | 1100 |   1   |     1      |    10     |
 * | 1101 |   1   |     1      |    00     |
 * | 1110 |   1   |     1      |    01     |
 * | 1111 |   1   |     1      |    00     |
 * +------+-------+------------+-----------+
 *
 * match = 1111_1111_1111_1110 = 0xFFFE
 * match_many = 1111_1110_1110_1000 = 0xFEE8
 * match_pos = 0001_0010_0001_0011__0001_0010_0001_0000 = 0x12131210
 */

#define LUT_MATCH 0xFFFE
#define LUT_MATCH_MANY 0xFEE8
#define LUT_MATCH_POS 0x12131210
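
/* Worked example (illustrative): if the input signature matches bucket slots 1
 * and 2, then mask_all = 0110b = 6, so:
 *	match      = (0xFFFE >> 6) & 1 = 1          (at least one slot matched)
 *	match_many = (0xFEE8 >> 6) & 1 = 1          (more than one slot matched)
 *	match_pos  = (0x12131210 >> 12) & 3 = 1     (first matching slot is 1)
 */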

static int
table_lookup(void *table,
	     void *mailbox,
	     uint8_t **key,
	     uint64_t *action_id,
	     uint8_t **action_data,
	     size_t *entry_id,
	     int *hit)
{
	struct table *t = table;
	struct mailbox *m = mailbox;

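	/* Three-step lookup pipeline: step 0 hashes the key and prefetches the
	 * bucket; step 1 compares the four stored signatures and prefetches the
	 * candidate key and its data; step 2 performs the full key comparison.
	 * If the full comparison fails and either multiple signatures matched
	 * or the bucket has extensions, the unoptimized lookup is used as a
	 * fallback.
	 */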
	switch (m->state) {
	case 0: {
		uint8_t *input_key = &(*key)[t->params.key_offset];
		struct bucket_extension *bkt;
		uint32_t input_sig, bkt_id;

		input_sig = t->params.hash_func(input_key, t->params.key_size, 0);
		bkt_id = input_sig & (t->n_buckets - 1);
		bkt = &t->buckets[bkt_id];
		rte_prefetch0(bkt);

		m->bkt = bkt;
		m->input_sig = (input_sig >> 16) | 1;
		m->state++;
		return 0;
	}

	case 1: {
		struct bucket_extension *bkt = m->bkt;
		uint32_t input_sig = m->input_sig;
		uint32_t bkt_sig0, bkt_sig1, bkt_sig2, bkt_sig3;
		uint32_t mask0 = 0, mask1 = 0, mask2 = 0, mask3 = 0, mask_all;
		uint32_t sig_match = LUT_MATCH;
		uint32_t sig_match_many = LUT_MATCH_MANY;
		uint32_t sig_match_pos = LUT_MATCH_POS;
		uint32_t bkt_key_id;

		bkt_sig0 = input_sig ^ bkt->sig[0];
		if (!bkt_sig0)
			mask0 = 1 << 0;

		bkt_sig1 = input_sig ^ bkt->sig[1];
		if (!bkt_sig1)
			mask1 = 1 << 1;

		bkt_sig2 = input_sig ^ bkt->sig[2];
		if (!bkt_sig2)
			mask2 = 1 << 2;

		bkt_sig3 = input_sig ^ bkt->sig[3];
		if (!bkt_sig3)
			mask3 = 1 << 3;

		mask_all = (mask0 | mask1) | (mask2 | mask3);
		sig_match = (sig_match >> mask_all) & 1;
		sig_match_many = (sig_match_many >> mask_all) & 1;
		sig_match_pos = (sig_match_pos >> (mask_all << 1)) & 3;

		bkt_key_id = bkt->key_id[sig_match_pos];
		rte_prefetch0(table_key(t, bkt_key_id));
		rte_prefetch0(table_key_data(t, bkt_key_id));

		m->bkt_key_id = bkt_key_id;
		m->sig_match = sig_match;
		m->sig_match_many = sig_match_many;
		m->state++;
		return 0;
	}

	case 2: {
		uint8_t *input_key = &(*key)[t->params.key_offset];
		struct bucket_extension *bkt = m->bkt;
		uint32_t bkt_key_id = m->bkt_key_id;
		uint8_t *bkt_key = table_key(t, bkt_key_id);
		uint64_t *bkt_data = table_key_data(t, bkt_key_id);
		uint32_t lkp_hit;

		lkp_hit = t->keycmp_func(bkt_key, input_key, t->params.key_size);
		lkp_hit &= m->sig_match;
		*action_id = bkt_data[0];
		*action_data = (uint8_t *)&bkt_data[1];
		*entry_id = bkt_key_id;
		*hit = lkp_hit;

		m->state = 0;

		if (!lkp_hit && (m->sig_match_many || bkt->next))
			return table_lookup_unoptimized(t,
							m,
							key,
							action_id,
							action_data,
							entry_id,
							hit);

		return 1;
	}

	default:
		return 0;
	}
}

static void *
table_create(struct rte_swx_table_params *params,
	     struct rte_swx_table_entry_list *entries,
	     const char *args,
	     int numa_node)
{
	struct table *t;
	struct rte_swx_table_entry *entry;
	int status;

	/* Table create. */
	status = __table_create(&t, NULL, params, args, numa_node);
	if (status)
		return NULL;

	/* Table add entries. */
	if (!entries)
		return t;

	TAILQ_FOREACH(entry, entries, node) {
		int status;

		status = table_add(t, entry);
		if (status) {
			table_free(t);
			return NULL;
		}
	}

	return t;
}

static uint64_t
table_footprint(struct rte_swx_table_params *params,
		struct rte_swx_table_entry_list *entries __rte_unused,
		const char *args)
{
	uint64_t memory_footprint;
	int status;

	status = __table_create(NULL, &memory_footprint, params, args, 0);
	if (status)
		return 0;

	return memory_footprint;
}

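/* Two exact match table variants are exported: the unoptimized one resolves a
 * lookup in a single call and needs no mailbox state, while the optimized one
 * spreads a lookup over three calls so that memory accesses can be prefetched
 * and overlapped across packets.
 */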
struct rte_swx_table_ops rte_swx_table_exact_match_unoptimized_ops = {
	.footprint_get = table_footprint,
	.mailbox_size_get = table_mailbox_size_get_unoptimized,
	.create = table_create,
	.add = table_add,
	.del = table_del,
	.lkp = table_lookup_unoptimized,
	.free = table_free,
};

struct rte_swx_table_ops rte_swx_table_exact_match_ops = {
	.footprint_get = table_footprint,
	.mailbox_size_get = table_mailbox_size_get,
	.create = table_create,
	.add = table_add,
	.del = table_del,
	.lkp = table_lookup,
	.free = table_free,
};