/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <string.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"
#include "rte_lru.h"

#include "table_log.h"

#define KEY_SIZE	32

#define KEYS_PER_BUCKET	4

#define RTE_BUCKET_ENTRY_VALID	0x1LLU

#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val)

#endif

#ifdef RTE_ARCH_64
struct rte_bucket_4_32 {
	/* Cache line 0 */
	uint64_t signature[4 + 1];
	uint64_t lru_list;
	struct rte_bucket_4_32 *next;
	uint64_t next_valid;

	/* Cache lines 1 and 2 */
	uint64_t key[4][4];

	/* Cache line 3 */
	uint8_t data[];
};
#else
struct rte_bucket_4_32 {
	/* Cache line 0 */
	uint64_t signature[4 + 1];
	uint64_t lru_list;
	struct rte_bucket_4_32 *next;
	uint32_t pad;
	uint64_t next_valid;

	/* Cache lines 1 and 2 */
	uint64_t key[4][4];

	/* Cache line 3 */
	uint8_t data[];
};
#endif

struct rte_table_hash {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t n_buckets;
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t bucket_size;
	uint32_t key_offset;
	uint64_t key_mask[4];
	rte_table_hash_op_hash f_hash;
	uint64_t seed;

	/* Extendible buckets */
	uint32_t n_buckets_ext;
	uint32_t stack_pos;
	uint32_t *stack;

	/* Lookup table */
	uint8_t memory[0] __rte_cache_aligned;
};

static int
keycmp(void *a, void *b, void *b_mask)
{
	uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;

	return (a64[0] != (b64[0] & b_mask64[0])) ||
		(a64[1] != (b64[1] & b_mask64[1])) ||
		(a64[2] != (b64[2] & b_mask64[2])) ||
		(a64[3] != (b64[3] & b_mask64[3]));
}

static void
keycpy(void *dst, void *src, void *src_mask)
{
	uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;

	dst64[0] = src64[0] & src_mask64[0];
	dst64[1] = src64[1] & src_mask64[1];
	dst64[2] = src64[2] & src_mask64[2];
	dst64[3] = src64[3] & src_mask64[3];
}

static int
check_params_create(struct rte_table_hash_params *params)
{
	/* name */
	if (params->name == NULL) {
		TABLE_LOG(ERR, "%s: name invalid value", __func__);
		return -EINVAL;
	}

	/* key_size */
	if (params->key_size != KEY_SIZE) {
		TABLE_LOG(ERR, "%s: key_size invalid value", __func__);
		return -EINVAL;
	}

	/* n_keys */
	if (params->n_keys == 0) {
		TABLE_LOG(ERR, "%s: n_keys is zero", __func__);
		return -EINVAL;
	}

	/* n_buckets */
	if ((params->n_buckets == 0) ||
		(!rte_is_power_of_2(params->n_buckets))) {
		TABLE_LOG(ERR, "%s: n_buckets invalid value", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		TABLE_LOG(ERR, "%s: f_hash function pointer is NULL",
			__func__);
		return -EINVAL;
	}

	return 0;
}
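
/*
 * Minimal creation sketch (illustrative only; my_hash_func, MY_ENTRY_SIZE
 * and the socket id are hypothetical application-side values, not part of
 * this file):
 *
 *	struct rte_table_hash_params params = {
 *		.name = "flow_table",
 *		.key_size = 32,
 *		.key_offset = 0,
 *		.key_mask = NULL,        // NULL: compare all 32 key bytes
 *		.n_keys = 1 << 16,
 *		.n_buckets = 1 << 14,    // must be a power of 2
 *		.f_hash = my_hash_func,  // rte_table_hash_op_hash callback
 *		.seed = 0,
 *	};
 *	void *t = rte_table_hash_key32_lru_ops.f_create(&params, 0,
 *		MY_ENTRY_SIZE);
 */
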
static void *
rte_table_hash_create_key32_lru(void *params,
	int socket_id,
	uint32_t entry_size)
{
	struct rte_table_hash_params *p = params;
	struct rte_table_hash *f;
	uint64_t bucket_size, total_size;
	uint32_t n_buckets, i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_32) % 64) != 0))
		return NULL;

	/*
	 * Table dimensioning
	 *
	 * Objective: Pick the number of buckets (n_buckets) so that there is a
	 * chance to store n_keys keys in the table.
	 *
	 * Note: Since the buckets do not get extended, it is not possible to
	 * guarantee that n_keys keys can be stored in the table at any time. In
	 * the worst case scenario, when all n_keys keys fall into the same
	 * bucket, only a maximum of KEYS_PER_BUCKET keys will be stored in the
	 * table. This case defeats the purpose of the hash table. It indicates
	 * an unsuitable f_hash or an unsuitable n_keys to n_buckets ratio.
	 *
	 * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
	 */
	n_buckets = rte_align32pow2(
		(p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
	n_buckets = RTE_MAX(n_buckets, p->n_buckets);

	/* Memory allocation */
	bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
		KEYS_PER_BUCKET * entry_size);
	total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;
	if (total_size > SIZE_MAX) {
		TABLE_LOG(ERR, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s",
			__func__, total_size, p->name);
		return NULL;
	}

	f = rte_zmalloc_socket(p->name,
		(size_t)total_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (f == NULL) {
		TABLE_LOG(ERR, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s",
			__func__, total_size, p->name);
		return NULL;
	}
	TABLE_LOG(INFO,
		"%s: Hash table %s memory footprint "
		"is %" PRIu64 " bytes",
		__func__, p->name, total_size);

	/* Memory initialization */
	f->n_buckets = n_buckets;
	f->key_size = KEY_SIZE;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

	if (p->key_mask != NULL) {
		f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
		f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
		f->key_mask[2] = ((uint64_t *)p->key_mask)[2];
		f->key_mask[3] = ((uint64_t *)p->key_mask)[3];
	} else {
		f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
	}

	for (i = 0; i < n_buckets; i++) {
		struct rte_bucket_4_32 *bucket;

		bucket = (struct rte_bucket_4_32 *) &f->memory[i *
			f->bucket_size];
		bucket->lru_list = 0x0000000100020003LLU;
	}

	return f;
}

static int
rte_table_hash_free_key32_lru(void *table)
{
	struct rte_table_hash *f = table;

	/* Check input parameters */
	if (f == NULL) {
		TABLE_LOG(ERR, "%s: table parameter is NULL", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}
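
/*
 * Note on the LRU state (a sketch of the intent, assuming the scalar
 * variant of rte_lru.h): lru_list packs the four entry positions into one
 * 64-bit word as four 16-bit fields, most recently used at the top. The
 * initializer 0x0000000100020003LLU therefore encodes the order 0, 1, 2, 3,
 * making entry 3 the first eviction victim. The exact field layout is owned
 * by lru_pos()/lru_update() in rte_lru.h:
 *
 *	pos = lru_pos(bucket);    // read the victim from the lowest field
 *	lru_update(bucket, pos);  // promote pos to MRU, shift the rest down
 */
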
static int
rte_table_hash_entry_add_key32_lru(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket;
	uint64_t signature, pos;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

		if ((bucket_signature == signature) &&
			(keycmp(bucket_key, key, f->key_mask) == 0)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 1;
			*entry_ptr = (void *) bucket_data;
			return 0;
		}
	}

	/* Key is not present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

		if (bucket_signature == 0) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature[i] = signature;
			keycpy(bucket_key, key, f->key_mask);
			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 0;
			*entry_ptr = (void *) bucket_data;

			return 0;
		}
	}

	/* Bucket full: replace LRU entry */
	pos = lru_pos(bucket);
	bucket->signature[pos] = signature;
	keycpy(&bucket->key[pos], key, f->key_mask);
	memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
	lru_update(bucket, pos);
	*key_found = 0;
	*entry_ptr = (void *) &bucket->data[pos * f->entry_size];

	return 0;
}

static int
rte_table_hash_entry_delete_key32_lru(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

		if ((bucket_signature == signature) &&
			(keycmp(bucket_key, key, f->key_mask) == 0)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature[i] = 0;
			*key_found = 1;
			if (entry)
				memcpy(entry, bucket_data, f->entry_size);

			return 0;
		}
	}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}
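
/*
 * Signature convention used by both table flavors (derived from the code
 * above): bit 0 of every stored signature is forced to 1
 * (RTE_BUCKET_ENTRY_VALID), so a signature of 0 always means "slot empty".
 * Worked example with hypothetical numbers: for n_buckets = 1 << 14 and a
 * hash value of 0x9e3779b97f4a7c15,
 *
 *	bucket_index = 0x9e3779b97f4a7c15 & 0x3fff = 0x3c15
 *
 * and the signature stored in the slot is 0x9e3779b97f4a7c15 (bit 0 is
 * already set, so OR-ing in the valid bit changes nothing here).
 */
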
static void *
rte_table_hash_create_key32_ext(void *params,
	int socket_id,
	uint32_t entry_size)
{
	struct rte_table_hash_params *p = params;
	struct rte_table_hash *f;
	uint64_t bucket_size, stack_size, total_size;
	uint32_t n_buckets_ext, i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_32) % 64) != 0))
		return NULL;

	/*
	 * Table dimensioning
	 *
	 * Objective: Pick the number of bucket extensions (n_buckets_ext) so
	 * that it is guaranteed that n_keys keys can be stored in the table at
	 * any time.
	 *
	 * The worst case scenario takes place when all the n_keys keys fall
	 * into the same bucket. Actually, due to the KEYS_PER_BUCKET scheme,
	 * the worst case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys
	 * fall into the same bucket, while the remaining (KEYS_PER_BUCKET - 1)
	 * keys each fall into a different bucket. This case defeats the purpose
	 * of the hash table. It indicates an unsuitable f_hash or an unsuitable
	 * n_keys to n_buckets ratio.
	 *
	 * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
	 */
	n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;

	/* Memory allocation */
	bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
		KEYS_PER_BUCKET * entry_size);
	stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
	total_size = sizeof(struct rte_table_hash) +
		(p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
	if (total_size > SIZE_MAX) {
		TABLE_LOG(ERR, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s",
			__func__, total_size, p->name);
		return NULL;
	}

	f = rte_zmalloc_socket(p->name,
		(size_t)total_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (f == NULL) {
		TABLE_LOG(ERR, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s",
			__func__, total_size, p->name);
		return NULL;
	}
	TABLE_LOG(INFO,
		"%s: Hash table %s memory footprint "
		"is %" PRIu64 " bytes",
		__func__, p->name, total_size);

	/* Memory initialization */
	f->n_buckets = p->n_buckets;
	f->key_size = KEY_SIZE;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

	f->n_buckets_ext = n_buckets_ext;
	f->stack_pos = n_buckets_ext;
	f->stack = (uint32_t *)
		&f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];

	if (p->key_mask != NULL) {
		f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
		f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
		f->key_mask[2] = ((uint64_t *)p->key_mask)[2];
		f->key_mask[3] = ((uint64_t *)p->key_mask)[3];
	} else {
		f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
	}

	for (i = 0; i < n_buckets_ext; i++)
		f->stack[i] = i;

	return f;
}

static int
rte_table_hash_free_key32_ext(void *table)
{
	struct rte_table_hash *f = table;

	/* Check input parameters */
	if (f == NULL) {
		TABLE_LOG(ERR, "%s: table parameter is NULL", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}
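
/*
 * Worked dimensioning example (hypothetical numbers): with n_keys = 1000
 * and KEYS_PER_BUCKET = 4, n_buckets_ext = 1000 / 4 + 3 = 253 extension
 * buckets are reserved, and the free-list stack holds the indices 0..252.
 * stack_pos counts the free extension buckets, so
 * f->stack[--f->stack_pos] pops one on bucket overflow and
 * f->stack[f->stack_pos++] pushes it back when a chained bucket empties.
 */
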
static int
rte_table_hash_entry_add_key32_ext(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

			if ((bucket_signature == signature) &&
				(keycmp(bucket_key, key, f->key_mask) == 0)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 1;
				*entry_ptr = (void *) bucket_data;

				return 0;
			}
		}
	}

	/* Key is not present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
		bucket_prev = bucket, bucket = bucket->next)
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

			if (bucket_signature == 0) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature[i] = signature;
				keycpy(bucket_key, key, f->key_mask);
				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 0;
				*entry_ptr = (void *) bucket_data;

				return 0;
			}
		}

	/* Bucket full: extend bucket */
	if (f->stack_pos > 0) {
		bucket_index = f->stack[--f->stack_pos];

		bucket = (struct rte_bucket_4_32 *)
			&f->memory[(f->n_buckets + bucket_index) *
			f->bucket_size];
		bucket_prev->next = bucket;
		bucket_prev->next_valid = 1;

		bucket->signature[0] = signature;
		keycpy(&bucket->key[0], key, f->key_mask);
		memcpy(&bucket->data[0], entry, f->entry_size);
		*key_found = 0;
		*entry_ptr = (void *) &bucket->data[0];
		return 0;
	}

	return -ENOSPC;
}

static int
rte_table_hash_entry_delete_key32_ext(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
		bucket_prev = bucket, bucket = bucket->next)
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

			if ((bucket_signature == signature) &&
				(keycmp(bucket_key, key, f->key_mask) == 0)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature[i] = 0;
				*key_found = 1;
				if (entry)
					memcpy(entry, bucket_data,
						f->entry_size);

				if ((bucket->signature[0] == 0) &&
					(bucket->signature[1] == 0) &&
					(bucket->signature[2] == 0) &&
					(bucket->signature[3] == 0) &&
					(bucket_prev != NULL)) {
					bucket_prev->next = bucket->next;
					bucket_prev->next_valid =
						bucket->next_valid;

					memset(bucket, 0,
						sizeof(struct rte_bucket_4_32));
					bucket_index = (((uint8_t *)bucket -
						(uint8_t *)f->memory) /
						f->bucket_size) - f->n_buckets;
					f->stack[f->stack_pos++] = bucket_index;
				}

				return 0;
			}
		}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}
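
/*
 * The lookup_key32_cmp() macro below does a branch-free 4-way compare: for
 * each of the 4 slots it ORs together the XOR of all four 64-bit key lanes
 * plus the inverted valid bit of the slot signature, so or[i] == 0 exactly
 * when slot i is valid and its masked key matches. pos stays at 4 on a
 * miss; signature[] is declared with 4 + 1 elements precisely so that
 * bucket->signature[4] is a guard slot that always reads 0, which makes the
 * subsequent (bucket->signature[pos] & 1LLU) hit test evaluate to 0.
 */
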
#define lookup_key32_cmp(key_in, bucket, pos, f) \
{ \
	uint64_t xor[4][4], or[4], signature[4], k[4]; \
 \
	k[0] = key_in[0] & f->key_mask[0]; \
	k[1] = key_in[1] & f->key_mask[1]; \
	k[2] = key_in[2] & f->key_mask[2]; \
	k[3] = key_in[3] & f->key_mask[3]; \
 \
	signature[0] = ((~bucket->signature[0]) & 1); \
	signature[1] = ((~bucket->signature[1]) & 1); \
	signature[2] = ((~bucket->signature[2]) & 1); \
	signature[3] = ((~bucket->signature[3]) & 1); \
 \
	xor[0][0] = k[0] ^ bucket->key[0][0]; \
	xor[0][1] = k[1] ^ bucket->key[0][1]; \
	xor[0][2] = k[2] ^ bucket->key[0][2]; \
	xor[0][3] = k[3] ^ bucket->key[0][3]; \
 \
	xor[1][0] = k[0] ^ bucket->key[1][0]; \
	xor[1][1] = k[1] ^ bucket->key[1][1]; \
	xor[1][2] = k[2] ^ bucket->key[1][2]; \
	xor[1][3] = k[3] ^ bucket->key[1][3]; \
 \
	xor[2][0] = k[0] ^ bucket->key[2][0]; \
	xor[2][1] = k[1] ^ bucket->key[2][1]; \
	xor[2][2] = k[2] ^ bucket->key[2][2]; \
	xor[2][3] = k[3] ^ bucket->key[2][3]; \
 \
	xor[3][0] = k[0] ^ bucket->key[3][0]; \
	xor[3][1] = k[1] ^ bucket->key[3][1]; \
	xor[3][2] = k[2] ^ bucket->key[3][2]; \
	xor[3][3] = k[3] ^ bucket->key[3][3]; \
 \
	or[0] = xor[0][0] | xor[0][1] | xor[0][2] | xor[0][3] | signature[0]; \
	or[1] = xor[1][0] | xor[1][1] | xor[1][2] | xor[1][3] | signature[1]; \
	or[2] = xor[2][0] | xor[2][1] | xor[2][2] | xor[2][3] | signature[2]; \
	or[3] = xor[3][0] | xor[3][1] | xor[3][2] | xor[3][3] | signature[3]; \
 \
	pos = 4; \
	if (or[0] == 0) \
		pos = 0; \
	if (or[1] == 0) \
		pos = 1; \
	if (or[2] == 0) \
		pos = 2; \
	if (or[3] == 0) \
		pos = 3; \
}

#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask, f) \
{ \
	uint64_t pkt_mask; \
	uint32_t key_offset = f->key_offset; \
 \
	pkt0_index = rte_ctz64(pkts_mask); \
	pkt_mask = 1LLU << pkt0_index; \
	pkts_mask &= ~pkt_mask; \
 \
	mbuf0 = pkts[pkt0_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, key_offset)); \
}

#define lookup1_stage1(mbuf1, bucket1, f) \
{ \
	uint64_t *key; \
	uint64_t signature; \
	uint32_t bucket_index; \
 \
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset); \
	signature = f->f_hash(key, f->key_mask, KEY_SIZE, f->seed); \
 \
	bucket_index = signature & (f->n_buckets - 1); \
	bucket1 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket_index * f->bucket_size]; \
	rte_prefetch0(bucket1); \
	rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE)); \
	rte_prefetch0((void *)(((uintptr_t) bucket1) + \
		2 * RTE_CACHE_LINE_SIZE)); \
}

#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
	pkts_mask_out, entries, f) \
{ \
	void *a; \
	uint64_t pkt_mask; \
	uint64_t *key; \
	uint32_t pos; \
 \
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset); \
	lookup_key32_cmp(key, bucket2, pos, f); \
 \
	pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index; \
	pkts_mask_out |= pkt_mask; \
 \
	a = (void *) &bucket2->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt2_index] = a; \
	lru_update(bucket2, pos); \
}
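
/*
 * The _ext variant of stage 2 below differs from the _lru one above in a
 * single respect: on a miss in the current bucket of a chained slot it
 * records the packet in buckets_mask and saves the next-bucket pointer and
 * the key, so the "grinder" loop further below can keep walking the chain
 * after the pipeline has drained.
 */
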
#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out, \
	entries, buckets_mask, buckets, keys, f) \
{ \
	struct rte_bucket_4_32 *bucket_next; \
	void *a; \
	uint64_t pkt_mask, bucket_mask; \
	uint64_t *key; \
	uint32_t pos; \
 \
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset); \
	lookup_key32_cmp(key, bucket2, pos, f); \
 \
	pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index; \
	pkts_mask_out |= pkt_mask; \
 \
	a = (void *) &bucket2->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt2_index] = a; \
 \
	bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index); \
	buckets_mask |= bucket_mask; \
	bucket_next = bucket2->next; \
	buckets[pkt2_index] = bucket_next; \
	keys[pkt2_index] = key; \
}

#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, \
	entries, buckets_mask, f) \
{ \
	struct rte_bucket_4_32 *bucket, *bucket_next; \
	void *a; \
	uint64_t pkt_mask, bucket_mask; \
	uint64_t *key; \
	uint32_t pos; \
 \
	bucket = buckets[pkt_index]; \
	key = keys[pkt_index]; \
 \
	lookup_key32_cmp(key, bucket, pos, f); \
 \
	pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index; \
	pkts_mask_out |= pkt_mask; \
 \
	a = (void *) &bucket->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt_index] = a; \
 \
	bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index); \
	buckets_mask |= bucket_mask; \
	bucket_next = bucket->next; \
	rte_prefetch0(bucket_next); \
	rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
		RTE_CACHE_LINE_SIZE)); \
	rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
		2 * RTE_CACHE_LINE_SIZE)); \
	buckets[pkt_index] = bucket_next; \
	keys[pkt_index] = key; \
}

#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, \
	pkts, pkts_mask, f) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
	uint32_t key_offset = f->key_offset; \
 \
	pkt00_index = rte_ctz64(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
 \
	mbuf00 = pkts[pkt00_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
 \
	pkt01_index = rte_ctz64(pkts_mask); \
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
 \
	mbuf01 = pkts[pkt01_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
}

#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index, \
	mbuf00, mbuf01, pkts, pkts_mask, f) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
	uint32_t key_offset = f->key_offset; \
 \
	pkt00_index = rte_ctz64(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
 \
	mbuf00 = pkts[pkt00_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
 \
	pkt01_index = rte_ctz64(pkts_mask); \
	if (pkts_mask == 0) \
		pkt01_index = pkt00_index; \
 \
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
 \
	mbuf01 = pkts[pkt01_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
}
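
/*
 * Odd packet counts: when only one packet is left, the _with_odd_support
 * stage 0 above aliases the second lane to the same packet (pkt01_index =
 * pkt00_index). The duplicate travels through stages 1 and 2 alongside the
 * original; its hit bit and entry pointer are simply written twice, which
 * is idempotent, so the two-packet-wide pipeline never has to special-case
 * a half-filled issue slot.
 */
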
#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
{ \
	uint64_t *key10, *key11; \
	uint64_t signature10, signature11; \
	uint32_t bucket10_index, bucket11_index; \
 \
	key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, f->key_offset); \
	signature10 = f->f_hash(key10, f->key_mask, KEY_SIZE, f->seed); \
 \
	bucket10_index = signature10 & (f->n_buckets - 1); \
	bucket10 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket10_index * f->bucket_size]; \
	rte_prefetch0(bucket10); \
	rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE)); \
	rte_prefetch0((void *)(((uintptr_t) bucket10) + \
		2 * RTE_CACHE_LINE_SIZE)); \
 \
	key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, f->key_offset); \
	signature11 = f->f_hash(key11, f->key_mask, KEY_SIZE, f->seed); \
 \
	bucket11_index = signature11 & (f->n_buckets - 1); \
	bucket11 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket11_index * f->bucket_size]; \
	rte_prefetch0(bucket11); \
	rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE)); \
	rte_prefetch0((void *)(((uintptr_t) bucket11) + \
		2 * RTE_CACHE_LINE_SIZE)); \
}

#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21, \
	bucket20, bucket21, pkts_mask_out, entries, f) \
{ \
	void *a20, *a21; \
	uint64_t pkt20_mask, pkt21_mask; \
	uint64_t *key20, *key21; \
	uint32_t pos20, pos21; \
 \
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset); \
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset); \
 \
	lookup_key32_cmp(key20, bucket20, pos20, f); \
	lookup_key32_cmp(key21, bucket21, pos21, f); \
 \
	pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index; \
	pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index; \
	pkts_mask_out |= pkt20_mask | pkt21_mask; \
 \
	a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
	a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
	rte_prefetch0(a20); \
	rte_prefetch0(a21); \
	entries[pkt20_index] = a20; \
	entries[pkt21_index] = a21; \
	lru_update(bucket20, pos20); \
	lru_update(bucket21, pos21); \
}

#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, \
	bucket20, bucket21, pkts_mask_out, entries, buckets_mask, \
	buckets, keys, f) \
{ \
	struct rte_bucket_4_32 *bucket20_next, *bucket21_next; \
	void *a20, *a21; \
	uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask; \
	uint64_t *key20, *key21; \
	uint32_t pos20, pos21; \
 \
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset); \
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset); \
 \
	lookup_key32_cmp(key20, bucket20, pos20, f); \
	lookup_key32_cmp(key21, bucket21, pos21, f); \
 \
	pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index; \
	pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index; \
	pkts_mask_out |= pkt20_mask | pkt21_mask; \
 \
	a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
	a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
	rte_prefetch0(a20); \
	rte_prefetch0(a21); \
	entries[pkt20_index] = a20; \
	entries[pkt21_index] = a21; \
 \
	bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index); \
	bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index); \
	buckets_mask |= bucket20_mask | bucket21_mask; \
	bucket20_next = bucket20->next; \
	bucket21_next = bucket21->next; \
	buckets[pkt20_index] = bucket20_next; \
	buckets[pkt21_index] = bucket21_next; \
	keys[pkt20_index] = key20; \
	keys[pkt21_index] = key21; \
}
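
/*
 * The lookup functions below run a 3-stage, 2-packet-wide software
 * pipeline: stage 0 prefetches the key area of two mbufs, stage 1 hashes
 * two keys and prefetches their buckets, stage 2 resolves the two lookups.
 * Filling the pipeline consumes 4 packets and the run loop needs at least
 * one more, so bursts of fewer than 5 packets take the simple
 * one-packet-at-a-time path instead.
 */
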
static int
rte_table_hash_lookup_key32_lru(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0;

	__rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask);

	RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);

	/* Cannot run the pipeline with less than 5 packets */
	if (rte_popcount64(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_32 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_lru(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, f);
		}

		*lookup_hit_mask = pkts_mask_out;
		RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f,
			n_pkts_in - rte_popcount64(pkts_mask_out));
		return 0;
	}

	/*
	 * Pipeline fill
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask, f);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_lru(pkt20_index, pkt21_index,
			mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out,
			entries, f);
	}

	/*
	 * Pipeline flush
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index,
		mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index,
		mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f,
		n_pkts_in - rte_popcount64(pkts_mask_out));
	return 0;
} /* rte_table_hash_lookup_key32_lru() */
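
/*
 * Lookup usage sketch (hypothetical burst; n_pkts and the pkts[] array come
 * from the caller's rx path):
 *
 *	void *entries[RTE_PORT_IN_BURST_SIZE_MAX];
 *	uint64_t hit_mask, in_mask = RTE_LEN2MASK(n_pkts, uint64_t);
 *
 *	rte_table_hash_key32_ext_ops.f_lookup(table, pkts, in_mask,
 *		&hit_mask, entries);
 *	// bit i of hit_mask set => entries[i] points at the stored entry
 */
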
static int
rte_table_hash_lookup_key32_ext(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0, buckets_mask = 0;
	struct rte_bucket_4_32 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
	uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];

	__rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask);

	RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);

	/* Cannot run the pipeline with less than 5 packets */
	if (rte_popcount64(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_32 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_ext(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, buckets_mask, buckets,
				keys, f);
		}

		goto grind_next_buckets;
	}

	/*
	 * Pipeline fill
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask, f);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries,
			buckets_mask, buckets, keys, f);
	}

	/*
	 * Pipeline flush
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

grind_next_buckets:
	/* Grind next buckets */
	for ( ; buckets_mask; ) {
		uint64_t buckets_mask_next = 0;

		for ( ; buckets_mask; ) {
			uint64_t pkt_mask;
			uint32_t pkt_index;

			pkt_index = rte_ctz64(buckets_mask);
			pkt_mask = 1LLU << pkt_index;
			buckets_mask &= ~pkt_mask;

			lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
				entries, buckets_mask_next, f);
		}

		buckets_mask = buckets_mask_next;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f,
		n_pkts_in - rte_popcount64(pkts_mask_out));
	return 0;
} /* rte_table_hash_lookup_key32_ext() */
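
/*
 * Grinder note: the loop above walks bucket chains breadth-first. Each
 * outer iteration visits one chain link for every packet still pending in
 * buckets_mask; lookup_grinder() re-arms buckets_mask_next for packets
 * whose chains go deeper, so the number of outer iterations equals the
 * longest chain encountered, not the sum of all chain lengths.
 */
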
static int
rte_table_hash_key32_stats_read(void *table, struct rte_table_stats *stats,
	int clear)
{
	struct rte_table_hash *t = table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}

struct rte_table_ops rte_table_hash_key32_lru_ops = {
	.f_create = rte_table_hash_create_key32_lru,
	.f_free = rte_table_hash_free_key32_lru,
	.f_add = rte_table_hash_entry_add_key32_lru,
	.f_delete = rte_table_hash_entry_delete_key32_lru,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_lookup_key32_lru,
	.f_stats = rte_table_hash_key32_stats_read,
};

struct rte_table_ops rte_table_hash_key32_ext_ops = {
	.f_create = rte_table_hash_create_key32_ext,
	.f_free = rte_table_hash_free_key32_ext,
	.f_add = rte_table_hash_entry_add_key32_ext,
	.f_delete = rte_table_hash_entry_delete_key32_ext,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_lookup_key32_ext,
	.f_stats = rte_table_hash_key32_stats_read,
};
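
/*
 * Stats usage sketch: counters are only maintained when the library is
 * built with RTE_TABLE_STATS_COLLECT defined; otherwise the STATS macros
 * at the top of this file compile to nothing and the struct reads back as
 * zeros.
 *
 *	struct rte_table_stats st;
 *
 *	rte_table_hash_key32_lru_ops.f_stats(table, &st, 1); // read + clear
 */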