/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <string.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"

#define KEYS_PER_BUCKET	4

struct bucket {
	union {
		uintptr_t next;
		uint64_t lru_list;
	};
	uint16_t sig[KEYS_PER_BUCKET];
	uint32_t key_pos[KEYS_PER_BUCKET];
};

#define BUCKET_NEXT(bucket)						\
	((void *) ((bucket)->next & (~1LU)))

#define BUCKET_NEXT_VALID(bucket)					\
	((bucket)->next & 1LU)

#define BUCKET_NEXT_SET(bucket, bucket_next)				\
do									\
	(bucket)->next = (((uintptr_t) ((void *) (bucket_next))) | 1LU;\
while (0)

#define BUCKET_NEXT_SET_NULL(bucket)					\
do									\
	(bucket)->next = 0;						\
while (0)

#define BUCKET_NEXT_COPY(bucket, bucket2)				\
do									\
	(bucket)->next = (bucket2)->next;				\
while (0)

#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(table, val)

#endif

struct grinder {
	struct bucket *bkt;
	uint64_t sig;
	uint64_t match;
	uint32_t key_index;
};

struct rte_table_hash {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t n_keys;
	uint32_t n_buckets;
	uint32_t n_buckets_ext;
	rte_table_hash_op_hash f_hash;
	uint64_t seed;
	uint32_t key_offset;

	/* Internal */
	uint64_t bucket_mask;
	uint32_t key_size_shl;
	uint32_t data_size_shl;
	uint32_t key_stack_tos;
	uint32_t bkt_ext_stack_tos;

	/* Grinder */
	struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];

	/* Tables */
	uint64_t *key_mask;
	struct bucket *buckets;
	struct bucket *buckets_ext;
	uint8_t *key_mem;
	uint8_t *data_mem;
	uint32_t *key_stack;
	uint32_t *bkt_ext_stack;

	/* Table memory */
	uint8_t memory[0] __rte_cache_aligned;
};

static int
keycmp(void *a, void *b, void *b_mask, uint32_t n_bytes)
{
	uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
	uint32_t i;

	for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
		if (a64[i] != (b64[i] & b_mask64[i]))
			return 1;

	return 0;
}

static void
keycpy(void *dst, void *src, void *src_mask, uint32_t n_bytes)
{
	uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
	uint32_t i;

	for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
		dst64[i] = src64[i] & src_mask64[i];
}

static int
check_params_create(struct rte_table_hash_params *params)
{
	/* name */
	if (params->name == NULL) {
		RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
		return -EINVAL;
	}

	/* key_size */
	if ((params->key_size < sizeof(uint64_t)) ||
		(!rte_is_power_of_2(params->key_size))) {
		RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
		return -EINVAL;
	}

	/* n_keys */
	if (params->n_keys == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_keys invalid value\n", __func__);
		return -EINVAL;
	}

	/* n_buckets */
	if ((params->n_buckets == 0) ||
		(!rte_is_power_of_2(params->n_buckets))) {
		RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		RTE_LOG(ERR, TABLE, "%s: f_hash invalid value\n", __func__);
		return -EINVAL;
	}

	return 0;
}
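
/*
 * For illustration, a parameter set that passes all of the checks above.
 * The values are hypothetical, not defaults; "my_hash" stands for any
 * function with the rte_table_hash_op_hash signature:
 *
 *	struct rte_table_hash_params params = {
 *		.name = "hash_ext",
 *		.key_size = 16,		// >= 8 bytes and a power of 2
 *		.key_offset = 0,	// key location within mbuf metadata
 *		.key_mask = NULL,	// NULL => all key bytes are compared
 *		.n_keys = 1 << 20,
 *		.n_buckets = 1 << 18,	// non-zero power of 2
 *		.f_hash = my_hash,
 *		.seed = 0,
 *	};
 */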
static void *
rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_hash_params *p = params;
	struct rte_table_hash *t;
	uint64_t table_meta_sz, key_mask_sz, bucket_sz, bucket_ext_sz, key_sz;
	uint64_t key_stack_sz, bkt_ext_stack_sz, data_sz, total_size;
	uint64_t key_mask_offset, bucket_offset, bucket_ext_offset, key_offset;
	uint64_t key_stack_offset, bkt_ext_stack_offset, data_offset;
	uint32_t n_buckets_ext, i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		(!rte_is_power_of_2(entry_size)) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		(sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2)))
		return NULL;

	/*
	 * Table dimensioning
	 *
	 * Objective: Pick the number of bucket extensions (n_buckets_ext) so
	 * that it is guaranteed that n_keys keys can be stored in the table at
	 * any time.
	 *
	 * The worst case scenario takes place when all the n_keys keys fall
	 * into the same bucket. Actually, due to the KEYS_PER_BUCKET scheme,
	 * the worst case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys
	 * fall into the same bucket, while the remaining (KEYS_PER_BUCKET - 1)
	 * keys each fall into a different bucket. This case defeats the purpose
	 * of the hash table. It indicates an unsuitable f_hash or an unsuitable
	 * n_keys to n_buckets ratio.
	 *
	 * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
	 */
	n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;

	/* Memory allocation */
	table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
	key_mask_sz = RTE_CACHE_LINE_ROUNDUP(p->key_size);
	bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
	bucket_ext_sz =
		RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(struct bucket));
	key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
	key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
	bkt_ext_stack_sz =
		RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
	data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
	total_size = table_meta_sz + key_mask_sz + bucket_sz + bucket_ext_sz +
		key_sz + key_stack_sz + bkt_ext_stack_sz + data_sz;

	if (total_size > SIZE_MAX) {
		RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
			" for hash table %s\n",
			__func__, total_size, p->name);
		return NULL;
	}

	t = rte_zmalloc_socket(p->name,
		(size_t)total_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (t == NULL) {
		RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
			" for hash table %s\n",
			__func__, total_size, p->name);
		return NULL;
	}
	RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table %s memory "
		"footprint is %" PRIu64 " bytes\n",
		__func__, p->key_size, p->name, total_size);

	/* Memory initialization */
	t->key_size = p->key_size;
	t->entry_size = entry_size;
	t->n_keys = p->n_keys;
	t->n_buckets = p->n_buckets;
	t->n_buckets_ext = n_buckets_ext;
	t->f_hash = p->f_hash;
	t->seed = p->seed;
	t->key_offset = p->key_offset;

	/* Internal */
	t->bucket_mask = t->n_buckets - 1;
	t->key_size_shl = __builtin_ctzl(p->key_size);
	t->data_size_shl = __builtin_ctzl(entry_size);

	/* Tables */
	key_mask_offset = 0;
	bucket_offset = key_mask_offset + key_mask_sz;
	bucket_ext_offset = bucket_offset + bucket_sz;
	key_offset = bucket_ext_offset + bucket_ext_sz;
	key_stack_offset = key_offset + key_sz;
	bkt_ext_stack_offset = key_stack_offset + key_stack_sz;
	data_offset = bkt_ext_stack_offset + bkt_ext_stack_sz;

	t->key_mask = (uint64_t *) &t->memory[key_mask_offset];
	t->buckets = (struct bucket *) &t->memory[bucket_offset];
	t->buckets_ext = (struct bucket *) &t->memory[bucket_ext_offset];
	t->key_mem = &t->memory[key_offset];
	t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
	t->bkt_ext_stack = (uint32_t *) &t->memory[bkt_ext_stack_offset];
	t->data_mem = &t->memory[data_offset];

	/* Key mask */
	if (p->key_mask == NULL)
		memset(t->key_mask, 0xFF, p->key_size);
	else
		memcpy(t->key_mask, p->key_mask, p->key_size);

	/* Key stack */
	for (i = 0; i < t->n_keys; i++)
		t->key_stack[i] = t->n_keys - 1 - i;
	t->key_stack_tos = t->n_keys;

	/* Bucket ext stack */
	for (i = 0; i < t->n_buckets_ext; i++)
		t->bkt_ext_stack[i] = t->n_buckets_ext - 1 - i;
	t->bkt_ext_stack_tos = t->n_buckets_ext;

	return t;
}
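
/*
 * Worked example of the dimensioning above (hypothetical sizes): with
 * n_keys = 1 << 20 and KEYS_PER_BUCKET = 4,
 *
 *	n_buckets_ext = (1 << 20) / 4 + 4 - 1 = 262147
 *
 * i.e. enough extension buckets to hold all keys even if every key chains
 * off the same bucket. The single rte_zmalloc_socket() region is laid out
 * as consecutive cache-line-aligned arrays:
 *
 *	key_mask | buckets | buckets_ext | keys | key stack |
 *	bucket ext stack | data
 */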
static int
rte_table_hash_ext_free(void *table)
{
	struct rte_table_hash *t = table;

	/* Check input parameters */
	if (t == NULL)
		return -EINVAL;

	rte_free(t);
	return 0;
}

static int
rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
	int *key_found, void **entry_ptr)
{
	struct rte_table_hash *t = table;
	struct bucket *bkt0, *bkt, *bkt_prev;
	uint64_t sig;
	uint32_t bkt_index, i;

	sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
	bkt_index = sig & t->bucket_mask;
	bkt0 = &t->buckets[bkt_index];
	sig = (sig >> 16) | 1LLU;

	/* Key is present in the bucket */
	for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];
			uint32_t bkt_key_index = bkt->key_pos[i];
			uint8_t *bkt_key =
				&t->key_mem[bkt_key_index << t->key_size_shl];

			if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
				t->key_size) == 0)) {
				uint8_t *data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				memcpy(data, entry, t->entry_size);
				*key_found = 1;
				*entry_ptr = (void *) data;
				return 0;
			}
		}

	/* Key is not present in the bucket */
	for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
		bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];

			if (bkt_sig == 0) {
				uint32_t bkt_key_index;
				uint8_t *bkt_key, *data;

				/* Allocate new key */
				if (t->key_stack_tos == 0) /* No free keys */
					return -ENOSPC;

				bkt_key_index = t->key_stack[
					--t->key_stack_tos];

				/* Install new key */
				bkt_key = &t->key_mem[bkt_key_index <<
					t->key_size_shl];
				data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				bkt->sig[i] = (uint16_t) sig;
				bkt->key_pos[i] = bkt_key_index;
				keycpy(bkt_key, key, t->key_mask, t->key_size);
				memcpy(data, entry, t->entry_size);

				*key_found = 0;
				*entry_ptr = (void *) data;
				return 0;
			}
		}

	/* Bucket full: extend bucket */
	if ((t->bkt_ext_stack_tos > 0) && (t->key_stack_tos > 0)) {
		uint32_t bkt_key_index;
		uint8_t *bkt_key, *data;

		/* Allocate new bucket ext */
		bkt_index = t->bkt_ext_stack[--t->bkt_ext_stack_tos];
		bkt = &t->buckets_ext[bkt_index];

		/* Chain the new bucket ext */
		BUCKET_NEXT_SET(bkt_prev, bkt);
		BUCKET_NEXT_SET_NULL(bkt);

		/* Allocate new key */
		bkt_key_index = t->key_stack[--t->key_stack_tos];
		bkt_key = &t->key_mem[bkt_key_index << t->key_size_shl];

		data = &t->data_mem[bkt_key_index << t->data_size_shl];

		/* Install new key into bucket */
		bkt->sig[0] = (uint16_t) sig;
		bkt->key_pos[0] = bkt_key_index;
		keycpy(bkt_key, key, t->key_mask, t->key_size);
		memcpy(data, entry, t->entry_size);

		*key_found = 0;
		*entry_ptr = (void *) data;
		return 0;
	}

	return -ENOSPC;
}
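
/*
 * Illustrative call sequence for the add operation above (table, key and
 * entry buffers are assumed to exist; error handling elided):
 *
 *	int key_found;
 *	void *entry_ptr;
 *
 *	int status = rte_table_hash_ext_entry_add(table, key, entry,
 *		&key_found, &entry_ptr);
 *	// status == 0 and key_found == 1: existing entry was overwritten
 *	// status == 0 and key_found == 0: new key was installed
 *	// status == -ENOSPC: key stack or bucket extension stack exhausted
 *	// entry_ptr points at the table-resident copy of the entry
 */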
static int
rte_table_hash_ext_entry_delete(void *table, void *key, int *key_found,
	void *entry)
{
	struct rte_table_hash *t = table;
	struct bucket *bkt0, *bkt, *bkt_prev;
	uint64_t sig;
	uint32_t bkt_index, i;

	sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
	bkt_index = sig & t->bucket_mask;
	bkt0 = &t->buckets[bkt_index];
	sig = (sig >> 16) | 1LLU;

	/* Key is present in the bucket */
	for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
		bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];
			uint32_t bkt_key_index = bkt->key_pos[i];
			uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
				t->key_size_shl];

			if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
				t->key_size) == 0)) {
				uint8_t *data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				/* Uninstall key from bucket */
				bkt->sig[i] = 0;
				*key_found = 1;
				if (entry)
					memcpy(entry, data, t->entry_size);

				/* Free key */
				t->key_stack[t->key_stack_tos++] =
					bkt_key_index;

				/* Check if bucket is unused */
				if ((bkt_prev != NULL) &&
					(bkt->sig[0] == 0) && (bkt->sig[1] == 0) &&
					(bkt->sig[2] == 0) && (bkt->sig[3] == 0)) {
					/* Unchain bucket */
					BUCKET_NEXT_COPY(bkt_prev, bkt);

					/* Clear bucket */
					memset(bkt, 0, sizeof(struct bucket));

					/* Free bucket back to buckets ext */
					bkt_index = bkt - t->buckets_ext;
					t->bkt_ext_stack[t->bkt_ext_stack_tos++]
						= bkt_index;
				}

				return 0;
			}
		}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}
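
/*
 * Illustrative call sequence for the delete operation above (same
 * assumptions as the add example; ENTRY_SIZE is hypothetical):
 *
 *	int key_found;
 *	uint8_t old_entry[ENTRY_SIZE];
 *
 *	rte_table_hash_ext_entry_delete(table, key, &key_found, old_entry);
 *	// key_found == 1: key removed, old_entry holds the previous data;
 *	// an extension bucket left empty by the removal is unchained and
 *	// recycled. Passing NULL for the entry pointer skips the copy-out.
 */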
static int rte_table_hash_ext_lookup_unoptimized(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *t = (struct rte_table_hash *) table;
	uint64_t pkts_mask_out = 0;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);

	for ( ; pkts_mask; ) {
		struct bucket *bkt0, *bkt;
		struct rte_mbuf *pkt;
		uint8_t *key;
		uint64_t pkt_mask, sig;
		uint32_t pkt_index, bkt_index, i;

		pkt_index = __builtin_ctzll(pkts_mask);
		pkt_mask = 1LLU << pkt_index;
		pkts_mask &= ~pkt_mask;

		pkt = pkts[pkt_index];
		key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
		sig = (uint64_t) t->f_hash(key, t->key_mask, t->key_size, t->seed);

		bkt_index = sig & t->bucket_mask;
		bkt0 = &t->buckets[bkt_index];
		sig = (sig >> 16) | 1LLU;

		/* Key is present in the bucket */
		for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
			for (i = 0; i < KEYS_PER_BUCKET; i++) {
				uint64_t bkt_sig = (uint64_t) bkt->sig[i];
				uint32_t bkt_key_index = bkt->key_pos[i];
				uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
					t->key_size_shl];

				if ((sig == bkt_sig) && (keycmp(bkt_key, key,
					t->key_mask, t->key_size) == 0)) {
					uint8_t *data = &t->data_mem[
						bkt_key_index << t->data_size_shl];

					pkts_mask_out |= pkt_mask;
					entries[pkt_index] = (void *) data;
					break;
				}
			}
	}

	*lookup_hit_mask = pkts_mask_out;
	return 0;
}

/***
 *
 * mask = match bitmask
 * match = at least one match
 * match_many = more than one match
 * match_pos = position of first match
 *
 *----------------------------------------
 * mask		match	match_many	match_pos
 *----------------------------------------
 * 0000		0	0		00
 * 0001		1	0		00
 * 0010		1	0		01
 * 0011		1	1		00
 *----------------------------------------
 * 0100		1	0		10
 * 0101		1	1		00
 * 0110		1	1		01
 * 0111		1	1		00
 *----------------------------------------
 * 1000		1	0		11
 * 1001		1	1		00
 * 1010		1	1		01
 * 1011		1	1		00
 *----------------------------------------
 * 1100		1	1		10
 * 1101		1	1		00
 * 1110		1	1		01
 * 1111		1	1		00
 *----------------------------------------
 *
 * match = 1111_1111_1111_1110
 * match_many = 1111_1110_1110_1000
 * match_pos = 0001_0010_0001_0011__0001_0010_0001_0000
 *
 * match = 0xFFFELLU
 * match_many = 0xFEE8LLU
 * match_pos = 0x12131210LLU
 *
 ***/

#define LUT_MATCH	0xFFFELLU
#define LUT_MATCH_MANY	0xFEE8LLU
#define LUT_MATCH_POS	0x12131210LLU

#define lookup_cmp_sig(mbuf_sig, bucket, match, match_many, match_pos)	\
{									\
	uint64_t bucket_sig[4], mask[4], mask_all;			\
									\
	bucket_sig[0] = bucket->sig[0];					\
	bucket_sig[1] = bucket->sig[1];					\
	bucket_sig[2] = bucket->sig[2];					\
	bucket_sig[3] = bucket->sig[3];					\
									\
	bucket_sig[0] ^= mbuf_sig;					\
	bucket_sig[1] ^= mbuf_sig;					\
	bucket_sig[2] ^= mbuf_sig;					\
	bucket_sig[3] ^= mbuf_sig;					\
									\
	mask[0] = 0;							\
	mask[1] = 0;							\
	mask[2] = 0;							\
	mask[3] = 0;							\
									\
	if (bucket_sig[0] == 0)						\
		mask[0] = 1;						\
	if (bucket_sig[1] == 0)						\
		mask[1] = 2;						\
	if (bucket_sig[2] == 0)						\
		mask[2] = 4;						\
	if (bucket_sig[3] == 0)						\
		mask[3] = 8;						\
									\
	mask_all = (mask[0] | mask[1]) | (mask[2] | mask[3]);		\
									\
	match = (LUT_MATCH >> mask_all) & 1;				\
	match_many = (LUT_MATCH_MANY >> mask_all) & 1;			\
	match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3;		\
}
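
/*
 * Worked example of the signature compare above: if the packet signature
 * matches the signatures stored in bucket slots 1 and 3, then
 *
 *	mask_all   = 2 | 8 = 10 (binary 1010)
 *	match      = (0xFFFE     >> 10) & 1 = 1	// at least one hit
 *	match_many = (0xFEE8     >> 10) & 1 = 1	// more than one hit
 *	match_pos  = (0x12131210 >> 20) & 3 = 1	// first hit is slot 1
 *
 * matching the 1010 row of the table documented before the LUT constants.
 */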
#define lookup_cmp_key(mbuf, key, match_key, f)				\
{									\
	uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset);\
	uint64_t *bkt_key = (uint64_t *) key;				\
	uint64_t *key_mask = f->key_mask;				\
									\
	switch (f->key_size) {						\
	case 8:								\
	{								\
		uint64_t xor = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		match_key = 0;						\
		if (xor == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 16:							\
	{								\
		uint64_t xor[2], or;					\
									\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];	\
		or = xor[0] | xor[1];					\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 32:							\
	{								\
		uint64_t xor[4], or;					\
									\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];	\
		xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2];	\
		xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3];	\
		or = xor[0] | xor[1] | xor[2] | xor[3];			\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 64:							\
	{								\
		uint64_t xor[8], or;					\
									\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];	\
		xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2];	\
		xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3];	\
		xor[4] = (pkt_key[4] & key_mask[4]) ^ bkt_key[4];	\
		xor[5] = (pkt_key[5] & key_mask[5]) ^ bkt_key[5];	\
		xor[6] = (pkt_key[6] & key_mask[6]) ^ bkt_key[6];	\
		xor[7] = (pkt_key[7] & key_mask[7]) ^ bkt_key[7];	\
		or = xor[0] | xor[1] | xor[2] | xor[3] |		\
			xor[4] | xor[5] | xor[6] | xor[7];		\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	default:							\
		match_key = 0;						\
		if (keycmp(bkt_key, pkt_key, key_mask, f->key_size) == 0)\
			match_key = 1;					\
	}								\
}

#define lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index)\
{									\
	uint64_t pkt00_mask, pkt01_mask;				\
	struct rte_mbuf *mbuf00, *mbuf01;				\
	uint32_t key_offset = t->key_offset;				\
									\
	pkt00_index = __builtin_ctzll(pkts_mask);			\
	pkt00_mask = 1LLU << pkt00_index;				\
	pkts_mask &= ~pkt00_mask;					\
	mbuf00 = pkts[pkt00_index];					\
									\
	pkt01_index = __builtin_ctzll(pkts_mask);			\
	pkt01_mask = 1LLU << pkt01_index;				\
	pkts_mask &= ~pkt01_mask;					\
	mbuf01 = pkts[pkt01_index];					\
									\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
}

#define lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask, pkt00_index,\
	pkt01_index)							\
{									\
	uint64_t pkt00_mask, pkt01_mask;				\
	struct rte_mbuf *mbuf00, *mbuf01;				\
	uint32_t key_offset = t->key_offset;				\
									\
	pkt00_index = __builtin_ctzll(pkts_mask);			\
	pkt00_mask = 1LLU << pkt00_index;				\
	pkts_mask &= ~pkt00_mask;					\
	mbuf00 = pkts[pkt00_index];					\
									\
	pkt01_index = __builtin_ctzll(pkts_mask);			\
	if (pkts_mask == 0)						\
		pkt01_index = pkt00_index;				\
	pkt01_mask = 1LLU << pkt01_index;				\
	pkts_mask &= ~pkt01_mask;					\
	mbuf01 = pkts[pkt01_index];					\
									\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
}
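
/*
 * Note on the key compare above: keycpy() stores keys pre-masked, so only
 * the packet-side key needs masking here. For example, with key_size = 16
 * the comparison reduces to two masked 64-bit XORs; a sketch of what the
 * case 16 branch computes:
 *
 *	hit = ((pkt_key[0] & key_mask[0]) == bkt_key[0]) &&
 *	      ((pkt_key[1] & key_mask[1]) == bkt_key[1]);
 */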
#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index)		\
{									\
	struct grinder *g10, *g11;					\
	uint64_t sig10, sig11, bkt10_index, bkt11_index;		\
	struct rte_mbuf *mbuf10, *mbuf11;				\
	struct bucket *bkt10, *bkt11, *buckets = t->buckets;		\
	uint8_t *key10, *key11;						\
	uint64_t bucket_mask = t->bucket_mask;				\
	rte_table_hash_op_hash f_hash = t->f_hash;			\
	uint64_t seed = t->seed;					\
	uint32_t key_size = t->key_size;				\
	uint32_t key_offset = t->key_offset;				\
									\
	mbuf10 = pkts[pkt10_index];					\
	key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset);	\
	sig10 = (uint64_t) f_hash(key10, t->key_mask, key_size, seed);	\
	bkt10_index = sig10 & bucket_mask;				\
	bkt10 = &buckets[bkt10_index];					\
									\
	mbuf11 = pkts[pkt11_index];					\
	key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset);	\
	sig11 = (uint64_t) f_hash(key11, t->key_mask, key_size, seed);	\
	bkt11_index = sig11 & bucket_mask;				\
	bkt11 = &buckets[bkt11_index];					\
									\
	rte_prefetch0(bkt10);						\
	rte_prefetch0(bkt11);						\
									\
	g10 = &g[pkt10_index];						\
	g10->sig = sig10;						\
	g10->bkt = bkt10;						\
									\
	g11 = &g[pkt11_index];						\
	g11->sig = sig11;						\
	g11->bkt = bkt11;						\
}

#define lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many)\
{									\
	struct grinder *g20, *g21;					\
	uint64_t sig20, sig21;						\
	struct bucket *bkt20, *bkt21;					\
	uint8_t *key20, *key21, *key_mem = t->key_mem;			\
	uint64_t match20, match21, match_many20, match_many21;		\
	uint64_t match_pos20, match_pos21;				\
	uint32_t key20_index, key21_index, key_size_shl = t->key_size_shl;\
									\
	g20 = &g[pkt20_index];						\
	sig20 = g20->sig;						\
	bkt20 = g20->bkt;						\
	sig20 = (sig20 >> 16) | 1LLU;					\
	lookup_cmp_sig(sig20, bkt20, match20, match_many20, match_pos20);\
	match20 <<= pkt20_index;					\
	match_many20 |= BUCKET_NEXT_VALID(bkt20);			\
	match_many20 <<= pkt20_index;					\
	key20_index = bkt20->key_pos[match_pos20];			\
	key20 = &key_mem[key20_index << key_size_shl];			\
									\
	g21 = &g[pkt21_index];						\
	sig21 = g21->sig;						\
	bkt21 = g21->bkt;						\
	sig21 = (sig21 >> 16) | 1LLU;					\
	lookup_cmp_sig(sig21, bkt21, match21, match_many21, match_pos21);\
	match21 <<= pkt21_index;					\
	match_many21 |= BUCKET_NEXT_VALID(bkt21);			\
	match_many21 <<= pkt21_index;					\
	key21_index = bkt21->key_pos[match_pos21];			\
	key21 = &key_mem[key21_index << key_size_shl];			\
									\
	rte_prefetch0(key20);						\
	rte_prefetch0(key21);						\
									\
	pkts_mask_match_many |= match_many20 | match_many21;		\
									\
	g20->match = match20;						\
	g20->key_index = key20_index;					\
									\
	g21->match = match21;						\
	g21->key_index = key21_index;					\
}

#define lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,\
	entries)							\
{									\
	struct grinder *g30, *g31;					\
	struct rte_mbuf *mbuf30, *mbuf31;				\
	uint8_t *key30, *key31, *key_mem = t->key_mem;			\
	uint8_t *data30, *data31, *data_mem = t->data_mem;		\
	uint64_t match30, match31, match_key30, match_key31, match_keys;\
	uint32_t key30_index, key31_index;				\
	uint32_t key_size_shl = t->key_size_shl;			\
	uint32_t data_size_shl = t->data_size_shl;			\
									\
	mbuf30 = pkts[pkt30_index];					\
	g30 = &g[pkt30_index];						\
	match30 = g30->match;						\
	key30_index = g30->key_index;					\
	key30 = &key_mem[key30_index << key_size_shl];			\
	lookup_cmp_key(mbuf30, key30, match_key30, t);			\
	match_key30 <<= pkt30_index;					\
	match_key30 &= match30;						\
	data30 = &data_mem[key30_index << data_size_shl];		\
	entries[pkt30_index] = data30;					\
									\
	mbuf31 = pkts[pkt31_index];					\
	g31 = &g[pkt31_index];						\
	match31 = g31->match;						\
	key31_index = g31->key_index;					\
	key31 = &key_mem[key31_index << key_size_shl];			\
	lookup_cmp_key(mbuf31, key31, match_key31, t);			\
	match_key31 <<= pkt31_index;					\
	match_key31 &= match31;						\
	data31 = &data_mem[key31_index << data_size_shl];		\
	entries[pkt31_index] = data31;					\
									\
	rte_prefetch0(data30);						\
	rte_prefetch0(data31);						\
									\
	match_keys = match_key30 | match_key31;				\
	pkts_mask_out |= match_keys;					\
}
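
/*
 * Worked example of the stage 3 hit bookkeeping above: for a packet at
 * burst position 5 whose signature matched (match = 1 << 5) and whose full
 * key also matched (match_key = 1),
 *
 *	match_key <<= 5;		// align the key-match bit with the packet
 *	match_key &= match;		// keep it only if the signature also hit
 *	pkts_mask_out |= match_key;	// bit 5 now reports the hit
 *
 * so a key match without a signature match can never report a false hit.
 */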
/***
 * The lookup function implements a 4-stage pipeline, with each stage
 * processing two different packets. The purpose of the pipelined
 * implementation is to hide the latency of prefetching the data structures
 * and loosen the data dependency between instructions.
 *
 *   p00  _______   p10  _______   p20  _______   p30  _______
 *----->|       |----->|       |----->|       |----->|       |----->
 *      |   0   |      |   1   |      |   2   |      |   3   |
 *----->|_______|----->|_______|----->|_______|----->|_______|----->
 *   p01            p11            p21            p31
 *
 * The naming convention is:
 *	pXY = packet Y of stage X, X = 0 .. 3, Y = 0 .. 1
 *
 ***/
static int rte_table_hash_ext_lookup(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *t = (struct rte_table_hash *) table;
	struct grinder *g = t->grinders;
	uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
	uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
	uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
	int status = 0;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
	RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(t, n_pkts_in);

	/* Cannot run the pipeline with fewer than 7 packets */
	if (__builtin_popcountll(pkts_mask) < 7) {
		status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
			pkts_mask, lookup_hit_mask, entries);
		RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
			__builtin_popcountll(*lookup_hit_mask));
		return status;
	}

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline feed */
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline feed */
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline run */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		pkt30_index = pkt20_index;
		pkt31_index = pkt21_index;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
			pkt00_index, pkt01_index);

		/* Pipeline stage 1 */
		lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

		/* Pipeline stage 2 */
		lookup2_stage2(t, g, pkt20_index, pkt21_index,
			pkts_mask_match_many);

		/* Pipeline stage 3 */
		lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
			pkts_mask_out, entries);
	}

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Slow path */
	pkts_mask_match_many &= ~pkts_mask_out;
	if (pkts_mask_match_many) {
		uint64_t pkts_mask_out_slow = 0;

		status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
			pkts_mask_match_many, &pkts_mask_out_slow, entries);
		pkts_mask_out |= pkts_mask_out_slow;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
		__builtin_popcountll(pkts_mask_out));
	return status;
}
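
/*
 * Illustrative invocation of the optimized lookup above (hypothetical
 * burst of n_pkts mbufs; RTE_LEN2MASK() builds the input packet mask):
 *
 *	uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
 *	uint64_t lookup_hit_mask;
 *	void *entries[RTE_PORT_IN_BURST_SIZE_MAX];
 *
 *	rte_table_hash_ext_ops.f_lookup(table, pkts, pkts_mask,
 *		&lookup_hit_mask, entries);
 *	// bit i of lookup_hit_mask set <=> entries[i] points to the data
 *	// entry stored for the key carried by pkts[i]
 */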
static int
rte_table_hash_ext_stats_read(void *table, struct rte_table_stats *stats,
	int clear)
{
	struct rte_table_hash *t = table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}

struct rte_table_ops rte_table_hash_ext_ops = {
	.f_create = rte_table_hash_ext_create,
	.f_free = rte_table_hash_ext_free,
	.f_add = rte_table_hash_ext_entry_add,
	.f_delete = rte_table_hash_ext_entry_delete,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_ext_lookup,
	.f_stats = rte_table_hash_ext_stats_read,
};
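
/*
 * Minimal life-cycle sketch using the ops table (all names and sizes are
 * hypothetical; note that entry_size must be a power of 2):
 *
 *	struct rte_table_hash_params params = { ... };	// see the example
 *						// after check_params_create()
 *	void *table = rte_table_hash_ext_ops.f_create(&params,
 *		SOCKET_ID_ANY, 16);		// 16-byte entries
 *	if (table != NULL) {
 *		// ... f_add() / f_lookup() / f_delete() as sketched above ...
 *		rte_table_hash_ext_ops.f_free(table);
 *	}
 */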