/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _TRIE_H_
#define _TRIE_H_

/**
 * @file
 * RTE IPv6 Longest Prefix Match (LPM)
 */

/* @internal Total number of tbl24 entries. */
#define TRIE_TBL24_NUM_ENT	(1 << 24)
/* Maximum depth value possible for IPv6 LPM. */
#define TRIE_MAX_DEPTH		128
/* @internal Number of entries in a tbl8 group. */
#define TRIE_TBL8_GRP_NUM_ENT	256ULL
/* @internal Total number of tbl8 groups in the tbl8. */
#define TRIE_TBL8_NUM_GROUPS	65536
/* @internal Bit marking an entry that is extended into a tbl8 group. */
#define TRIE_EXT_ENT		1

#define BITMAP_SLAB_BIT_SIZE_LOG2	6
#define BITMAP_SLAB_BIT_SIZE		(1ULL << BITMAP_SLAB_BIT_SIZE_LOG2)
#define BITMAP_SLAB_BITMASK		(BITMAP_SLAB_BIT_SIZE - 1)

struct rte_trie_tbl {
	uint32_t	number_tbl8s;	/**< Total number of tbl8s */
	uint32_t	rsvd_tbl8s;	/**< Number of reserved tbl8s */
	uint32_t	cur_tbl8s;	/**< Current number of tbl8s */
	uint64_t	def_nh;		/**< Default next hop */
	enum rte_fib_trie_nh_sz	nh_sz;	/**< Size of nexthop entry */
	uint64_t	*tbl8;		/**< tbl8 table. */
	uint32_t	*tbl8_pool;	/**< bitmap containing free tbl8 idxes*/
	uint32_t	tbl8_pool_pos;
	/* tbl24 table. */
	__extension__ uint64_t	tbl24[0] __rte_cache_aligned;
};

/* Build the tbl24 index from the first three bytes of an IPv6 address. */
static inline uint32_t
get_tbl24_idx(const uint8_t *ip)
{
	return ip[0] << 16 | ip[1] << 8 | ip[2];
}

/* Pointer to the tbl24 entry for an address; entries are (1 << nh_sz) bytes wide. */
static inline void *
get_tbl24_p(struct rte_trie_tbl *dp, const uint8_t *ip, uint8_t nh_sz)
{
	uint32_t tbl24_idx;

	tbl24_idx = get_tbl24_idx(ip);
	return (void *)&((uint8_t *)dp->tbl24)[tbl24_idx << nh_sz];
}

/* Width of a next-hop entry in bits: 8 * 2^nh_sz. */
static inline uint8_t
bits_in_nh(uint8_t nh_sz)
{
	return 8 * (1 << nh_sz);
}

/* Maximum next-hop value; the least significant bit is reserved for TRIE_EXT_ENT. */
static inline uint64_t
get_max_nh(uint8_t nh_sz)
{
	return ((1ULL << (bits_in_nh(nh_sz) - 1)) - 1);
}

/* All-ones mask covering one next-hop entry, built without shifting by 64. */
static inline uint64_t
lookup_msk(uint8_t nh_sz)
{
	return ((1ULL << ((1 << (nh_sz + 3)) - 1)) << 1) - 1;
}

/* Position of entry val within the 64-bit word that packs it. */
static inline uint8_t
get_psd_idx(uint32_t val, uint8_t nh_sz)
{
	return val & ((1 << (3 - nh_sz)) - 1);
}

/* Index of the 64-bit word holding entry val. */
static inline uint32_t
get_tbl_pos(uint32_t val, uint8_t nh_sz)
{
	return val >> (3 - nh_sz);
}

/* Extract entry idx from a table packed into 64-bit words. */
static inline uint64_t
get_tbl_val_by_idx(uint64_t *tbl, uint32_t idx, uint8_t nh_sz)
{
	return ((tbl[get_tbl_pos(idx, nh_sz)] >> (get_psd_idx(idx, nh_sz) *
		bits_in_nh(nh_sz))) & lookup_msk(nh_sz));
}

/* Pointer to entry idx, with entries (1 << nh_sz) bytes wide. */
static inline void *
get_tbl_p_by_idx(uint64_t *tbl, uint64_t idx, uint8_t nh_sz)
{
	return (uint8_t *)tbl + (idx << nh_sz);
}

/* True if the entry points to a tbl8 group rather than holding a next hop. */
static inline int
is_entry_extended(uint64_t ent)
{
	return (ent & TRIE_EXT_ENT) == TRIE_EXT_ENT;
}

/*
 * Generates a bulk lookup function for the given next-hop entry size.
 * The first three bytes of each address index tbl24; while the entry is
 * extended, the remaining address bytes walk the chain of tbl8 groups.
 * The stored entry holds the next hop shifted left by one bit.
 */
#define LOOKUP_FUNC(suffix, type, nh_sz)				\
static inline void rte_trie_lookup_bulk_##suffix(void *p,		\
	uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE],				\
	uint64_t *next_hops, const unsigned int n)			\
{									\
	struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p;		\
	uint64_t tmp;							\
	uint32_t i, j;							\
									\
	for (i = 0; i < n; i++) {					\
		tmp = ((type *)dp->tbl24)[get_tbl24_idx(&ips[i][0])];	\
		j = 3;							\
		while (is_entry_extended(tmp)) {			\
			tmp = ((type *)dp->tbl8)[ips[i][j++] +		\
				((tmp >> 1) * TRIE_TBL8_GRP_NUM_ENT)];	\
		}							\
		next_hops[i] = tmp >> 1;				\
	}								\
}
LOOKUP_FUNC(2b, uint16_t, 1)
LOOKUP_FUNC(4b, uint32_t, 2)
LOOKUP_FUNC(8b, uint64_t, 3)
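
/*
 * Usage sketch (illustrative only, not part of the original API): the
 * LOOKUP_FUNC() expansions above are meant to be called on a table that
 * was created with trie_create() and populated via trie_modify(), using
 * the suffix that matches the configured next-hop entry size. Assuming
 * a table "p" built with 4-byte next-hop entries and a caller-defined
 * batch size BURST:
 *
 *	uint8_t ips[BURST][RTE_FIB6_IPV6_ADDR_SIZE];
 *	uint64_t next_hops[BURST];
 *
 *	rte_trie_lookup_bulk_4b(p, ips, next_hops, BURST);
 */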

void *
trie_create(const char *name, int socket_id, struct rte_fib6_conf *conf);

void
trie_free(void *p);

rte_fib6_lookup_fn_t
trie_get_lookup_fn(void *p, enum rte_fib6_lookup_type type);

int
trie_modify(struct rte_fib6 *fib, const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE],
	uint8_t depth, uint64_t next_hop, int op);

#endif /* _TRIE_H_ */