1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2020 Mellanox Technologies, Ltd 3 * 4 * This file contain the implementations of the items 5 * related methods. Each Item have a method to prepare 6 * the item and add it into items array in given index. 7 */ 8 9 #include <stdint.h> 10 #include <rte_flow.h> 11 12 #include "items_gen.h" 13 #include "config.h" 14 15 /* Storage for additional parameters for items */ 16 struct additional_para { 17 rte_be32_t src_ip; 18 uint8_t core_idx; 19 }; 20 21 static void 22 add_ether(struct rte_flow_item *items, 23 uint8_t items_counter, 24 __rte_unused struct additional_para para) 25 { 26 static struct rte_flow_item_eth eth_spec; 27 static struct rte_flow_item_eth eth_mask; 28 29 items[items_counter].type = RTE_FLOW_ITEM_TYPE_ETH; 30 items[items_counter].spec = ð_spec; 31 items[items_counter].mask = ð_mask; 32 } 33 34 static void 35 add_vlan(struct rte_flow_item *items, 36 uint8_t items_counter, 37 __rte_unused struct additional_para para) 38 { 39 static struct rte_flow_item_vlan vlan_spec = { 40 .hdr.vlan_tci = RTE_BE16(VLAN_VALUE), 41 }; 42 static struct rte_flow_item_vlan vlan_mask = { 43 .hdr.vlan_tci = RTE_BE16(0xffff), 44 }; 45 46 items[items_counter].type = RTE_FLOW_ITEM_TYPE_VLAN; 47 items[items_counter].spec = &vlan_spec; 48 items[items_counter].mask = &vlan_mask; 49 } 50 51 static void 52 add_ipv4(struct rte_flow_item *items, 53 uint8_t items_counter, struct additional_para para) 54 { 55 static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_item_ipv4 ipv4_specs[RTE_MAX_LCORE]; 56 static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_item_ipv4 ipv4_masks[RTE_MAX_LCORE]; 57 uint8_t ti = para.core_idx; 58 59 ipv4_specs[ti].hdr.src_addr = RTE_BE32(para.src_ip); 60 ipv4_masks[ti].hdr.src_addr = RTE_BE32(0xffffffff); 61 62 items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV4; 63 items[items_counter].spec = &ipv4_specs[ti]; 64 items[items_counter].mask = &ipv4_masks[ti]; 65 } 66 67 68 static void 69 add_ipv6(struct rte_flow_item *items, 70 uint8_t items_counter, struct additional_para para) 71 { 72 static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_item_ipv6 ipv6_specs[RTE_MAX_LCORE]; 73 static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_item_ipv6 ipv6_masks[RTE_MAX_LCORE]; 74 uint8_t ti = para.core_idx; 75 uint8_t i; 76 77 /** Set ipv6 src **/ 78 for (i = 0; i < 16; i++) { 79 /* Currently src_ip is limited to 32 bit */ 80 if (i < 4) 81 ipv6_specs[ti].hdr.src_addr.a[15 - i] = para.src_ip >> (i * 8); 82 ipv6_masks[ti].hdr.src_addr.a[15 - i] = 0xff; 83 } 84 85 items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6; 86 items[items_counter].spec = &ipv6_specs[ti]; 87 items[items_counter].mask = &ipv6_masks[ti]; 88 } 89 90 static void 91 add_tcp(struct rte_flow_item *items, 92 uint8_t items_counter, 93 __rte_unused struct additional_para para) 94 { 95 static struct rte_flow_item_tcp tcp_spec; 96 static struct rte_flow_item_tcp tcp_mask; 97 98 items[items_counter].type = RTE_FLOW_ITEM_TYPE_TCP; 99 items[items_counter].spec = &tcp_spec; 100 items[items_counter].mask = &tcp_mask; 101 } 102 103 static void 104 add_udp(struct rte_flow_item *items, 105 uint8_t items_counter, 106 __rte_unused struct additional_para para) 107 { 108 static struct rte_flow_item_udp udp_spec; 109 static struct rte_flow_item_udp udp_mask; 110 111 items[items_counter].type = RTE_FLOW_ITEM_TYPE_UDP; 112 items[items_counter].spec = &udp_spec; 113 items[items_counter].mask = &udp_mask; 114 } 115 116 static void 117 add_vxlan(struct rte_flow_item *items, 118 uint8_t items_counter, 119 struct additional_para para) 120 { 121 static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_item_vxlan vxlan_specs[RTE_MAX_LCORE]; 122 static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_item_vxlan vxlan_masks[RTE_MAX_LCORE]; 123 uint8_t ti = para.core_idx; 124 uint32_t vni_value; 125 uint8_t i; 126 127 vni_value = VNI_VALUE; 128 129 /* Set standard vxlan vni */ 130 for (i = 0; i < 3; i++) { 131 vxlan_specs[ti].hdr.vni[2 - i] = vni_value >> (i * 8); 132 vxlan_masks[ti].hdr.vni[2 - i] = 0xff; 133 } 134 135 /* Standard vxlan flags */ 136 vxlan_specs[ti].hdr.flags = 0x8; 137 138 items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN; 139 items[items_counter].spec = &vxlan_specs[ti]; 140 items[items_counter].mask = &vxlan_masks[ti]; 141 } 142 143 static void 144 add_vxlan_gpe(struct rte_flow_item *items, 145 uint8_t items_counter, 146 __rte_unused struct additional_para para) 147 { 148 static alignas(RTE_CACHE_LINE_SIZE) 149 struct rte_flow_item_vxlan_gpe vxlan_gpe_specs[RTE_MAX_LCORE]; 150 static alignas(RTE_CACHE_LINE_SIZE) 151 struct rte_flow_item_vxlan_gpe vxlan_gpe_masks[RTE_MAX_LCORE]; 152 uint8_t ti = para.core_idx; 153 uint32_t vni_value; 154 uint8_t i; 155 156 vni_value = VNI_VALUE; 157 158 /* Set vxlan-gpe vni */ 159 for (i = 0; i < 3; i++) { 160 vxlan_gpe_specs[ti].hdr.vni[2 - i] = vni_value >> (i * 8); 161 vxlan_gpe_masks[ti].hdr.vni[2 - i] = 0xff; 162 } 163 164 /* vxlan-gpe flags */ 165 vxlan_gpe_specs[ti].hdr.flags = 0x0c; 166 167 items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE; 168 items[items_counter].spec = &vxlan_gpe_specs[ti]; 169 items[items_counter].mask = &vxlan_gpe_masks[ti]; 170 } 171 172 static void 173 add_gre(struct rte_flow_item *items, 174 uint8_t items_counter, 175 __rte_unused struct additional_para para) 176 { 177 static struct rte_flow_item_gre gre_spec = { 178 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB), 179 }; 180 static struct rte_flow_item_gre gre_mask = { 181 .protocol = RTE_BE16(0xffff), 182 }; 183 184 items[items_counter].type = RTE_FLOW_ITEM_TYPE_GRE; 185 items[items_counter].spec = &gre_spec; 186 items[items_counter].mask = &gre_mask; 187 } 188 189 static void 190 add_geneve(struct rte_flow_item *items, 191 uint8_t items_counter, 192 __rte_unused struct additional_para para) 193 { 194 static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_item_geneve geneve_specs[RTE_MAX_LCORE]; 195 static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_item_geneve geneve_masks[RTE_MAX_LCORE]; 196 uint8_t ti = para.core_idx; 197 uint32_t vni_value; 198 uint8_t i; 199 200 vni_value = VNI_VALUE; 201 202 for (i = 0; i < 3; i++) { 203 geneve_specs[ti].vni[2 - i] = vni_value >> (i * 8); 204 geneve_masks[ti].vni[2 - i] = 0xff; 205 } 206 207 items[items_counter].type = RTE_FLOW_ITEM_TYPE_GENEVE; 208 items[items_counter].spec = &geneve_specs[ti]; 209 items[items_counter].mask = &geneve_masks[ti]; 210 } 211 212 static void 213 add_gtp(struct rte_flow_item *items, 214 uint8_t items_counter, 215 __rte_unused struct additional_para para) 216 { 217 static struct rte_flow_item_gtp gtp_spec = { 218 .hdr.teid = RTE_BE32(TEID_VALUE), 219 }; 220 static struct rte_flow_item_gtp gtp_mask = { 221 .hdr.teid = RTE_BE32(0xffffffff), 222 }; 223 224 items[items_counter].type = RTE_FLOW_ITEM_TYPE_GTP; 225 items[items_counter].spec = >p_spec; 226 items[items_counter].mask = >p_mask; 227 } 228 229 static void 230 add_meta_data(struct rte_flow_item *items, 231 uint8_t items_counter, 232 __rte_unused struct additional_para para) 233 { 234 static struct rte_flow_item_meta meta_spec = { 235 .data = RTE_BE32(META_DATA), 236 }; 237 static struct rte_flow_item_meta meta_mask = { 238 .data = RTE_BE32(0xffffffff), 239 }; 240 241 items[items_counter].type = RTE_FLOW_ITEM_TYPE_META; 242 items[items_counter].spec = &meta_spec; 243 items[items_counter].mask = &meta_mask; 244 } 245 246 247 static void 248 add_meta_tag(struct rte_flow_item *items, 249 uint8_t items_counter, 250 __rte_unused struct additional_para para) 251 { 252 static struct rte_flow_item_tag tag_spec = { 253 .data = RTE_BE32(META_DATA), 254 .index = TAG_INDEX, 255 }; 256 static struct rte_flow_item_tag tag_mask = { 257 .data = RTE_BE32(0xffffffff), 258 .index = 0xff, 259 }; 260 261 items[items_counter].type = RTE_FLOW_ITEM_TYPE_TAG; 262 items[items_counter].spec = &tag_spec; 263 items[items_counter].mask = &tag_mask; 264 } 265 266 static void 267 add_icmpv4(struct rte_flow_item *items, 268 uint8_t items_counter, 269 __rte_unused struct additional_para para) 270 { 271 static struct rte_flow_item_icmp icmpv4_spec; 272 static struct rte_flow_item_icmp icmpv4_mask; 273 274 items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP; 275 items[items_counter].spec = &icmpv4_spec; 276 items[items_counter].mask = &icmpv4_mask; 277 } 278 279 static void 280 add_icmpv6(struct rte_flow_item *items, 281 uint8_t items_counter, 282 __rte_unused struct additional_para para) 283 { 284 static struct rte_flow_item_icmp6 icmpv6_spec; 285 static struct rte_flow_item_icmp6 icmpv6_mask; 286 287 items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP6; 288 items[items_counter].spec = &icmpv6_spec; 289 items[items_counter].mask = &icmpv6_mask; 290 } 291 292 void 293 fill_items(struct rte_flow_item *items, 294 uint64_t *flow_items, uint32_t outer_ip_src, 295 uint8_t core_idx) 296 { 297 uint8_t items_counter = 0; 298 uint8_t i, j; 299 struct additional_para additional_para_data = { 300 .src_ip = outer_ip_src, 301 .core_idx = core_idx, 302 }; 303 304 /* Support outer items up to tunnel layer only. */ 305 static const struct items_dict { 306 uint64_t mask; 307 void (*funct)( 308 struct rte_flow_item *items, 309 uint8_t items_counter, 310 struct additional_para para 311 ); 312 } items_list[] = { 313 { 314 .mask = RTE_FLOW_ITEM_TYPE_META, 315 .funct = add_meta_data, 316 }, 317 { 318 .mask = RTE_FLOW_ITEM_TYPE_TAG, 319 .funct = add_meta_tag, 320 }, 321 { 322 .mask = RTE_FLOW_ITEM_TYPE_ETH, 323 .funct = add_ether, 324 }, 325 { 326 .mask = RTE_FLOW_ITEM_TYPE_VLAN, 327 .funct = add_vlan, 328 }, 329 { 330 .mask = RTE_FLOW_ITEM_TYPE_IPV4, 331 .funct = add_ipv4, 332 }, 333 { 334 .mask = RTE_FLOW_ITEM_TYPE_IPV6, 335 .funct = add_ipv6, 336 }, 337 { 338 .mask = RTE_FLOW_ITEM_TYPE_TCP, 339 .funct = add_tcp, 340 }, 341 { 342 .mask = RTE_FLOW_ITEM_TYPE_UDP, 343 .funct = add_udp, 344 }, 345 { 346 .mask = RTE_FLOW_ITEM_TYPE_VXLAN, 347 .funct = add_vxlan, 348 }, 349 { 350 .mask = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, 351 .funct = add_vxlan_gpe, 352 }, 353 { 354 .mask = RTE_FLOW_ITEM_TYPE_GRE, 355 .funct = add_gre, 356 }, 357 { 358 .mask = RTE_FLOW_ITEM_TYPE_GENEVE, 359 .funct = add_geneve, 360 }, 361 { 362 .mask = RTE_FLOW_ITEM_TYPE_GTP, 363 .funct = add_gtp, 364 }, 365 { 366 .mask = RTE_FLOW_ITEM_TYPE_ICMP, 367 .funct = add_icmpv4, 368 }, 369 { 370 .mask = RTE_FLOW_ITEM_TYPE_ICMP6, 371 .funct = add_icmpv6, 372 }, 373 }; 374 375 for (j = 0; j < MAX_ITEMS_NUM; j++) { 376 if (flow_items[j] == 0) 377 break; 378 for (i = 0; i < RTE_DIM(items_list); i++) { 379 if ((flow_items[j] & 380 FLOW_ITEM_MASK(items_list[i].mask)) == 0) 381 continue; 382 items_list[i].funct( 383 items, items_counter++, 384 additional_para_data 385 ); 386 break; 387 } 388 } 389 390 items[items_counter].type = RTE_FLOW_ITEM_TYPE_END; 391 } 392