1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018 Chelsio Communications. 3 * All rights reserved. 4 */ 5 #include "base/common.h" 6 #include "cxgbe_flow.h" 7 8 #define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \ 9 do { \ 10 if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \ 11 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \ 12 NULL, "Redefined match item with" \ 13 " different values found"); \ 14 (fs)->val.elem = (__v); \ 15 (fs)->mask.elem = (__m); \ 16 } while (0) 17 18 #define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \ 19 do { \ 20 memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \ 21 memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \ 22 } while (0) 23 24 #define CXGBE_FILL_FS(v, m, elem) \ 25 __CXGBE_FILL_FS(v, m, fs, elem, e) 26 27 #define CXGBE_FILL_FS_MEMCPY(v, m, elem) \ 28 __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem) 29 30 static int 31 cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e) 32 { 33 /* rte_flow specification does not allow it. */ 34 if (!i->spec && (i->mask || i->last)) 35 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, 36 i, "last or mask given without spec"); 37 /* 38 * We don't support it. 39 * Although, we can support values in last as 0's or last == spec. 40 * But this will not provide user with any additional functionality 41 * and will only increase the complexity for us. 42 */ 43 if (i->last) 44 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 45 i, "last is not supported by chelsio pmd"); 46 return 0; 47 } 48 49 static void 50 cxgbe_fill_filter_region(struct adapter *adap, 51 struct ch_filter_specification *fs) 52 { 53 struct tp_params *tp = &adap->params.tp; 54 u64 hash_filter_mask = tp->hash_filter_mask; 55 u64 ntuple_mask = 0; 56 57 fs->cap = 0; 58 59 if (!is_hashfilter(adap)) 60 return; 61 62 if (fs->type) { 63 uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff, 64 0xff, 0xff, 0xff, 0xff, 65 0xff, 0xff, 0xff, 0xff, 66 0xff, 0xff, 0xff, 0xff}; 67 uint8_t bitoff[16] = {0}; 68 69 if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) || 70 !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) || 71 memcmp(fs->mask.lip, biton, sizeof(biton)) || 72 memcmp(fs->mask.fip, biton, sizeof(biton))) 73 return; 74 } else { 75 uint32_t biton = 0xffffffff; 76 uint32_t bitoff = 0x0U; 77 78 if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) || 79 !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) || 80 memcmp(fs->mask.lip, &biton, sizeof(biton)) || 81 memcmp(fs->mask.fip, &biton, sizeof(biton))) 82 return; 83 } 84 85 if (!fs->val.lport || fs->mask.lport != 0xffff) 86 return; 87 if (!fs->val.fport || fs->mask.fport != 0xffff) 88 return; 89 90 if (tp->protocol_shift >= 0) 91 ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift; 92 if (tp->ethertype_shift >= 0) 93 ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift; 94 if (tp->port_shift >= 0) 95 ntuple_mask |= (u64)fs->mask.iport << tp->port_shift; 96 if (tp->macmatch_shift >= 0) 97 ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift; 98 99 if (ntuple_mask != hash_filter_mask) 100 return; 101 102 fs->cap = 1; /* use hash region */ 103 } 104 105 static int 106 ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item, 107 struct ch_filter_specification *fs, 108 struct rte_flow_error *e) 109 { 110 const struct rte_flow_item_eth *spec = item->spec; 111 const struct rte_flow_item_eth *umask = item->mask; 112 const struct rte_flow_item_eth *mask; 113 114 /* If user has not given any mask, then use chelsio supported mask. */ 115 mask = umask ? umask : (const struct rte_flow_item_eth *)dmask; 116 117 /* we don't support SRC_MAC filtering*/ 118 if (!rte_is_zero_ether_addr(&mask->src)) 119 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 120 item, 121 "src mac filtering not supported"); 122 123 if (!rte_is_zero_ether_addr(&mask->dst)) { 124 const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0]; 125 const u8 *m = (const u8 *)&mask->dst.addr_bytes[0]; 126 struct rte_flow *flow = (struct rte_flow *)fs->private; 127 struct port_info *pi = (struct port_info *) 128 (flow->dev->data->dev_private); 129 int idx; 130 131 idx = cxgbe_mpstcam_alloc(pi, addr, m); 132 if (idx <= 0) 133 return rte_flow_error_set(e, idx, 134 RTE_FLOW_ERROR_TYPE_ITEM, 135 NULL, "unable to allocate mac" 136 " entry in h/w"); 137 CXGBE_FILL_FS(idx, 0x1ff, macidx); 138 } 139 140 CXGBE_FILL_FS(be16_to_cpu(spec->type), 141 be16_to_cpu(mask->type), ethtype); 142 return 0; 143 } 144 145 static int 146 ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item, 147 struct ch_filter_specification *fs, 148 struct rte_flow_error *e) 149 { 150 const struct rte_flow_item_phy_port *val = item->spec; 151 const struct rte_flow_item_phy_port *umask = item->mask; 152 const struct rte_flow_item_phy_port *mask; 153 154 mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask; 155 156 if (val->index > 0x7) 157 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, 158 item, 159 "port index upto 0x7 is supported"); 160 161 CXGBE_FILL_FS(val->index, mask->index, iport); 162 163 return 0; 164 } 165 166 static int 167 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item, 168 struct ch_filter_specification *fs, 169 struct rte_flow_error *e) 170 { 171 const struct rte_flow_item_udp *val = item->spec; 172 const struct rte_flow_item_udp *umask = item->mask; 173 const struct rte_flow_item_udp *mask; 174 175 mask = umask ? umask : (const struct rte_flow_item_udp *)dmask; 176 177 if (mask->hdr.dgram_len || mask->hdr.dgram_cksum) 178 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 179 item, 180 "udp: only src/dst port supported"); 181 182 CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto); 183 if (!val) 184 return 0; 185 CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port), 186 be16_to_cpu(mask->hdr.src_port), fport); 187 CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port), 188 be16_to_cpu(mask->hdr.dst_port), lport); 189 return 0; 190 } 191 192 static int 193 ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item, 194 struct ch_filter_specification *fs, 195 struct rte_flow_error *e) 196 { 197 const struct rte_flow_item_tcp *val = item->spec; 198 const struct rte_flow_item_tcp *umask = item->mask; 199 const struct rte_flow_item_tcp *mask; 200 201 mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask; 202 203 if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off || 204 mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum || 205 mask->hdr.tcp_urp) 206 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 207 item, 208 "tcp: only src/dst port supported"); 209 210 CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto); 211 if (!val) 212 return 0; 213 CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port), 214 be16_to_cpu(mask->hdr.src_port), fport); 215 CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port), 216 be16_to_cpu(mask->hdr.dst_port), lport); 217 return 0; 218 } 219 220 static int 221 ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item, 222 struct ch_filter_specification *fs, 223 struct rte_flow_error *e) 224 { 225 const struct rte_flow_item_ipv4 *val = item->spec; 226 const struct rte_flow_item_ipv4 *umask = item->mask; 227 const struct rte_flow_item_ipv4 *mask; 228 229 mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask; 230 231 if (mask->hdr.time_to_live || mask->hdr.type_of_service) 232 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 233 item, "ttl/tos are not supported"); 234 235 fs->type = FILTER_TYPE_IPV4; 236 CXGBE_FILL_FS(RTE_ETHER_TYPE_IPv4, 0xffff, ethtype); 237 if (!val) 238 return 0; /* ipv4 wild card */ 239 240 CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto); 241 CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip); 242 CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip); 243 244 return 0; 245 } 246 247 static int 248 ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item, 249 struct ch_filter_specification *fs, 250 struct rte_flow_error *e) 251 { 252 const struct rte_flow_item_ipv6 *val = item->spec; 253 const struct rte_flow_item_ipv6 *umask = item->mask; 254 const struct rte_flow_item_ipv6 *mask; 255 256 mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask; 257 258 if (mask->hdr.vtc_flow || 259 mask->hdr.payload_len || mask->hdr.hop_limits) 260 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, 261 item, 262 "tc/flow/hop are not supported"); 263 264 fs->type = FILTER_TYPE_IPV6; 265 CXGBE_FILL_FS(RTE_ETHER_TYPE_IPv6, 0xffff, ethtype); 266 if (!val) 267 return 0; /* ipv6 wild card */ 268 269 CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto); 270 CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip); 271 CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip); 272 273 return 0; 274 } 275 276 static int 277 cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr, 278 struct rte_flow_error *e) 279 { 280 if (attr->egress) 281 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR, 282 attr, "attribute:<egress> is" 283 " not supported !"); 284 if (attr->group > 0) 285 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR, 286 attr, "group parameter is" 287 " not supported."); 288 289 flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX; 290 291 return 0; 292 } 293 294 static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq) 295 { 296 struct port_info *pi = ethdev2pinfo(dev); 297 298 if (rxq > pi->n_rx_qsets) 299 return -EINVAL; 300 return 0; 301 } 302 303 static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx) 304 { 305 struct adapter *adap = ethdev2adap(f->dev); 306 struct ch_filter_specification fs = f->fs; 307 308 if (fidx >= adap->tids.nftids) { 309 dev_err(adap, "invalid flow index %d.\n", fidx); 310 return -EINVAL; 311 } 312 if (!is_filter_set(&adap->tids, fidx, fs.type)) { 313 dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f); 314 return -EINVAL; 315 } 316 317 return 0; 318 } 319 320 static int 321 cxgbe_validate_fidxonadd(struct ch_filter_specification *fs, 322 struct adapter *adap, unsigned int fidx) 323 { 324 if (is_filter_set(&adap->tids, fidx, fs->type)) { 325 dev_err(adap, "filter index: %d is busy.\n", fidx); 326 return -EBUSY; 327 } 328 if (fidx >= adap->tids.nftids) { 329 dev_err(adap, "filter index (%u) >= max(%u)\n", 330 fidx, adap->tids.nftids); 331 return -ERANGE; 332 } 333 334 return 0; 335 } 336 337 static int 338 cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del) 339 { 340 if (flow->fs.cap) 341 return 0; /* Hash filters */ 342 return del ? cxgbe_validate_fidxondel(flow->f, fidx) : 343 cxgbe_validate_fidxonadd(&flow->fs, 344 ethdev2adap(flow->dev), fidx); 345 } 346 347 static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx) 348 { 349 struct ch_filter_specification *fs = &flow->fs; 350 struct adapter *adap = ethdev2adap(flow->dev); 351 352 /* For tcam get the next available slot, if default value specified */ 353 if (flow->fidx == FILTER_ID_MAX) { 354 int idx; 355 356 idx = cxgbe_alloc_ftid(adap, fs->type); 357 if (idx < 0) { 358 dev_err(adap, "unable to get a filter index in tcam\n"); 359 return -ENOMEM; 360 } 361 *fidx = (unsigned int)idx; 362 } else { 363 *fidx = flow->fidx; 364 } 365 366 return 0; 367 } 368 369 static int 370 cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type) 371 { 372 const struct rte_flow_item *i; 373 int j, index = -ENOENT; 374 375 for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) { 376 if (i->type == type) { 377 index = j; 378 break; 379 } 380 } 381 382 return index; 383 } 384 385 static int 386 ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs) 387 { 388 /* nmode: 389 * BIT_0 = [src_ip], BIT_1 = [dst_ip] 390 * BIT_2 = [src_port], BIT_3 = [dst_port] 391 * 392 * Only below cases are supported as per our spec. 393 */ 394 switch (nmode) { 395 case 0: /* 0000b */ 396 fs->nat_mode = NAT_MODE_NONE; 397 break; 398 case 2: /* 0010b */ 399 fs->nat_mode = NAT_MODE_DIP; 400 break; 401 case 5: /* 0101b */ 402 fs->nat_mode = NAT_MODE_SIP_SP; 403 break; 404 case 7: /* 0111b */ 405 fs->nat_mode = NAT_MODE_DIP_SIP_SP; 406 break; 407 case 10: /* 1010b */ 408 fs->nat_mode = NAT_MODE_DIP_DP; 409 break; 410 case 11: /* 1011b */ 411 fs->nat_mode = NAT_MODE_DIP_DP_SIP; 412 break; 413 case 14: /* 1110b */ 414 fs->nat_mode = NAT_MODE_DIP_DP_SP; 415 break; 416 case 15: /* 1111b */ 417 fs->nat_mode = NAT_MODE_ALL; 418 break; 419 default: 420 return -EINVAL; 421 } 422 423 return 0; 424 } 425 426 static int 427 ch_rte_parse_atype_switch(const struct rte_flow_action *a, 428 const struct rte_flow_item items[], 429 uint8_t *nmode, 430 struct ch_filter_specification *fs, 431 struct rte_flow_error *e) 432 { 433 const struct rte_flow_action_of_set_vlan_vid *vlanid; 434 const struct rte_flow_action_of_push_vlan *pushvlan; 435 const struct rte_flow_action_set_ipv4 *ipv4; 436 const struct rte_flow_action_set_ipv6 *ipv6; 437 const struct rte_flow_action_set_tp *tp_port; 438 const struct rte_flow_action_phy_port *port; 439 int item_index; 440 441 switch (a->type) { 442 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 443 vlanid = (const struct rte_flow_action_of_set_vlan_vid *) 444 a->conf; 445 fs->newvlan = VLAN_REWRITE; 446 fs->vlan = vlanid->vlan_vid; 447 break; 448 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 449 pushvlan = (const struct rte_flow_action_of_push_vlan *) 450 a->conf; 451 if (pushvlan->ethertype != RTE_ETHER_TYPE_VLAN) 452 return rte_flow_error_set(e, EINVAL, 453 RTE_FLOW_ERROR_TYPE_ACTION, a, 454 "only ethertype 0x8100 " 455 "supported for push vlan."); 456 fs->newvlan = VLAN_INSERT; 457 break; 458 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 459 fs->newvlan = VLAN_REMOVE; 460 break; 461 case RTE_FLOW_ACTION_TYPE_PHY_PORT: 462 port = (const struct rte_flow_action_phy_port *)a->conf; 463 fs->eport = port->index; 464 break; 465 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 466 item_index = cxgbe_get_flow_item_index(items, 467 RTE_FLOW_ITEM_TYPE_IPV4); 468 if (item_index < 0) 469 return rte_flow_error_set(e, EINVAL, 470 RTE_FLOW_ERROR_TYPE_ACTION, a, 471 "No RTE_FLOW_ITEM_TYPE_IPV4 " 472 "found."); 473 474 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf; 475 memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr)); 476 *nmode |= 1 << 0; 477 break; 478 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 479 item_index = cxgbe_get_flow_item_index(items, 480 RTE_FLOW_ITEM_TYPE_IPV4); 481 if (item_index < 0) 482 return rte_flow_error_set(e, EINVAL, 483 RTE_FLOW_ERROR_TYPE_ACTION, a, 484 "No RTE_FLOW_ITEM_TYPE_IPV4 " 485 "found."); 486 487 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf; 488 memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr)); 489 *nmode |= 1 << 1; 490 break; 491 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 492 item_index = cxgbe_get_flow_item_index(items, 493 RTE_FLOW_ITEM_TYPE_IPV6); 494 if (item_index < 0) 495 return rte_flow_error_set(e, EINVAL, 496 RTE_FLOW_ERROR_TYPE_ACTION, a, 497 "No RTE_FLOW_ITEM_TYPE_IPV6 " 498 "found."); 499 500 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf; 501 memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr)); 502 *nmode |= 1 << 0; 503 break; 504 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 505 item_index = cxgbe_get_flow_item_index(items, 506 RTE_FLOW_ITEM_TYPE_IPV6); 507 if (item_index < 0) 508 return rte_flow_error_set(e, EINVAL, 509 RTE_FLOW_ERROR_TYPE_ACTION, a, 510 "No RTE_FLOW_ITEM_TYPE_IPV6 " 511 "found."); 512 513 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf; 514 memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr)); 515 *nmode |= 1 << 1; 516 break; 517 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 518 item_index = cxgbe_get_flow_item_index(items, 519 RTE_FLOW_ITEM_TYPE_TCP); 520 if (item_index < 0) { 521 item_index = 522 cxgbe_get_flow_item_index(items, 523 RTE_FLOW_ITEM_TYPE_UDP); 524 if (item_index < 0) 525 return rte_flow_error_set(e, EINVAL, 526 RTE_FLOW_ERROR_TYPE_ACTION, a, 527 "No RTE_FLOW_ITEM_TYPE_TCP or " 528 "RTE_FLOW_ITEM_TYPE_UDP found"); 529 } 530 531 tp_port = (const struct rte_flow_action_set_tp *)a->conf; 532 fs->nat_fport = be16_to_cpu(tp_port->port); 533 *nmode |= 1 << 2; 534 break; 535 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 536 item_index = cxgbe_get_flow_item_index(items, 537 RTE_FLOW_ITEM_TYPE_TCP); 538 if (item_index < 0) { 539 item_index = 540 cxgbe_get_flow_item_index(items, 541 RTE_FLOW_ITEM_TYPE_UDP); 542 if (item_index < 0) 543 return rte_flow_error_set(e, EINVAL, 544 RTE_FLOW_ERROR_TYPE_ACTION, a, 545 "No RTE_FLOW_ITEM_TYPE_TCP or " 546 "RTE_FLOW_ITEM_TYPE_UDP found"); 547 } 548 549 tp_port = (const struct rte_flow_action_set_tp *)a->conf; 550 fs->nat_lport = be16_to_cpu(tp_port->port); 551 *nmode |= 1 << 3; 552 break; 553 case RTE_FLOW_ACTION_TYPE_MAC_SWAP: 554 item_index = cxgbe_get_flow_item_index(items, 555 RTE_FLOW_ITEM_TYPE_ETH); 556 if (item_index < 0) 557 return rte_flow_error_set(e, EINVAL, 558 RTE_FLOW_ERROR_TYPE_ACTION, a, 559 "No RTE_FLOW_ITEM_TYPE_ETH " 560 "found"); 561 fs->swapmac = 1; 562 break; 563 default: 564 /* We are not supposed to come here */ 565 return rte_flow_error_set(e, EINVAL, 566 RTE_FLOW_ERROR_TYPE_ACTION, a, 567 "Action not supported"); 568 } 569 570 return 0; 571 } 572 573 static int 574 cxgbe_rtef_parse_actions(struct rte_flow *flow, 575 const struct rte_flow_item items[], 576 const struct rte_flow_action action[], 577 struct rte_flow_error *e) 578 { 579 struct ch_filter_specification *fs = &flow->fs; 580 uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0; 581 const struct rte_flow_action_queue *q; 582 const struct rte_flow_action *a; 583 char abit = 0; 584 int ret; 585 586 for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) { 587 switch (a->type) { 588 case RTE_FLOW_ACTION_TYPE_VOID: 589 continue; 590 case RTE_FLOW_ACTION_TYPE_DROP: 591 if (abit++) 592 return rte_flow_error_set(e, EINVAL, 593 RTE_FLOW_ERROR_TYPE_ACTION, a, 594 "specify only 1 pass/drop"); 595 fs->action = FILTER_DROP; 596 break; 597 case RTE_FLOW_ACTION_TYPE_QUEUE: 598 q = (const struct rte_flow_action_queue *)a->conf; 599 if (!q) 600 return rte_flow_error_set(e, EINVAL, 601 RTE_FLOW_ERROR_TYPE_ACTION, q, 602 "specify rx queue index"); 603 if (check_rxq(flow->dev, q->index)) 604 return rte_flow_error_set(e, EINVAL, 605 RTE_FLOW_ERROR_TYPE_ACTION, q, 606 "Invalid rx queue"); 607 if (abit++) 608 return rte_flow_error_set(e, EINVAL, 609 RTE_FLOW_ERROR_TYPE_ACTION, a, 610 "specify only 1 pass/drop"); 611 fs->action = FILTER_PASS; 612 fs->dirsteer = 1; 613 fs->iq = q->index; 614 break; 615 case RTE_FLOW_ACTION_TYPE_COUNT: 616 fs->hitcnts = 1; 617 break; 618 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: 619 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: 620 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: 621 case RTE_FLOW_ACTION_TYPE_PHY_PORT: 622 case RTE_FLOW_ACTION_TYPE_MAC_SWAP: 623 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: 624 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: 625 nat_ipv4++; 626 goto action_switch; 627 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: 628 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: 629 nat_ipv6++; 630 goto action_switch; 631 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: 632 case RTE_FLOW_ACTION_TYPE_SET_TP_DST: 633 action_switch: 634 /* We allow multiple switch actions, but switch is 635 * not compatible with either queue or drop 636 */ 637 if (abit++ && fs->action != FILTER_SWITCH) 638 return rte_flow_error_set(e, EINVAL, 639 RTE_FLOW_ERROR_TYPE_ACTION, a, 640 "overlapping action specified"); 641 if (nat_ipv4 && nat_ipv6) 642 return rte_flow_error_set(e, EINVAL, 643 RTE_FLOW_ERROR_TYPE_ACTION, a, 644 "Can't have one address ipv4 and the" 645 " other ipv6"); 646 647 ret = ch_rte_parse_atype_switch(a, items, &nmode, fs, 648 e); 649 if (ret) 650 return ret; 651 fs->action = FILTER_SWITCH; 652 break; 653 default: 654 /* Not supported action : return error */ 655 return rte_flow_error_set(e, ENOTSUP, 656 RTE_FLOW_ERROR_TYPE_ACTION, 657 a, "Action not supported"); 658 } 659 } 660 661 if (ch_rte_parse_nat(nmode, fs)) 662 return rte_flow_error_set(e, EINVAL, 663 RTE_FLOW_ERROR_TYPE_ACTION, a, 664 "invalid settings for swich action"); 665 return 0; 666 } 667 668 static struct chrte_fparse parseitem[] = { 669 [RTE_FLOW_ITEM_TYPE_ETH] = { 670 .fptr = ch_rte_parsetype_eth, 671 .dmask = &(const struct rte_flow_item_eth){ 672 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", 673 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00", 674 .type = 0xffff, 675 } 676 }, 677 678 [RTE_FLOW_ITEM_TYPE_PHY_PORT] = { 679 .fptr = ch_rte_parsetype_port, 680 .dmask = &(const struct rte_flow_item_phy_port){ 681 .index = 0x7, 682 } 683 }, 684 685 [RTE_FLOW_ITEM_TYPE_IPV4] = { 686 .fptr = ch_rte_parsetype_ipv4, 687 .dmask = &rte_flow_item_ipv4_mask, 688 }, 689 690 [RTE_FLOW_ITEM_TYPE_IPV6] = { 691 .fptr = ch_rte_parsetype_ipv6, 692 .dmask = &rte_flow_item_ipv6_mask, 693 }, 694 695 [RTE_FLOW_ITEM_TYPE_UDP] = { 696 .fptr = ch_rte_parsetype_udp, 697 .dmask = &rte_flow_item_udp_mask, 698 }, 699 700 [RTE_FLOW_ITEM_TYPE_TCP] = { 701 .fptr = ch_rte_parsetype_tcp, 702 .dmask = &rte_flow_item_tcp_mask, 703 }, 704 }; 705 706 static int 707 cxgbe_rtef_parse_items(struct rte_flow *flow, 708 const struct rte_flow_item items[], 709 struct rte_flow_error *e) 710 { 711 struct adapter *adap = ethdev2adap(flow->dev); 712 const struct rte_flow_item *i; 713 char repeat[ARRAY_SIZE(parseitem)] = {0}; 714 715 for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) { 716 struct chrte_fparse *idx; 717 int ret; 718 719 if (i->type >= ARRAY_SIZE(parseitem)) 720 return rte_flow_error_set(e, ENOTSUP, 721 RTE_FLOW_ERROR_TYPE_ITEM, 722 i, "Item not supported"); 723 724 switch (i->type) { 725 case RTE_FLOW_ITEM_TYPE_VOID: 726 continue; 727 default: 728 /* check if item is repeated */ 729 if (repeat[i->type]) 730 return rte_flow_error_set(e, ENOTSUP, 731 RTE_FLOW_ERROR_TYPE_ITEM, i, 732 "parse items cannot be repeated (except void)"); 733 repeat[i->type] = 1; 734 735 /* No spec found for this pattern item. Skip it */ 736 if (!i->spec) 737 break; 738 739 /* validate the item */ 740 ret = cxgbe_validate_item(i, e); 741 if (ret) 742 return ret; 743 744 idx = &flow->item_parser[i->type]; 745 if (!idx || !idx->fptr) { 746 return rte_flow_error_set(e, ENOTSUP, 747 RTE_FLOW_ERROR_TYPE_ITEM, i, 748 "Item not supported"); 749 } else { 750 ret = idx->fptr(idx->dmask, i, &flow->fs, e); 751 if (ret) 752 return ret; 753 } 754 } 755 } 756 757 cxgbe_fill_filter_region(adap, &flow->fs); 758 759 return 0; 760 } 761 762 static int 763 cxgbe_flow_parse(struct rte_flow *flow, 764 const struct rte_flow_attr *attr, 765 const struct rte_flow_item item[], 766 const struct rte_flow_action action[], 767 struct rte_flow_error *e) 768 { 769 int ret; 770 /* parse user request into ch_filter_specification */ 771 ret = cxgbe_rtef_parse_attr(flow, attr, e); 772 if (ret) 773 return ret; 774 ret = cxgbe_rtef_parse_items(flow, item, e); 775 if (ret) 776 return ret; 777 return cxgbe_rtef_parse_actions(flow, item, action, e); 778 } 779 780 static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow) 781 { 782 struct ch_filter_specification *fs = &flow->fs; 783 struct adapter *adap = ethdev2adap(dev); 784 struct tid_info *t = &adap->tids; 785 struct filter_ctx ctx; 786 unsigned int fidx; 787 int err; 788 789 if (cxgbe_get_fidx(flow, &fidx)) 790 return -ENOMEM; 791 if (cxgbe_verify_fidx(flow, fidx, 0)) 792 return -1; 793 794 t4_init_completion(&ctx.completion); 795 /* go create the filter */ 796 err = cxgbe_set_filter(dev, fidx, fs, &ctx); 797 if (err) { 798 dev_err(adap, "Error %d while creating filter.\n", err); 799 return err; 800 } 801 802 /* Poll the FW for reply */ 803 err = cxgbe_poll_for_completion(&adap->sge.fw_evtq, 804 CXGBE_FLOW_POLL_MS, 805 CXGBE_FLOW_POLL_CNT, 806 &ctx.completion); 807 if (err) { 808 dev_err(adap, "Filter set operation timed out (%d)\n", err); 809 return err; 810 } 811 if (ctx.result) { 812 dev_err(adap, "Hardware error %d while creating the filter.\n", 813 ctx.result); 814 return ctx.result; 815 } 816 817 if (fs->cap) { /* to destroy the filter */ 818 flow->fidx = ctx.tid; 819 flow->f = lookup_tid(t, ctx.tid); 820 } else { 821 flow->fidx = fidx; 822 flow->f = &adap->tids.ftid_tab[fidx]; 823 } 824 825 return 0; 826 } 827 828 static struct rte_flow * 829 cxgbe_flow_create(struct rte_eth_dev *dev, 830 const struct rte_flow_attr *attr, 831 const struct rte_flow_item item[], 832 const struct rte_flow_action action[], 833 struct rte_flow_error *e) 834 { 835 struct rte_flow *flow; 836 int ret; 837 838 flow = t4_os_alloc(sizeof(struct rte_flow)); 839 if (!flow) { 840 rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, 841 NULL, "Unable to allocate memory for" 842 " filter_entry"); 843 return NULL; 844 } 845 846 flow->item_parser = parseitem; 847 flow->dev = dev; 848 flow->fs.private = (void *)flow; 849 850 if (cxgbe_flow_parse(flow, attr, item, action, e)) { 851 t4_os_free(flow); 852 return NULL; 853 } 854 855 /* go, interact with cxgbe_filter */ 856 ret = __cxgbe_flow_create(dev, flow); 857 if (ret) { 858 rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE, 859 NULL, "Unable to create flow rule"); 860 t4_os_free(flow); 861 return NULL; 862 } 863 864 flow->f->private = flow; /* Will be used during flush */ 865 866 return flow; 867 } 868 869 static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) 870 { 871 struct adapter *adap = ethdev2adap(dev); 872 struct filter_entry *f = flow->f; 873 struct ch_filter_specification *fs; 874 struct filter_ctx ctx; 875 int err; 876 877 fs = &f->fs; 878 if (cxgbe_verify_fidx(flow, flow->fidx, 1)) 879 return -1; 880 881 t4_init_completion(&ctx.completion); 882 err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx); 883 if (err) { 884 dev_err(adap, "Error %d while deleting filter.\n", err); 885 return err; 886 } 887 888 /* Poll the FW for reply */ 889 err = cxgbe_poll_for_completion(&adap->sge.fw_evtq, 890 CXGBE_FLOW_POLL_MS, 891 CXGBE_FLOW_POLL_CNT, 892 &ctx.completion); 893 if (err) { 894 dev_err(adap, "Filter delete operation timed out (%d)\n", err); 895 return err; 896 } 897 if (ctx.result) { 898 dev_err(adap, "Hardware error %d while deleting the filter.\n", 899 ctx.result); 900 return ctx.result; 901 } 902 903 fs = &flow->fs; 904 if (fs->mask.macidx) { 905 struct port_info *pi = (struct port_info *) 906 (dev->data->dev_private); 907 int ret; 908 909 ret = cxgbe_mpstcam_remove(pi, fs->val.macidx); 910 if (!ret) 911 return ret; 912 } 913 914 return 0; 915 } 916 917 static int 918 cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, 919 struct rte_flow_error *e) 920 { 921 int ret; 922 923 ret = __cxgbe_flow_destroy(dev, flow); 924 if (ret) 925 return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE, 926 flow, "error destroying filter."); 927 t4_os_free(flow); 928 return 0; 929 } 930 931 static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count, 932 u64 *byte_count) 933 { 934 struct adapter *adap = ethdev2adap(flow->dev); 935 struct ch_filter_specification fs = flow->f->fs; 936 unsigned int fidx = flow->fidx; 937 int ret = 0; 938 939 ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0); 940 if (ret) 941 return ret; 942 return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1); 943 } 944 945 static int 946 cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, 947 const struct rte_flow_action *action, void *data, 948 struct rte_flow_error *e) 949 { 950 struct adapter *adap = ethdev2adap(flow->dev); 951 struct ch_filter_specification fs; 952 struct rte_flow_query_count *c; 953 struct filter_entry *f; 954 int ret; 955 956 RTE_SET_USED(dev); 957 958 f = flow->f; 959 fs = f->fs; 960 961 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) 962 return rte_flow_error_set(e, ENOTSUP, 963 RTE_FLOW_ERROR_TYPE_ACTION, NULL, 964 "only count supported for query"); 965 966 /* 967 * This is a valid operation, Since we are allowed to do chelsio 968 * specific operations in rte side of our code but not vise-versa 969 * 970 * So, fs can be queried/modified here BUT rte_flow_query_count 971 * cannot be worked on by the lower layer since we want to maintain 972 * it as rte_flow agnostic. 973 */ 974 if (!fs.hitcnts) 975 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, 976 &fs, "filter hit counters were not" 977 " enabled during filter creation"); 978 979 c = (struct rte_flow_query_count *)data; 980 ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes); 981 if (ret) 982 return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION, 983 f, "cxgbe pmd failed to" 984 " perform query"); 985 986 /* Query was successful */ 987 c->bytes_set = 1; 988 c->hits_set = 1; 989 if (c->reset) 990 cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true); 991 992 return 0; /* success / partial_success */ 993 } 994 995 static int 996 cxgbe_flow_validate(struct rte_eth_dev *dev, 997 const struct rte_flow_attr *attr, 998 const struct rte_flow_item item[], 999 const struct rte_flow_action action[], 1000 struct rte_flow_error *e) 1001 { 1002 struct adapter *adap = ethdev2adap(dev); 1003 struct rte_flow *flow; 1004 unsigned int fidx; 1005 int ret; 1006 1007 flow = t4_os_alloc(sizeof(struct rte_flow)); 1008 if (!flow) 1009 return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, 1010 NULL, 1011 "Unable to allocate memory for filter_entry"); 1012 1013 flow->item_parser = parseitem; 1014 flow->dev = dev; 1015 1016 ret = cxgbe_flow_parse(flow, attr, item, action, e); 1017 if (ret) { 1018 t4_os_free(flow); 1019 return ret; 1020 } 1021 1022 if (validate_filter(adap, &flow->fs)) { 1023 t4_os_free(flow); 1024 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, 1025 NULL, 1026 "validation failed. Check f/w config file."); 1027 } 1028 1029 if (cxgbe_get_fidx(flow, &fidx)) { 1030 t4_os_free(flow); 1031 return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, 1032 NULL, "no memory in tcam."); 1033 } 1034 1035 if (cxgbe_verify_fidx(flow, fidx, 0)) { 1036 t4_os_free(flow); 1037 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, 1038 NULL, "validation failed"); 1039 } 1040 1041 t4_os_free(flow); 1042 return 0; 1043 } 1044 1045 /* 1046 * @ret : > 0 filter destroyed succsesfully 1047 * < 0 error destroying filter 1048 * == 1 filter not active / not found 1049 */ 1050 static int 1051 cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev, 1052 struct rte_flow_error *e) 1053 { 1054 if (f && (f->valid || f->pending) && 1055 f->dev == dev && /* Only if user has asked for this port */ 1056 f->private) /* We (rte_flow) created this filter */ 1057 return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private, 1058 e); 1059 return 1; 1060 } 1061 1062 static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e) 1063 { 1064 struct adapter *adap = ethdev2adap(dev); 1065 unsigned int i; 1066 int ret = 0; 1067 1068 if (adap->tids.ftid_tab) { 1069 struct filter_entry *f = &adap->tids.ftid_tab[0]; 1070 1071 for (i = 0; i < adap->tids.nftids; i++, f++) { 1072 ret = cxgbe_check_n_destroy(f, dev, e); 1073 if (ret < 0) 1074 goto out; 1075 } 1076 } 1077 1078 if (is_hashfilter(adap) && adap->tids.tid_tab) { 1079 struct filter_entry *f; 1080 1081 for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) { 1082 f = (struct filter_entry *)adap->tids.tid_tab[i]; 1083 1084 ret = cxgbe_check_n_destroy(f, dev, e); 1085 if (ret < 0) 1086 goto out; 1087 } 1088 } 1089 1090 out: 1091 return ret >= 0 ? 0 : ret; 1092 } 1093 1094 static const struct rte_flow_ops cxgbe_flow_ops = { 1095 .validate = cxgbe_flow_validate, 1096 .create = cxgbe_flow_create, 1097 .destroy = cxgbe_flow_destroy, 1098 .flush = cxgbe_flow_flush, 1099 .query = cxgbe_flow_query, 1100 .isolate = NULL, 1101 }; 1102 1103 int 1104 cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev, 1105 enum rte_filter_type filter_type, 1106 enum rte_filter_op filter_op, 1107 void *arg) 1108 { 1109 int ret = 0; 1110 1111 RTE_SET_USED(dev); 1112 switch (filter_type) { 1113 case RTE_ETH_FILTER_GENERIC: 1114 if (filter_op != RTE_ETH_FILTER_GET) 1115 return -EINVAL; 1116 *(const void **)arg = &cxgbe_flow_ops; 1117 break; 1118 default: 1119 ret = -ENOTSUP; 1120 break; 1121 } 1122 return ret; 1123 } 1124