1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2018-2022 NXP 3 */ 4 5 #include <sys/queue.h> 6 #include <stdio.h> 7 #include <errno.h> 8 #include <stdint.h> 9 #include <string.h> 10 #include <unistd.h> 11 #include <stdarg.h> 12 13 #include <rte_ethdev.h> 14 #include <rte_log.h> 15 #include <rte_malloc.h> 16 #include <rte_flow_driver.h> 17 #include <rte_tailq.h> 18 19 #include <fsl_dpni.h> 20 #include <fsl_dpkg.h> 21 22 #include <dpaa2_ethdev.h> 23 #include <dpaa2_pmd_logs.h> 24 25 /* Workaround to discriminate the UDP/TCP/SCTP 26 * with next protocol of l3. 27 * MC/WRIOP are not able to identify 28 * the l4 protocol with l4 ports. 29 */ 30 static int mc_l4_port_identification; 31 32 static char *dpaa2_flow_control_log; 33 static uint16_t dpaa2_flow_miss_flow_id; /* Default miss flow id is 0. */ 34 35 enum dpaa2_flow_entry_size { 36 DPAA2_FLOW_ENTRY_MIN_SIZE = (DPNI_MAX_KEY_SIZE / 2), 37 DPAA2_FLOW_ENTRY_MAX_SIZE = DPNI_MAX_KEY_SIZE 38 }; 39 40 enum dpaa2_flow_dist_type { 41 DPAA2_FLOW_QOS_TYPE = 1 << 0, 42 DPAA2_FLOW_FS_TYPE = 1 << 1 43 }; 44 45 #define DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT 16 46 #define DPAA2_FLOW_MAX_KEY_SIZE 16 47 48 struct dpaa2_dev_flow { 49 LIST_ENTRY(dpaa2_dev_flow) next; 50 struct dpni_rule_cfg qos_rule; 51 uint8_t *qos_key_addr; 52 uint8_t *qos_mask_addr; 53 uint16_t qos_rule_size; 54 struct dpni_rule_cfg fs_rule; 55 uint8_t qos_real_key_size; 56 uint8_t fs_real_key_size; 57 uint8_t *fs_key_addr; 58 uint8_t *fs_mask_addr; 59 uint16_t fs_rule_size; 60 uint8_t tc_id; /** Traffic Class ID. */ 61 uint8_t tc_index; /** index within this Traffic Class. */ 62 enum rte_flow_action_type action_type; 63 struct dpni_fs_action_cfg fs_action_cfg; 64 }; 65 66 static const 67 enum rte_flow_item_type dpaa2_supported_pattern_type[] = { 68 RTE_FLOW_ITEM_TYPE_END, 69 RTE_FLOW_ITEM_TYPE_ETH, 70 RTE_FLOW_ITEM_TYPE_VLAN, 71 RTE_FLOW_ITEM_TYPE_IPV4, 72 RTE_FLOW_ITEM_TYPE_IPV6, 73 RTE_FLOW_ITEM_TYPE_ICMP, 74 RTE_FLOW_ITEM_TYPE_UDP, 75 RTE_FLOW_ITEM_TYPE_TCP, 76 RTE_FLOW_ITEM_TYPE_SCTP, 77 RTE_FLOW_ITEM_TYPE_GRE, 78 }; 79 80 static const 81 enum rte_flow_action_type dpaa2_supported_action_type[] = { 82 RTE_FLOW_ACTION_TYPE_END, 83 RTE_FLOW_ACTION_TYPE_QUEUE, 84 RTE_FLOW_ACTION_TYPE_PORT_ID, 85 RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, 86 RTE_FLOW_ACTION_TYPE_RSS 87 }; 88 89 #ifndef __cplusplus 90 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = { 91 .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 92 .hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 93 .hdr.ether_type = RTE_BE16(0xffff), 94 }; 95 96 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = { 97 .hdr.vlan_tci = RTE_BE16(0xffff), 98 }; 99 100 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = { 101 .hdr.src_addr = RTE_BE32(0xffffffff), 102 .hdr.dst_addr = RTE_BE32(0xffffffff), 103 .hdr.next_proto_id = 0xff, 104 }; 105 106 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = { 107 .hdr = { 108 .src_addr = RTE_IPV6_MASK_FULL, 109 .dst_addr = RTE_IPV6_MASK_FULL, 110 .proto = 0xff 111 }, 112 }; 113 114 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = { 115 .hdr.icmp_type = 0xff, 116 .hdr.icmp_code = 0xff, 117 }; 118 119 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = { 120 .hdr = { 121 .src_port = RTE_BE16(0xffff), 122 .dst_port = RTE_BE16(0xffff), 123 }, 124 }; 125 126 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = { 127 .hdr = { 128 .src_port = RTE_BE16(0xffff), 129 .dst_port = RTE_BE16(0xffff), 130 }, 131 }; 132 133 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = { 134 .hdr = { 135 .src_port = RTE_BE16(0xffff), 136 .dst_port = RTE_BE16(0xffff), 137 }, 138 }; 139 140 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = { 141 .protocol = RTE_BE16(0xffff), 142 }; 143 #endif 144 145 #define DPAA2_FLOW_DUMP printf 146 147 static inline void 148 dpaa2_prot_field_string(uint32_t prot, uint32_t field, 149 char *string) 150 { 151 if (!dpaa2_flow_control_log) 152 return; 153 154 if (prot == NET_PROT_ETH) { 155 strcpy(string, "eth"); 156 if (field == NH_FLD_ETH_DA) 157 strcat(string, ".dst"); 158 else if (field == NH_FLD_ETH_SA) 159 strcat(string, ".src"); 160 else if (field == NH_FLD_ETH_TYPE) 161 strcat(string, ".type"); 162 else 163 strcat(string, ".unknown field"); 164 } else if (prot == NET_PROT_VLAN) { 165 strcpy(string, "vlan"); 166 if (field == NH_FLD_VLAN_TCI) 167 strcat(string, ".tci"); 168 else 169 strcat(string, ".unknown field"); 170 } else if (prot == NET_PROT_IP) { 171 strcpy(string, "ip"); 172 if (field == NH_FLD_IP_SRC) 173 strcat(string, ".src"); 174 else if (field == NH_FLD_IP_DST) 175 strcat(string, ".dst"); 176 else if (field == NH_FLD_IP_PROTO) 177 strcat(string, ".proto"); 178 else 179 strcat(string, ".unknown field"); 180 } else if (prot == NET_PROT_TCP) { 181 strcpy(string, "tcp"); 182 if (field == NH_FLD_TCP_PORT_SRC) 183 strcat(string, ".src"); 184 else if (field == NH_FLD_TCP_PORT_DST) 185 strcat(string, ".dst"); 186 else 187 strcat(string, ".unknown field"); 188 } else if (prot == NET_PROT_UDP) { 189 strcpy(string, "udp"); 190 if (field == NH_FLD_UDP_PORT_SRC) 191 strcat(string, ".src"); 192 else if (field == NH_FLD_UDP_PORT_DST) 193 strcat(string, ".dst"); 194 else 195 strcat(string, ".unknown field"); 196 } else if (prot == NET_PROT_ICMP) { 197 strcpy(string, "icmp"); 198 if (field == NH_FLD_ICMP_TYPE) 199 strcat(string, ".type"); 200 else if (field == NH_FLD_ICMP_CODE) 201 strcat(string, ".code"); 202 else 203 strcat(string, ".unknown field"); 204 } else if (prot == NET_PROT_SCTP) { 205 strcpy(string, "sctp"); 206 if (field == NH_FLD_SCTP_PORT_SRC) 207 strcat(string, ".src"); 208 else if (field == NH_FLD_SCTP_PORT_DST) 209 strcat(string, ".dst"); 210 else 211 strcat(string, ".unknown field"); 212 } else if (prot == NET_PROT_GRE) { 213 strcpy(string, "gre"); 214 if (field == NH_FLD_GRE_TYPE) 215 strcat(string, ".type"); 216 else 217 strcat(string, ".unknown field"); 218 } else { 219 strcpy(string, "unknown protocol"); 220 } 221 } 222 223 static inline void 224 dpaa2_flow_qos_extracts_log(const struct dpaa2_dev_priv *priv) 225 { 226 int idx; 227 char string[32]; 228 const struct dpkg_profile_cfg *dpkg = 229 &priv->extract.qos_key_extract.dpkg; 230 const struct dpkg_extract *extract; 231 enum dpkg_extract_type type; 232 enum net_prot prot; 233 uint32_t field; 234 235 if (!dpaa2_flow_control_log) 236 return; 237 238 DPAA2_FLOW_DUMP("QoS table: %d extracts\r\n", 239 dpkg->num_extracts); 240 for (idx = 0; idx < dpkg->num_extracts; idx++) { 241 extract = &dpkg->extracts[idx]; 242 type = extract->type; 243 if (type == DPKG_EXTRACT_FROM_HDR) { 244 prot = extract->extract.from_hdr.prot; 245 field = extract->extract.from_hdr.field; 246 dpaa2_prot_field_string(prot, field, 247 string); 248 } else if (type == DPKG_EXTRACT_FROM_DATA) { 249 sprintf(string, "raw offset/len: %d/%d", 250 extract->extract.from_data.offset, 251 extract->extract.from_data.size); 252 } 253 DPAA2_FLOW_DUMP("%s", string); 254 if ((idx + 1) < dpkg->num_extracts) 255 DPAA2_FLOW_DUMP(" / "); 256 } 257 DPAA2_FLOW_DUMP("\r\n"); 258 } 259 260 static inline void 261 dpaa2_flow_fs_extracts_log(const struct dpaa2_dev_priv *priv, 262 int tc_id) 263 { 264 int idx; 265 char string[32]; 266 const struct dpkg_profile_cfg *dpkg = 267 &priv->extract.tc_key_extract[tc_id].dpkg; 268 const struct dpkg_extract *extract; 269 enum dpkg_extract_type type; 270 enum net_prot prot; 271 uint32_t field; 272 273 if (!dpaa2_flow_control_log) 274 return; 275 276 DPAA2_FLOW_DUMP("FS table: %d extracts in TC[%d]\r\n", 277 dpkg->num_extracts, tc_id); 278 for (idx = 0; idx < dpkg->num_extracts; idx++) { 279 extract = &dpkg->extracts[idx]; 280 type = extract->type; 281 if (type == DPKG_EXTRACT_FROM_HDR) { 282 prot = extract->extract.from_hdr.prot; 283 field = extract->extract.from_hdr.field; 284 dpaa2_prot_field_string(prot, field, 285 string); 286 } else if (type == DPKG_EXTRACT_FROM_DATA) { 287 sprintf(string, "raw offset/len: %d/%d", 288 extract->extract.from_data.offset, 289 extract->extract.from_data.size); 290 } 291 DPAA2_FLOW_DUMP("%s", string); 292 if ((idx + 1) < dpkg->num_extracts) 293 DPAA2_FLOW_DUMP(" / "); 294 } 295 DPAA2_FLOW_DUMP("\r\n"); 296 } 297 298 static inline void 299 dpaa2_flow_qos_entry_log(const char *log_info, 300 const struct dpaa2_dev_flow *flow, int qos_index) 301 { 302 int idx; 303 uint8_t *key, *mask; 304 305 if (!dpaa2_flow_control_log) 306 return; 307 308 if (qos_index >= 0) { 309 DPAA2_FLOW_DUMP("%s QoS entry[%d](size %d/%d) for TC[%d]\r\n", 310 log_info, qos_index, flow->qos_rule_size, 311 flow->qos_rule.key_size, 312 flow->tc_id); 313 } else { 314 DPAA2_FLOW_DUMP("%s QoS entry(size %d/%d) for TC[%d]\r\n", 315 log_info, flow->qos_rule_size, 316 flow->qos_rule.key_size, 317 flow->tc_id); 318 } 319 320 key = flow->qos_key_addr; 321 mask = flow->qos_mask_addr; 322 323 DPAA2_FLOW_DUMP("key:\r\n"); 324 for (idx = 0; idx < flow->qos_rule_size; idx++) 325 DPAA2_FLOW_DUMP("%02x ", key[idx]); 326 327 DPAA2_FLOW_DUMP("\r\nmask:\r\n"); 328 for (idx = 0; idx < flow->qos_rule_size; idx++) 329 DPAA2_FLOW_DUMP("%02x ", mask[idx]); 330 DPAA2_FLOW_DUMP("\r\n"); 331 } 332 333 static inline void 334 dpaa2_flow_fs_entry_log(const char *log_info, 335 const struct dpaa2_dev_flow *flow) 336 { 337 int idx; 338 uint8_t *key, *mask; 339 340 if (!dpaa2_flow_control_log) 341 return; 342 343 DPAA2_FLOW_DUMP("%s FS/TC entry[%d](size %d/%d) of TC[%d]\r\n", 344 log_info, flow->tc_index, 345 flow->fs_rule_size, flow->fs_rule.key_size, 346 flow->tc_id); 347 348 key = flow->fs_key_addr; 349 mask = flow->fs_mask_addr; 350 351 DPAA2_FLOW_DUMP("key:\r\n"); 352 for (idx = 0; idx < flow->fs_rule_size; idx++) 353 DPAA2_FLOW_DUMP("%02x ", key[idx]); 354 355 DPAA2_FLOW_DUMP("\r\nmask:\r\n"); 356 for (idx = 0; idx < flow->fs_rule_size; idx++) 357 DPAA2_FLOW_DUMP("%02x ", mask[idx]); 358 DPAA2_FLOW_DUMP("\r\n"); 359 } 360 361 static int 362 dpaa2_flow_ip_address_extract(enum net_prot prot, 363 uint32_t field) 364 { 365 if (prot == NET_PROT_IPV4 && 366 (field == NH_FLD_IPV4_SRC_IP || 367 field == NH_FLD_IPV4_DST_IP)) 368 return true; 369 else if (prot == NET_PROT_IPV6 && 370 (field == NH_FLD_IPV6_SRC_IP || 371 field == NH_FLD_IPV6_DST_IP)) 372 return true; 373 else if (prot == NET_PROT_IP && 374 (field == NH_FLD_IP_SRC || 375 field == NH_FLD_IP_DST)) 376 return true; 377 378 return false; 379 } 380 381 static int 382 dpaa2_flow_l4_src_port_extract(enum net_prot prot, 383 uint32_t field) 384 { 385 if (prot == NET_PROT_TCP && 386 field == NH_FLD_TCP_PORT_SRC) 387 return true; 388 else if (prot == NET_PROT_UDP && 389 field == NH_FLD_UDP_PORT_SRC) 390 return true; 391 else if (prot == NET_PROT_SCTP && 392 field == NH_FLD_SCTP_PORT_SRC) 393 return true; 394 395 return false; 396 } 397 398 static int 399 dpaa2_flow_l4_dst_port_extract(enum net_prot prot, 400 uint32_t field) 401 { 402 if (prot == NET_PROT_TCP && 403 field == NH_FLD_TCP_PORT_DST) 404 return true; 405 else if (prot == NET_PROT_UDP && 406 field == NH_FLD_UDP_PORT_DST) 407 return true; 408 else if (prot == NET_PROT_SCTP && 409 field == NH_FLD_SCTP_PORT_DST) 410 return true; 411 412 return false; 413 } 414 415 static int 416 dpaa2_flow_add_qos_rule(struct dpaa2_dev_priv *priv, 417 struct dpaa2_dev_flow *flow) 418 { 419 uint16_t qos_index; 420 int ret; 421 struct fsl_mc_io *dpni = priv->hw; 422 423 if (priv->num_rx_tc <= 1 && 424 flow->action_type != RTE_FLOW_ACTION_TYPE_RSS) { 425 DPAA2_PMD_WARN("No QoS Table for FS"); 426 return -EINVAL; 427 } 428 429 /* QoS entry added is only effective for multiple TCs.*/ 430 qos_index = flow->tc_id * priv->fs_entries + flow->tc_index; 431 if (qos_index >= priv->qos_entries) { 432 DPAA2_PMD_ERR("QoS table full(%d >= %d)", 433 qos_index, priv->qos_entries); 434 return -EINVAL; 435 } 436 437 dpaa2_flow_qos_entry_log("Start add", flow, qos_index); 438 439 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, 440 priv->token, &flow->qos_rule, 441 flow->tc_id, qos_index, 442 0, 0); 443 if (ret < 0) { 444 DPAA2_PMD_ERR("Add entry(%d) to table(%d) failed", 445 qos_index, flow->tc_id); 446 return ret; 447 } 448 449 return 0; 450 } 451 452 static int 453 dpaa2_flow_add_fs_rule(struct dpaa2_dev_priv *priv, 454 struct dpaa2_dev_flow *flow) 455 { 456 int ret; 457 struct fsl_mc_io *dpni = priv->hw; 458 459 if (flow->tc_index >= priv->fs_entries) { 460 DPAA2_PMD_ERR("FS table full(%d >= %d)", 461 flow->tc_index, priv->fs_entries); 462 return -EINVAL; 463 } 464 465 dpaa2_flow_fs_entry_log("Start add", flow); 466 467 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, 468 priv->token, flow->tc_id, 469 flow->tc_index, &flow->fs_rule, 470 &flow->fs_action_cfg); 471 if (ret < 0) { 472 DPAA2_PMD_ERR("Add rule(%d) to FS table(%d) failed", 473 flow->tc_index, flow->tc_id); 474 return ret; 475 } 476 477 return 0; 478 } 479 480 static int 481 dpaa2_flow_rule_insert_hole(struct dpaa2_dev_flow *flow, 482 int offset, int size, 483 enum dpaa2_flow_dist_type dist_type) 484 { 485 int end; 486 487 if (dist_type & DPAA2_FLOW_QOS_TYPE) { 488 end = flow->qos_rule_size; 489 if (end > offset) { 490 memmove(flow->qos_key_addr + offset + size, 491 flow->qos_key_addr + offset, 492 end - offset); 493 memset(flow->qos_key_addr + offset, 494 0, size); 495 496 memmove(flow->qos_mask_addr + offset + size, 497 flow->qos_mask_addr + offset, 498 end - offset); 499 memset(flow->qos_mask_addr + offset, 500 0, size); 501 } 502 flow->qos_rule_size += size; 503 } 504 505 if (dist_type & DPAA2_FLOW_FS_TYPE) { 506 end = flow->fs_rule_size; 507 if (end > offset) { 508 memmove(flow->fs_key_addr + offset + size, 509 flow->fs_key_addr + offset, 510 end - offset); 511 memset(flow->fs_key_addr + offset, 512 0, size); 513 514 memmove(flow->fs_mask_addr + offset + size, 515 flow->fs_mask_addr + offset, 516 end - offset); 517 memset(flow->fs_mask_addr + offset, 518 0, size); 519 } 520 flow->fs_rule_size += size; 521 } 522 523 return 0; 524 } 525 526 static int 527 dpaa2_flow_rule_add_all(struct dpaa2_dev_priv *priv, 528 enum dpaa2_flow_dist_type dist_type, 529 uint16_t entry_size, uint8_t tc_id) 530 { 531 struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows); 532 int ret; 533 534 while (curr) { 535 if (dist_type & DPAA2_FLOW_QOS_TYPE) { 536 if (priv->num_rx_tc > 1 || 537 curr->action_type == 538 RTE_FLOW_ACTION_TYPE_RSS) { 539 curr->qos_rule.key_size = entry_size; 540 ret = dpaa2_flow_add_qos_rule(priv, curr); 541 if (ret) 542 return ret; 543 } 544 } 545 if (dist_type & DPAA2_FLOW_FS_TYPE && 546 curr->tc_id == tc_id) { 547 curr->fs_rule.key_size = entry_size; 548 ret = dpaa2_flow_add_fs_rule(priv, curr); 549 if (ret) 550 return ret; 551 } 552 curr = LIST_NEXT(curr, next); 553 } 554 555 return 0; 556 } 557 558 static int 559 dpaa2_flow_qos_rule_insert_hole(struct dpaa2_dev_priv *priv, 560 int offset, int size) 561 { 562 struct dpaa2_dev_flow *curr; 563 int ret; 564 565 curr = priv->curr; 566 if (!curr) { 567 DPAA2_PMD_ERR("Current qos flow insert hole failed."); 568 return -EINVAL; 569 } else { 570 ret = dpaa2_flow_rule_insert_hole(curr, offset, size, 571 DPAA2_FLOW_QOS_TYPE); 572 if (ret) 573 return ret; 574 } 575 576 curr = LIST_FIRST(&priv->flows); 577 while (curr) { 578 ret = dpaa2_flow_rule_insert_hole(curr, offset, size, 579 DPAA2_FLOW_QOS_TYPE); 580 if (ret) 581 return ret; 582 curr = LIST_NEXT(curr, next); 583 } 584 585 return 0; 586 } 587 588 static int 589 dpaa2_flow_fs_rule_insert_hole(struct dpaa2_dev_priv *priv, 590 int offset, int size, int tc_id) 591 { 592 struct dpaa2_dev_flow *curr; 593 int ret; 594 595 curr = priv->curr; 596 if (!curr || curr->tc_id != tc_id) { 597 DPAA2_PMD_ERR("Current flow insert hole failed."); 598 return -EINVAL; 599 } else { 600 ret = dpaa2_flow_rule_insert_hole(curr, offset, size, 601 DPAA2_FLOW_FS_TYPE); 602 if (ret) 603 return ret; 604 } 605 606 curr = LIST_FIRST(&priv->flows); 607 608 while (curr) { 609 if (curr->tc_id != tc_id) { 610 curr = LIST_NEXT(curr, next); 611 continue; 612 } 613 ret = dpaa2_flow_rule_insert_hole(curr, offset, size, 614 DPAA2_FLOW_FS_TYPE); 615 if (ret) 616 return ret; 617 curr = LIST_NEXT(curr, next); 618 } 619 620 return 0; 621 } 622 623 /* Move IPv4/IPv6 addresses to fill new extract previous IP address. 624 * Current MC/WRIOP only support generic IP extract but IP address 625 * is not fixed, so we have to put them at end of extracts, otherwise, 626 * the extracts position following them can't be identified. 627 */ 628 static int 629 dpaa2_flow_key_profile_advance(enum net_prot prot, 630 uint32_t field, uint8_t field_size, 631 struct dpaa2_dev_priv *priv, 632 enum dpaa2_flow_dist_type dist_type, int tc_id, 633 int *insert_offset) 634 { 635 int offset, ret; 636 struct dpaa2_key_profile *key_profile; 637 int num, pos; 638 639 if (dpaa2_flow_ip_address_extract(prot, field)) { 640 DPAA2_PMD_ERR("%s only for none IP address extract", 641 __func__); 642 return -EINVAL; 643 } 644 645 if (dist_type == DPAA2_FLOW_QOS_TYPE) 646 key_profile = &priv->extract.qos_key_extract.key_profile; 647 else 648 key_profile = &priv->extract.tc_key_extract[tc_id].key_profile; 649 650 num = key_profile->num; 651 652 if (num >= DPKG_MAX_NUM_OF_EXTRACTS) { 653 DPAA2_PMD_ERR("Number of extracts overflows"); 654 return -EINVAL; 655 } 656 657 if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) { 658 offset = key_profile->ip_addr_extract_off; 659 pos = key_profile->ip_addr_extract_pos; 660 key_profile->ip_addr_extract_pos++; 661 key_profile->ip_addr_extract_off += field_size; 662 if (dist_type == DPAA2_FLOW_QOS_TYPE) { 663 ret = dpaa2_flow_qos_rule_insert_hole(priv, 664 offset, field_size); 665 } else { 666 ret = dpaa2_flow_fs_rule_insert_hole(priv, 667 offset, field_size, tc_id); 668 } 669 if (ret) 670 return ret; 671 } else { 672 pos = num; 673 } 674 675 if (pos > 0) { 676 key_profile->key_offset[pos] = 677 key_profile->key_offset[pos - 1] + 678 key_profile->key_size[pos - 1]; 679 } else { 680 key_profile->key_offset[pos] = 0; 681 } 682 683 key_profile->key_size[pos] = field_size; 684 key_profile->prot_field[pos].prot = prot; 685 key_profile->prot_field[pos].key_field = field; 686 key_profile->num++; 687 688 if (insert_offset) 689 *insert_offset = key_profile->key_offset[pos]; 690 691 if (dpaa2_flow_l4_src_port_extract(prot, field)) { 692 key_profile->l4_src_port_present = 1; 693 key_profile->l4_src_port_pos = pos; 694 key_profile->l4_src_port_offset = 695 key_profile->key_offset[pos]; 696 } else if (dpaa2_flow_l4_dst_port_extract(prot, field)) { 697 key_profile->l4_dst_port_present = 1; 698 key_profile->l4_dst_port_pos = pos; 699 key_profile->l4_dst_port_offset = 700 key_profile->key_offset[pos]; 701 } 702 key_profile->key_max_size += field_size; 703 704 return pos; 705 } 706 707 static int 708 dpaa2_flow_extract_add_hdr(enum net_prot prot, 709 uint32_t field, uint8_t field_size, 710 struct dpaa2_dev_priv *priv, 711 enum dpaa2_flow_dist_type dist_type, int tc_id, 712 int *insert_offset) 713 { 714 int pos, i; 715 struct dpaa2_key_extract *key_extract; 716 struct dpkg_profile_cfg *dpkg; 717 struct dpkg_extract *extracts; 718 719 if (dist_type == DPAA2_FLOW_QOS_TYPE) 720 key_extract = &priv->extract.qos_key_extract; 721 else 722 key_extract = &priv->extract.tc_key_extract[tc_id]; 723 724 dpkg = &key_extract->dpkg; 725 extracts = dpkg->extracts; 726 727 if (dpaa2_flow_ip_address_extract(prot, field)) { 728 DPAA2_PMD_ERR("%s only for none IP address extract", 729 __func__); 730 return -EINVAL; 731 } 732 733 if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { 734 DPAA2_PMD_ERR("Number of extracts overflows"); 735 return -EINVAL; 736 } 737 738 pos = dpaa2_flow_key_profile_advance(prot, 739 field, field_size, priv, 740 dist_type, tc_id, 741 insert_offset); 742 if (pos < 0) 743 return pos; 744 745 if (pos != dpkg->num_extracts) { 746 /* Not the last pos, must have IP address extract.*/ 747 for (i = dpkg->num_extracts - 1; i >= pos; i--) { 748 memcpy(&extracts[i + 1], 749 &extracts[i], sizeof(struct dpkg_extract)); 750 } 751 } 752 753 extracts[pos].type = DPKG_EXTRACT_FROM_HDR; 754 extracts[pos].extract.from_hdr.prot = prot; 755 extracts[pos].extract.from_hdr.type = DPKG_FULL_FIELD; 756 extracts[pos].extract.from_hdr.field = field; 757 758 dpkg->num_extracts++; 759 760 return 0; 761 } 762 763 static int 764 dpaa2_flow_extract_new_raw(struct dpaa2_dev_priv *priv, 765 int offset, int size, 766 enum dpaa2_flow_dist_type dist_type, int tc_id) 767 { 768 struct dpaa2_key_extract *key_extract; 769 struct dpkg_profile_cfg *dpkg; 770 struct dpaa2_key_profile *key_profile; 771 int last_extract_size, index, pos, item_size; 772 uint8_t num_extracts; 773 uint32_t field; 774 775 if (dist_type == DPAA2_FLOW_QOS_TYPE) 776 key_extract = &priv->extract.qos_key_extract; 777 else 778 key_extract = &priv->extract.tc_key_extract[tc_id]; 779 780 dpkg = &key_extract->dpkg; 781 key_profile = &key_extract->key_profile; 782 783 key_profile->raw_region.raw_start = 0; 784 key_profile->raw_region.raw_size = 0; 785 786 last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE); 787 num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE); 788 if (last_extract_size) 789 num_extracts++; 790 else 791 last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE; 792 793 for (index = 0; index < num_extracts; index++) { 794 if (index == num_extracts - 1) 795 item_size = last_extract_size; 796 else 797 item_size = DPAA2_FLOW_MAX_KEY_SIZE; 798 field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT; 799 field |= item_size; 800 801 pos = dpaa2_flow_key_profile_advance(NET_PROT_PAYLOAD, 802 field, item_size, priv, dist_type, 803 tc_id, NULL); 804 if (pos < 0) 805 return pos; 806 807 dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA; 808 dpkg->extracts[pos].extract.from_data.size = item_size; 809 dpkg->extracts[pos].extract.from_data.offset = offset; 810 811 if (index == 0) { 812 key_profile->raw_extract_pos = pos; 813 key_profile->raw_extract_off = 814 key_profile->key_offset[pos]; 815 key_profile->raw_region.raw_start = offset; 816 } 817 key_profile->raw_extract_num++; 818 key_profile->raw_region.raw_size += 819 key_profile->key_size[pos]; 820 821 offset += item_size; 822 dpkg->num_extracts++; 823 } 824 825 return 0; 826 } 827 828 static int 829 dpaa2_flow_extract_add_raw(struct dpaa2_dev_priv *priv, 830 int offset, int size, enum dpaa2_flow_dist_type dist_type, 831 int tc_id, int *recfg) 832 { 833 struct dpaa2_key_profile *key_profile; 834 struct dpaa2_raw_region *raw_region; 835 int end = offset + size, ret = 0, extract_extended, sz_extend; 836 int start_cmp, end_cmp, new_size, index, pos, end_pos; 837 int last_extract_size, item_size, num_extracts, bk_num = 0; 838 struct dpkg_extract extract_bk[DPKG_MAX_NUM_OF_EXTRACTS]; 839 uint8_t key_offset_bk[DPKG_MAX_NUM_OF_EXTRACTS]; 840 uint8_t key_size_bk[DPKG_MAX_NUM_OF_EXTRACTS]; 841 struct key_prot_field prot_field_bk[DPKG_MAX_NUM_OF_EXTRACTS]; 842 struct dpaa2_raw_region raw_hole; 843 struct dpkg_profile_cfg *dpkg; 844 enum net_prot prot; 845 uint32_t field; 846 847 if (dist_type == DPAA2_FLOW_QOS_TYPE) { 848 key_profile = &priv->extract.qos_key_extract.key_profile; 849 dpkg = &priv->extract.qos_key_extract.dpkg; 850 } else { 851 key_profile = &priv->extract.tc_key_extract[tc_id].key_profile; 852 dpkg = &priv->extract.tc_key_extract[tc_id].dpkg; 853 } 854 855 raw_region = &key_profile->raw_region; 856 if (!raw_region->raw_size) { 857 /* New RAW region*/ 858 ret = dpaa2_flow_extract_new_raw(priv, offset, size, 859 dist_type, tc_id); 860 if (!ret && recfg) 861 (*recfg) |= dist_type; 862 863 return ret; 864 } 865 start_cmp = raw_region->raw_start; 866 end_cmp = raw_region->raw_start + raw_region->raw_size; 867 868 if (offset >= start_cmp && end <= end_cmp) 869 return 0; 870 871 sz_extend = 0; 872 new_size = raw_region->raw_size; 873 if (offset < start_cmp) { 874 sz_extend += start_cmp - offset; 875 new_size += (start_cmp - offset); 876 } 877 if (end > end_cmp) { 878 sz_extend += end - end_cmp; 879 new_size += (end - end_cmp); 880 } 881 882 last_extract_size = (new_size % DPAA2_FLOW_MAX_KEY_SIZE); 883 num_extracts = (new_size / DPAA2_FLOW_MAX_KEY_SIZE); 884 if (last_extract_size) 885 num_extracts++; 886 else 887 last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE; 888 889 if ((key_profile->num + num_extracts - 890 key_profile->raw_extract_num) >= 891 DPKG_MAX_NUM_OF_EXTRACTS) { 892 DPAA2_PMD_ERR("%s Failed to expand raw extracts", 893 __func__); 894 return -EINVAL; 895 } 896 897 if (offset < start_cmp) { 898 raw_hole.raw_start = key_profile->raw_extract_off; 899 raw_hole.raw_size = start_cmp - offset; 900 raw_region->raw_start = offset; 901 raw_region->raw_size += start_cmp - offset; 902 903 if (dist_type & DPAA2_FLOW_QOS_TYPE) { 904 ret = dpaa2_flow_qos_rule_insert_hole(priv, 905 raw_hole.raw_start, 906 raw_hole.raw_size); 907 if (ret) 908 return ret; 909 } 910 if (dist_type & DPAA2_FLOW_FS_TYPE) { 911 ret = dpaa2_flow_fs_rule_insert_hole(priv, 912 raw_hole.raw_start, 913 raw_hole.raw_size, tc_id); 914 if (ret) 915 return ret; 916 } 917 } 918 919 if (end > end_cmp) { 920 raw_hole.raw_start = 921 key_profile->raw_extract_off + 922 raw_region->raw_size; 923 raw_hole.raw_size = end - end_cmp; 924 raw_region->raw_size += end - end_cmp; 925 926 if (dist_type & DPAA2_FLOW_QOS_TYPE) { 927 ret = dpaa2_flow_qos_rule_insert_hole(priv, 928 raw_hole.raw_start, 929 raw_hole.raw_size); 930 if (ret) 931 return ret; 932 } 933 if (dist_type & DPAA2_FLOW_FS_TYPE) { 934 ret = dpaa2_flow_fs_rule_insert_hole(priv, 935 raw_hole.raw_start, 936 raw_hole.raw_size, tc_id); 937 if (ret) 938 return ret; 939 } 940 } 941 942 end_pos = key_profile->raw_extract_pos + 943 key_profile->raw_extract_num; 944 if (key_profile->num > end_pos) { 945 bk_num = key_profile->num - end_pos; 946 memcpy(extract_bk, &dpkg->extracts[end_pos], 947 bk_num * sizeof(struct dpkg_extract)); 948 memcpy(key_offset_bk, &key_profile->key_offset[end_pos], 949 bk_num * sizeof(uint8_t)); 950 memcpy(key_size_bk, &key_profile->key_size[end_pos], 951 bk_num * sizeof(uint8_t)); 952 memcpy(prot_field_bk, &key_profile->prot_field[end_pos], 953 bk_num * sizeof(struct key_prot_field)); 954 955 for (index = 0; index < bk_num; index++) { 956 key_offset_bk[index] += sz_extend; 957 prot = prot_field_bk[index].prot; 958 field = prot_field_bk[index].key_field; 959 if (dpaa2_flow_l4_src_port_extract(prot, 960 field)) { 961 key_profile->l4_src_port_present = 1; 962 key_profile->l4_src_port_pos = end_pos + index; 963 key_profile->l4_src_port_offset = 964 key_offset_bk[index]; 965 } else if (dpaa2_flow_l4_dst_port_extract(prot, 966 field)) { 967 key_profile->l4_dst_port_present = 1; 968 key_profile->l4_dst_port_pos = end_pos + index; 969 key_profile->l4_dst_port_offset = 970 key_offset_bk[index]; 971 } 972 } 973 } 974 975 pos = key_profile->raw_extract_pos; 976 977 for (index = 0; index < num_extracts; index++) { 978 if (index == num_extracts - 1) 979 item_size = last_extract_size; 980 else 981 item_size = DPAA2_FLOW_MAX_KEY_SIZE; 982 field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT; 983 field |= item_size; 984 985 if (pos > 0) { 986 key_profile->key_offset[pos] = 987 key_profile->key_offset[pos - 1] + 988 key_profile->key_size[pos - 1]; 989 } else { 990 key_profile->key_offset[pos] = 0; 991 } 992 key_profile->key_size[pos] = item_size; 993 key_profile->prot_field[pos].prot = NET_PROT_PAYLOAD; 994 key_profile->prot_field[pos].key_field = field; 995 996 dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA; 997 dpkg->extracts[pos].extract.from_data.size = item_size; 998 dpkg->extracts[pos].extract.from_data.offset = offset; 999 offset += item_size; 1000 pos++; 1001 } 1002 1003 if (bk_num) { 1004 memcpy(&dpkg->extracts[pos], extract_bk, 1005 bk_num * sizeof(struct dpkg_extract)); 1006 memcpy(&key_profile->key_offset[end_pos], 1007 key_offset_bk, bk_num * sizeof(uint8_t)); 1008 memcpy(&key_profile->key_size[end_pos], 1009 key_size_bk, bk_num * sizeof(uint8_t)); 1010 memcpy(&key_profile->prot_field[end_pos], 1011 prot_field_bk, bk_num * sizeof(struct key_prot_field)); 1012 } 1013 1014 extract_extended = num_extracts - key_profile->raw_extract_num; 1015 if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) { 1016 key_profile->ip_addr_extract_pos += extract_extended; 1017 key_profile->ip_addr_extract_off += sz_extend; 1018 } 1019 key_profile->raw_extract_num = num_extracts; 1020 key_profile->num += extract_extended; 1021 key_profile->key_max_size += sz_extend; 1022 1023 dpkg->num_extracts += extract_extended; 1024 if (!ret && recfg) 1025 (*recfg) |= dist_type; 1026 1027 return ret; 1028 } 1029 1030 static inline int 1031 dpaa2_flow_extract_search(struct dpaa2_key_profile *key_profile, 1032 enum net_prot prot, uint32_t key_field) 1033 { 1034 int pos; 1035 struct key_prot_field *prot_field; 1036 1037 if (dpaa2_flow_ip_address_extract(prot, key_field)) { 1038 DPAA2_PMD_ERR("%s only for none IP address extract", 1039 __func__); 1040 return -EINVAL; 1041 } 1042 1043 prot_field = key_profile->prot_field; 1044 for (pos = 0; pos < key_profile->num; pos++) { 1045 if (prot_field[pos].prot == prot && 1046 prot_field[pos].key_field == key_field) { 1047 return pos; 1048 } 1049 } 1050 1051 if (dpaa2_flow_l4_src_port_extract(prot, key_field)) { 1052 if (key_profile->l4_src_port_present) 1053 return key_profile->l4_src_port_pos; 1054 } else if (dpaa2_flow_l4_dst_port_extract(prot, key_field)) { 1055 if (key_profile->l4_dst_port_present) 1056 return key_profile->l4_dst_port_pos; 1057 } 1058 1059 return -ENXIO; 1060 } 1061 1062 static inline int 1063 dpaa2_flow_extract_key_offset(struct dpaa2_key_profile *key_profile, 1064 enum net_prot prot, uint32_t key_field) 1065 { 1066 int i; 1067 1068 i = dpaa2_flow_extract_search(key_profile, prot, key_field); 1069 if (i >= 0) 1070 return key_profile->key_offset[i]; 1071 else 1072 return i; 1073 } 1074 1075 struct prev_proto_field_id { 1076 enum net_prot prot; 1077 union { 1078 rte_be16_t eth_type; 1079 uint8_t ip_proto; 1080 }; 1081 }; 1082 1083 static int 1084 dpaa2_flow_prev_proto_rule(struct dpaa2_dev_priv *priv, 1085 struct dpaa2_dev_flow *flow, 1086 const struct prev_proto_field_id *prev_proto, 1087 int group, 1088 enum dpaa2_flow_dist_type dist_type) 1089 { 1090 int offset; 1091 uint8_t *key_addr; 1092 uint8_t *mask_addr; 1093 uint32_t field = 0; 1094 rte_be16_t eth_type; 1095 uint8_t ip_proto; 1096 struct dpaa2_key_extract *key_extract; 1097 struct dpaa2_key_profile *key_profile; 1098 1099 if (prev_proto->prot == NET_PROT_ETH) { 1100 field = NH_FLD_ETH_TYPE; 1101 } else if (prev_proto->prot == NET_PROT_IP) { 1102 field = NH_FLD_IP_PROTO; 1103 } else { 1104 DPAA2_PMD_ERR("Prev proto(%d) not support!", 1105 prev_proto->prot); 1106 return -EINVAL; 1107 } 1108 1109 if (dist_type & DPAA2_FLOW_QOS_TYPE) { 1110 key_extract = &priv->extract.qos_key_extract; 1111 key_profile = &key_extract->key_profile; 1112 1113 offset = dpaa2_flow_extract_key_offset(key_profile, 1114 prev_proto->prot, field); 1115 if (offset < 0) { 1116 DPAA2_PMD_ERR("%s QoS key extract failed", __func__); 1117 return -EINVAL; 1118 } 1119 key_addr = flow->qos_key_addr + offset; 1120 mask_addr = flow->qos_mask_addr + offset; 1121 if (prev_proto->prot == NET_PROT_ETH) { 1122 eth_type = prev_proto->eth_type; 1123 memcpy(key_addr, ð_type, sizeof(rte_be16_t)); 1124 eth_type = 0xffff; 1125 memcpy(mask_addr, ð_type, sizeof(rte_be16_t)); 1126 flow->qos_rule_size += sizeof(rte_be16_t); 1127 } else if (prev_proto->prot == NET_PROT_IP) { 1128 ip_proto = prev_proto->ip_proto; 1129 memcpy(key_addr, &ip_proto, sizeof(uint8_t)); 1130 ip_proto = 0xff; 1131 memcpy(mask_addr, &ip_proto, sizeof(uint8_t)); 1132 flow->qos_rule_size += sizeof(uint8_t); 1133 } else { 1134 DPAA2_PMD_ERR("Invalid Prev proto(%d)", 1135 prev_proto->prot); 1136 return -EINVAL; 1137 } 1138 } 1139 1140 if (dist_type & DPAA2_FLOW_FS_TYPE) { 1141 key_extract = &priv->extract.tc_key_extract[group]; 1142 key_profile = &key_extract->key_profile; 1143 1144 offset = dpaa2_flow_extract_key_offset(key_profile, 1145 prev_proto->prot, field); 1146 if (offset < 0) { 1147 DPAA2_PMD_ERR("%s TC[%d] key extract failed", 1148 __func__, group); 1149 return -EINVAL; 1150 } 1151 key_addr = flow->fs_key_addr + offset; 1152 mask_addr = flow->fs_mask_addr + offset; 1153 1154 if (prev_proto->prot == NET_PROT_ETH) { 1155 eth_type = prev_proto->eth_type; 1156 memcpy(key_addr, ð_type, sizeof(rte_be16_t)); 1157 eth_type = 0xffff; 1158 memcpy(mask_addr, ð_type, sizeof(rte_be16_t)); 1159 flow->fs_rule_size += sizeof(rte_be16_t); 1160 } else if (prev_proto->prot == NET_PROT_IP) { 1161 ip_proto = prev_proto->ip_proto; 1162 memcpy(key_addr, &ip_proto, sizeof(uint8_t)); 1163 ip_proto = 0xff; 1164 memcpy(mask_addr, &ip_proto, sizeof(uint8_t)); 1165 flow->fs_rule_size += sizeof(uint8_t); 1166 } else { 1167 DPAA2_PMD_ERR("Invalid Prev proto(%d)", 1168 prev_proto->prot); 1169 return -EINVAL; 1170 } 1171 } 1172 1173 return 0; 1174 } 1175 1176 static inline int 1177 dpaa2_flow_hdr_rule_data_set(struct dpaa2_dev_flow *flow, 1178 struct dpaa2_key_profile *key_profile, 1179 enum net_prot prot, uint32_t field, int size, 1180 const void *key, const void *mask, 1181 enum dpaa2_flow_dist_type dist_type) 1182 { 1183 int offset; 1184 1185 if (dpaa2_flow_ip_address_extract(prot, field)) { 1186 DPAA2_PMD_ERR("%s only for none IP address extract", 1187 __func__); 1188 return -EINVAL; 1189 } 1190 1191 offset = dpaa2_flow_extract_key_offset(key_profile, 1192 prot, field); 1193 if (offset < 0) { 1194 DPAA2_PMD_ERR("P(%d)/F(%d) does not exist!", 1195 prot, field); 1196 return -EINVAL; 1197 } 1198 1199 if (dist_type & DPAA2_FLOW_QOS_TYPE) { 1200 memcpy((flow->qos_key_addr + offset), key, size); 1201 memcpy((flow->qos_mask_addr + offset), mask, size); 1202 if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) 1203 flow->qos_rule_size = offset + size; 1204 } 1205 1206 if (dist_type & DPAA2_FLOW_FS_TYPE) { 1207 memcpy((flow->fs_key_addr + offset), key, size); 1208 memcpy((flow->fs_mask_addr + offset), mask, size); 1209 if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) 1210 flow->fs_rule_size = offset + size; 1211 } 1212 1213 return 0; 1214 } 1215 1216 static inline int 1217 dpaa2_flow_raw_rule_data_set(struct dpaa2_dev_flow *flow, 1218 struct dpaa2_key_profile *key_profile, 1219 uint32_t extract_offset, int size, 1220 const void *key, const void *mask, 1221 enum dpaa2_flow_dist_type dist_type) 1222 { 1223 int extract_size = size > DPAA2_FLOW_MAX_KEY_SIZE ? 1224 DPAA2_FLOW_MAX_KEY_SIZE : size; 1225 int offset, field; 1226 1227 field = extract_offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT; 1228 field |= extract_size; 1229 offset = dpaa2_flow_extract_key_offset(key_profile, 1230 NET_PROT_PAYLOAD, field); 1231 if (offset < 0) { 1232 DPAA2_PMD_ERR("offset(%d)/size(%d) raw extract failed", 1233 extract_offset, size); 1234 return -EINVAL; 1235 } 1236 1237 if (dist_type & DPAA2_FLOW_QOS_TYPE) { 1238 memcpy((flow->qos_key_addr + offset), key, size); 1239 memcpy((flow->qos_mask_addr + offset), mask, size); 1240 flow->qos_rule_size = offset + size; 1241 } 1242 1243 if (dist_type & DPAA2_FLOW_FS_TYPE) { 1244 memcpy((flow->fs_key_addr + offset), key, size); 1245 memcpy((flow->fs_mask_addr + offset), mask, size); 1246 flow->fs_rule_size = offset + size; 1247 } 1248 1249 return 0; 1250 } 1251 1252 static int 1253 dpaa2_flow_extract_support(const uint8_t *mask_src, 1254 enum rte_flow_item_type type) 1255 { 1256 char mask[64]; 1257 int i, size = 0; 1258 const char *mask_support = 0; 1259 1260 switch (type) { 1261 case RTE_FLOW_ITEM_TYPE_ETH: 1262 mask_support = (const char *)&dpaa2_flow_item_eth_mask; 1263 size = sizeof(struct rte_flow_item_eth); 1264 break; 1265 case RTE_FLOW_ITEM_TYPE_VLAN: 1266 mask_support = (const char *)&dpaa2_flow_item_vlan_mask; 1267 size = sizeof(struct rte_flow_item_vlan); 1268 break; 1269 case RTE_FLOW_ITEM_TYPE_IPV4: 1270 mask_support = (const char *)&dpaa2_flow_item_ipv4_mask; 1271 size = sizeof(struct rte_flow_item_ipv4); 1272 break; 1273 case RTE_FLOW_ITEM_TYPE_IPV6: 1274 mask_support = (const char *)&dpaa2_flow_item_ipv6_mask; 1275 size = sizeof(struct rte_flow_item_ipv6); 1276 break; 1277 case RTE_FLOW_ITEM_TYPE_ICMP: 1278 mask_support = (const char *)&dpaa2_flow_item_icmp_mask; 1279 size = sizeof(struct rte_flow_item_icmp); 1280 break; 1281 case RTE_FLOW_ITEM_TYPE_UDP: 1282 mask_support = (const char *)&dpaa2_flow_item_udp_mask; 1283 size = sizeof(struct rte_flow_item_udp); 1284 break; 1285 case RTE_FLOW_ITEM_TYPE_TCP: 1286 mask_support = (const char *)&dpaa2_flow_item_tcp_mask; 1287 size = sizeof(struct rte_flow_item_tcp); 1288 break; 1289 case RTE_FLOW_ITEM_TYPE_SCTP: 1290 mask_support = (const char *)&dpaa2_flow_item_sctp_mask; 1291 size = sizeof(struct rte_flow_item_sctp); 1292 break; 1293 case RTE_FLOW_ITEM_TYPE_GRE: 1294 mask_support = (const char *)&dpaa2_flow_item_gre_mask; 1295 size = sizeof(struct rte_flow_item_gre); 1296 break; 1297 default: 1298 return -EINVAL; 1299 } 1300 1301 memcpy(mask, mask_support, size); 1302 1303 for (i = 0; i < size; i++) 1304 mask[i] = (mask[i] | mask_src[i]); 1305 1306 if (memcmp(mask, mask_support, size)) 1307 return -1; 1308 1309 return 0; 1310 } 1311 1312 static int 1313 dpaa2_flow_identify_by_prev_prot(struct dpaa2_dev_priv *priv, 1314 struct dpaa2_dev_flow *flow, 1315 const struct prev_proto_field_id *prev_prot, 1316 enum dpaa2_flow_dist_type dist_type, 1317 int group, int *recfg) 1318 { 1319 int ret, index, local_cfg = 0, size = 0; 1320 struct dpaa2_key_extract *extract; 1321 struct dpaa2_key_profile *key_profile; 1322 enum net_prot prot = prev_prot->prot; 1323 uint32_t key_field = 0; 1324 1325 if (prot == NET_PROT_ETH) { 1326 key_field = NH_FLD_ETH_TYPE; 1327 size = sizeof(rte_be16_t); 1328 } else if (prot == NET_PROT_IP) { 1329 key_field = NH_FLD_IP_PROTO; 1330 size = sizeof(uint8_t); 1331 } else if (prot == NET_PROT_IPV4) { 1332 prot = NET_PROT_IP; 1333 key_field = NH_FLD_IP_PROTO; 1334 size = sizeof(uint8_t); 1335 } else if (prot == NET_PROT_IPV6) { 1336 prot = NET_PROT_IP; 1337 key_field = NH_FLD_IP_PROTO; 1338 size = sizeof(uint8_t); 1339 } else { 1340 DPAA2_PMD_ERR("Invalid Prev prot(%d)", prot); 1341 return -EINVAL; 1342 } 1343 1344 if (dist_type & DPAA2_FLOW_QOS_TYPE) { 1345 extract = &priv->extract.qos_key_extract; 1346 key_profile = &extract->key_profile; 1347 1348 index = dpaa2_flow_extract_search(key_profile, 1349 prot, key_field); 1350 if (index < 0) { 1351 ret = dpaa2_flow_extract_add_hdr(prot, 1352 key_field, size, priv, 1353 DPAA2_FLOW_QOS_TYPE, group, 1354 NULL); 1355 if (ret) { 1356 DPAA2_PMD_ERR("QOS prev extract add failed"); 1357 1358 return -EINVAL; 1359 } 1360 local_cfg |= DPAA2_FLOW_QOS_TYPE; 1361 } 1362 1363 ret = dpaa2_flow_prev_proto_rule(priv, flow, prev_prot, group, 1364 DPAA2_FLOW_QOS_TYPE); 1365 if (ret) { 1366 DPAA2_PMD_ERR("QoS prev rule set failed"); 1367 return -EINVAL; 1368 } 1369 } 1370 1371 if (dist_type & DPAA2_FLOW_FS_TYPE) { 1372 extract = &priv->extract.tc_key_extract[group]; 1373 key_profile = &extract->key_profile; 1374 1375 index = dpaa2_flow_extract_search(key_profile, 1376 prot, key_field); 1377 if (index < 0) { 1378 ret = dpaa2_flow_extract_add_hdr(prot, 1379 key_field, size, priv, 1380 DPAA2_FLOW_FS_TYPE, group, 1381 NULL); 1382 if (ret) { 1383 DPAA2_PMD_ERR("FS[%d] prev extract add failed", 1384 group); 1385 1386 return -EINVAL; 1387 } 1388 local_cfg |= DPAA2_FLOW_FS_TYPE; 1389 } 1390 1391 ret = dpaa2_flow_prev_proto_rule(priv, flow, prev_prot, group, 1392 DPAA2_FLOW_FS_TYPE); 1393 if (ret) { 1394 DPAA2_PMD_ERR("FS[%d] prev rule set failed", 1395 group); 1396 return -EINVAL; 1397 } 1398 } 1399 1400 if (recfg) 1401 *recfg = local_cfg; 1402 1403 return 0; 1404 } 1405 1406 static int 1407 dpaa2_flow_add_hdr_extract_rule(struct dpaa2_dev_flow *flow, 1408 enum net_prot prot, uint32_t field, 1409 const void *key, const void *mask, int size, 1410 struct dpaa2_dev_priv *priv, int tc_id, int *recfg, 1411 enum dpaa2_flow_dist_type dist_type) 1412 { 1413 int index, ret, local_cfg = 0; 1414 struct dpaa2_key_extract *key_extract; 1415 struct dpaa2_key_profile *key_profile; 1416 1417 if (dpaa2_flow_ip_address_extract(prot, field)) 1418 return -EINVAL; 1419 1420 if (dist_type == DPAA2_FLOW_QOS_TYPE) 1421 key_extract = &priv->extract.qos_key_extract; 1422 else 1423 key_extract = &priv->extract.tc_key_extract[tc_id]; 1424 1425 key_profile = &key_extract->key_profile; 1426 1427 index = dpaa2_flow_extract_search(key_profile, 1428 prot, field); 1429 if (index < 0) { 1430 ret = dpaa2_flow_extract_add_hdr(prot, 1431 field, size, priv, 1432 dist_type, tc_id, NULL); 1433 if (ret) { 1434 DPAA2_PMD_ERR("QoS Extract P(%d)/F(%d) failed", 1435 prot, field); 1436 1437 return ret; 1438 } 1439 local_cfg |= dist_type; 1440 } 1441 1442 ret = dpaa2_flow_hdr_rule_data_set(flow, key_profile, 1443 prot, field, size, key, mask, dist_type); 1444 if (ret) { 1445 DPAA2_PMD_ERR("QoS P(%d)/F(%d) rule data set failed", 1446 prot, field); 1447 1448 return ret; 1449 } 1450 1451 if (recfg) 1452 *recfg |= local_cfg; 1453 1454 return 0; 1455 } 1456 1457 static int 1458 dpaa2_flow_add_ipaddr_extract_rule(struct dpaa2_dev_flow *flow, 1459 enum net_prot prot, uint32_t field, 1460 const void *key, const void *mask, int size, 1461 struct dpaa2_dev_priv *priv, int tc_id, int *recfg, 1462 enum dpaa2_flow_dist_type dist_type) 1463 { 1464 int local_cfg = 0, num, ipaddr_extract_len = 0; 1465 struct dpaa2_key_extract *key_extract; 1466 struct dpaa2_key_profile *key_profile; 1467 struct dpkg_profile_cfg *dpkg; 1468 uint8_t *key_addr, *mask_addr; 1469 union ip_addr_extract_rule *ip_addr_data; 1470 union ip_addr_extract_rule *ip_addr_mask; 1471 enum net_prot orig_prot; 1472 uint32_t orig_field; 1473 1474 if (prot != NET_PROT_IPV4 && prot != NET_PROT_IPV6) 1475 return -EINVAL; 1476 1477 if (prot == NET_PROT_IPV4 && field != NH_FLD_IPV4_SRC_IP && 1478 field != NH_FLD_IPV4_DST_IP) { 1479 return -EINVAL; 1480 } 1481 1482 if (prot == NET_PROT_IPV6 && field != NH_FLD_IPV6_SRC_IP && 1483 field != NH_FLD_IPV6_DST_IP) { 1484 return -EINVAL; 1485 } 1486 1487 orig_prot = prot; 1488 orig_field = field; 1489 1490 if (prot == NET_PROT_IPV4 && 1491 field == NH_FLD_IPV4_SRC_IP) { 1492 prot = NET_PROT_IP; 1493 field = NH_FLD_IP_SRC; 1494 } else if (prot == NET_PROT_IPV4 && 1495 field == NH_FLD_IPV4_DST_IP) { 1496 prot = NET_PROT_IP; 1497 field = NH_FLD_IP_DST; 1498 } else if (prot == NET_PROT_IPV6 && 1499 field == NH_FLD_IPV6_SRC_IP) { 1500 prot = NET_PROT_IP; 1501 field = NH_FLD_IP_SRC; 1502 } else if (prot == NET_PROT_IPV6 && 1503 field == NH_FLD_IPV6_DST_IP) { 1504 prot = NET_PROT_IP; 1505 field = NH_FLD_IP_DST; 1506 } else { 1507 DPAA2_PMD_ERR("Inval P(%d)/F(%d) to extract ip address", 1508 prot, field); 1509 return -EINVAL; 1510 } 1511 1512 if (dist_type == DPAA2_FLOW_QOS_TYPE) { 1513 key_extract = &priv->extract.qos_key_extract; 1514 key_profile = &key_extract->key_profile; 1515 dpkg = &key_extract->dpkg; 1516 num = key_profile->num; 1517 key_addr = flow->qos_key_addr; 1518 mask_addr = flow->qos_mask_addr; 1519 } else { 1520 key_extract = &priv->extract.tc_key_extract[tc_id]; 1521 key_profile = &key_extract->key_profile; 1522 dpkg = &key_extract->dpkg; 1523 num = key_profile->num; 1524 key_addr = flow->fs_key_addr; 1525 mask_addr = flow->fs_mask_addr; 1526 } 1527 1528 if (num >= DPKG_MAX_NUM_OF_EXTRACTS) { 1529 DPAA2_PMD_ERR("Number of extracts overflows"); 1530 return -EINVAL; 1531 } 1532 1533 if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) { 1534 if (field == NH_FLD_IP_SRC) 1535 key_profile->ip_addr_type = IP_SRC_EXTRACT; 1536 else 1537 key_profile->ip_addr_type = IP_DST_EXTRACT; 1538 ipaddr_extract_len = size; 1539 1540 key_profile->ip_addr_extract_pos = num; 1541 if (num > 0) { 1542 key_profile->ip_addr_extract_off = 1543 key_profile->key_offset[num - 1] + 1544 key_profile->key_size[num - 1]; 1545 } else { 1546 key_profile->ip_addr_extract_off = 0; 1547 } 1548 key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE; 1549 } else if (key_profile->ip_addr_type == IP_SRC_EXTRACT) { 1550 if (field == NH_FLD_IP_SRC) { 1551 ipaddr_extract_len = size; 1552 goto rule_configure; 1553 } 1554 key_profile->ip_addr_type = IP_SRC_DST_EXTRACT; 1555 ipaddr_extract_len = size * 2; 1556 key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE; 1557 } else if (key_profile->ip_addr_type == IP_DST_EXTRACT) { 1558 if (field == NH_FLD_IP_DST) { 1559 ipaddr_extract_len = size; 1560 goto rule_configure; 1561 } 1562 key_profile->ip_addr_type = IP_DST_SRC_EXTRACT; 1563 ipaddr_extract_len = size * 2; 1564 key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE; 1565 } 1566 key_profile->num++; 1567 1568 dpkg->extracts[num].extract.from_hdr.prot = prot; 1569 dpkg->extracts[num].extract.from_hdr.field = field; 1570 dpkg->extracts[num].extract.from_hdr.type = DPKG_FULL_FIELD; 1571 dpkg->num_extracts++; 1572 1573 if (dist_type == DPAA2_FLOW_QOS_TYPE) 1574 local_cfg = DPAA2_FLOW_QOS_TYPE; 1575 else 1576 local_cfg = DPAA2_FLOW_FS_TYPE; 1577 1578 rule_configure: 1579 key_addr += key_profile->ip_addr_extract_off; 1580 ip_addr_data = (union ip_addr_extract_rule *)key_addr; 1581 mask_addr += key_profile->ip_addr_extract_off; 1582 ip_addr_mask = (union ip_addr_extract_rule *)mask_addr; 1583 1584 if (orig_prot == NET_PROT_IPV4 && 1585 orig_field == NH_FLD_IPV4_SRC_IP) { 1586 if (key_profile->ip_addr_type == IP_SRC_EXTRACT || 1587 key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) { 1588 memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_src, 1589 key, size); 1590 memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_src, 1591 mask, size); 1592 } else { 1593 memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_src, 1594 key, size); 1595 memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_src, 1596 mask, size); 1597 } 1598 } else if (orig_prot == NET_PROT_IPV4 && 1599 orig_field == NH_FLD_IPV4_DST_IP) { 1600 if (key_profile->ip_addr_type == IP_DST_EXTRACT || 1601 key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) { 1602 memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_dst, 1603 key, size); 1604 memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_dst, 1605 mask, size); 1606 } else { 1607 memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_dst, 1608 key, size); 1609 memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_dst, 1610 mask, size); 1611 } 1612 } else if (orig_prot == NET_PROT_IPV6 && 1613 orig_field == NH_FLD_IPV6_SRC_IP) { 1614 if (key_profile->ip_addr_type == IP_SRC_EXTRACT || 1615 key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) { 1616 memcpy(ip_addr_data->ipv6_sd_addr.ipv6_src, 1617 key, size); 1618 memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_src, 1619 mask, size); 1620 } else { 1621 memcpy(ip_addr_data->ipv6_ds_addr.ipv6_src, 1622 key, size); 1623 memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_src, 1624 mask, size); 1625 } 1626 } else if (orig_prot == NET_PROT_IPV6 && 1627 orig_field == NH_FLD_IPV6_DST_IP) { 1628 if (key_profile->ip_addr_type == IP_DST_EXTRACT || 1629 key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) { 1630 memcpy(ip_addr_data->ipv6_ds_addr.ipv6_dst, 1631 key, size); 1632 memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_dst, 1633 mask, size); 1634 } else { 1635 memcpy(ip_addr_data->ipv6_sd_addr.ipv6_dst, 1636 key, size); 1637 memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_dst, 1638 mask, size); 1639 } 1640 } 1641 1642 if (dist_type == DPAA2_FLOW_QOS_TYPE) { 1643 flow->qos_rule_size = 1644 key_profile->ip_addr_extract_off + ipaddr_extract_len; 1645 } else { 1646 flow->fs_rule_size = 1647 key_profile->ip_addr_extract_off + ipaddr_extract_len; 1648 } 1649 1650 if (recfg) 1651 *recfg |= local_cfg; 1652 1653 return 0; 1654 } 1655 1656 static int 1657 dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow, 1658 struct rte_eth_dev *dev, 1659 const struct rte_flow_attr *attr, 1660 const struct rte_flow_item *pattern, 1661 const struct rte_flow_action actions[] __rte_unused, 1662 struct rte_flow_error *error __rte_unused, 1663 int *device_configured) 1664 { 1665 int ret, local_cfg = 0; 1666 uint32_t group; 1667 const struct rte_flow_item_eth *spec, *mask; 1668 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1669 const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0}; 1670 1671 group = attr->group; 1672 1673 /* Parse pattern list to get the matching parameters */ 1674 spec = pattern->spec; 1675 mask = pattern->mask ? 1676 pattern->mask : &dpaa2_flow_item_eth_mask; 1677 if (!spec) { 1678 DPAA2_PMD_WARN("No pattern spec for Eth flow"); 1679 return -EINVAL; 1680 } 1681 1682 /* Get traffic class index and flow id to be configured */ 1683 flow->tc_id = group; 1684 flow->tc_index = attr->priority; 1685 1686 if (dpaa2_flow_extract_support((const uint8_t *)mask, 1687 RTE_FLOW_ITEM_TYPE_ETH)) { 1688 DPAA2_PMD_WARN("Extract field(s) of ethernet failed"); 1689 1690 return -EINVAL; 1691 } 1692 1693 if (memcmp((const char *)&mask->src, 1694 zero_cmp, RTE_ETHER_ADDR_LEN)) { 1695 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, 1696 NH_FLD_ETH_SA, &spec->src.addr_bytes, 1697 &mask->src.addr_bytes, RTE_ETHER_ADDR_LEN, 1698 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 1699 if (ret) 1700 return ret; 1701 1702 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, 1703 NH_FLD_ETH_SA, &spec->src.addr_bytes, 1704 &mask->src.addr_bytes, RTE_ETHER_ADDR_LEN, 1705 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 1706 if (ret) 1707 return ret; 1708 } 1709 1710 if (memcmp((const char *)&mask->dst, 1711 zero_cmp, RTE_ETHER_ADDR_LEN)) { 1712 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, 1713 NH_FLD_ETH_DA, &spec->dst.addr_bytes, 1714 &mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN, 1715 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 1716 if (ret) 1717 return ret; 1718 1719 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, 1720 NH_FLD_ETH_DA, &spec->dst.addr_bytes, 1721 &mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN, 1722 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 1723 if (ret) 1724 return ret; 1725 } 1726 1727 if (memcmp((const char *)&mask->type, 1728 zero_cmp, sizeof(rte_be16_t))) { 1729 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, 1730 NH_FLD_ETH_TYPE, &spec->type, 1731 &mask->type, sizeof(rte_be16_t), 1732 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 1733 if (ret) 1734 return ret; 1735 1736 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, 1737 NH_FLD_ETH_TYPE, &spec->type, 1738 &mask->type, sizeof(rte_be16_t), 1739 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 1740 if (ret) 1741 return ret; 1742 } 1743 1744 (*device_configured) |= local_cfg; 1745 1746 return 0; 1747 } 1748 1749 static int 1750 dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow, 1751 struct rte_eth_dev *dev, 1752 const struct rte_flow_attr *attr, 1753 const struct rte_flow_item *pattern, 1754 const struct rte_flow_action actions[] __rte_unused, 1755 struct rte_flow_error *error __rte_unused, 1756 int *device_configured) 1757 { 1758 int ret, local_cfg = 0; 1759 uint32_t group; 1760 const struct rte_flow_item_vlan *spec, *mask; 1761 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1762 1763 group = attr->group; 1764 1765 /* Parse pattern list to get the matching parameters */ 1766 spec = pattern->spec; 1767 mask = pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask; 1768 1769 /* Get traffic class index and flow id to be configured */ 1770 flow->tc_id = group; 1771 flow->tc_index = attr->priority; 1772 1773 if (!spec) { 1774 struct prev_proto_field_id prev_proto; 1775 1776 prev_proto.prot = NET_PROT_ETH; 1777 prev_proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); 1778 ret = dpaa2_flow_identify_by_prev_prot(priv, flow, &prev_proto, 1779 DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE, 1780 group, &local_cfg); 1781 if (ret) 1782 return ret; 1783 (*device_configured) |= local_cfg; 1784 return 0; 1785 } 1786 1787 if (dpaa2_flow_extract_support((const uint8_t *)mask, 1788 RTE_FLOW_ITEM_TYPE_VLAN)) { 1789 DPAA2_PMD_WARN("Extract field(s) of vlan not support."); 1790 return -EINVAL; 1791 } 1792 1793 if (!mask->tci) 1794 return 0; 1795 1796 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN, 1797 NH_FLD_VLAN_TCI, &spec->tci, 1798 &mask->tci, sizeof(rte_be16_t), 1799 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 1800 if (ret) 1801 return ret; 1802 1803 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN, 1804 NH_FLD_VLAN_TCI, &spec->tci, 1805 &mask->tci, sizeof(rte_be16_t), 1806 priv, group, &local_cfg, 1807 DPAA2_FLOW_FS_TYPE); 1808 if (ret) 1809 return ret; 1810 1811 (*device_configured) |= local_cfg; 1812 return 0; 1813 } 1814 1815 static int 1816 dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev, 1817 const struct rte_flow_attr *attr, 1818 const struct rte_flow_item *pattern, 1819 const struct rte_flow_action actions[] __rte_unused, 1820 struct rte_flow_error *error __rte_unused, 1821 int *device_configured) 1822 { 1823 int ret, local_cfg = 0; 1824 uint32_t group; 1825 const struct rte_flow_item_ipv4 *spec_ipv4 = 0, *mask_ipv4 = 0; 1826 const void *key, *mask; 1827 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1828 int size; 1829 struct prev_proto_field_id prev_prot; 1830 1831 group = attr->group; 1832 1833 /* Parse pattern list to get the matching parameters */ 1834 spec_ipv4 = pattern->spec; 1835 mask_ipv4 = pattern->mask ? 1836 pattern->mask : &dpaa2_flow_item_ipv4_mask; 1837 1838 /* Get traffic class index and flow id to be configured */ 1839 flow->tc_id = group; 1840 flow->tc_index = attr->priority; 1841 1842 prev_prot.prot = NET_PROT_ETH; 1843 prev_prot.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); 1844 1845 ret = dpaa2_flow_identify_by_prev_prot(priv, flow, &prev_prot, 1846 DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE, group, 1847 &local_cfg); 1848 if (ret) { 1849 DPAA2_PMD_ERR("IPv4 identification failed!"); 1850 return ret; 1851 } 1852 1853 if (!spec_ipv4) 1854 return 0; 1855 1856 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4, 1857 RTE_FLOW_ITEM_TYPE_IPV4)) { 1858 DPAA2_PMD_WARN("Extract field(s) of IPv4 not support."); 1859 return -EINVAL; 1860 } 1861 1862 if (mask_ipv4->hdr.src_addr) { 1863 key = &spec_ipv4->hdr.src_addr; 1864 mask = &mask_ipv4->hdr.src_addr; 1865 size = sizeof(rte_be32_t); 1866 1867 ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4, 1868 NH_FLD_IPV4_SRC_IP, 1869 key, mask, size, priv, 1870 group, &local_cfg, 1871 DPAA2_FLOW_QOS_TYPE); 1872 if (ret) 1873 return ret; 1874 1875 ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4, 1876 NH_FLD_IPV4_SRC_IP, 1877 key, mask, size, priv, 1878 group, &local_cfg, 1879 DPAA2_FLOW_FS_TYPE); 1880 if (ret) 1881 return ret; 1882 } 1883 1884 if (mask_ipv4->hdr.dst_addr) { 1885 key = &spec_ipv4->hdr.dst_addr; 1886 mask = &mask_ipv4->hdr.dst_addr; 1887 size = sizeof(rte_be32_t); 1888 1889 ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4, 1890 NH_FLD_IPV4_DST_IP, 1891 key, mask, size, priv, 1892 group, &local_cfg, 1893 DPAA2_FLOW_QOS_TYPE); 1894 if (ret) 1895 return ret; 1896 ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4, 1897 NH_FLD_IPV4_DST_IP, 1898 key, mask, size, priv, 1899 group, &local_cfg, 1900 DPAA2_FLOW_FS_TYPE); 1901 if (ret) 1902 return ret; 1903 } 1904 1905 if (mask_ipv4->hdr.next_proto_id) { 1906 key = &spec_ipv4->hdr.next_proto_id; 1907 mask = &mask_ipv4->hdr.next_proto_id; 1908 size = sizeof(uint8_t); 1909 1910 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP, 1911 NH_FLD_IP_PROTO, key, 1912 mask, size, priv, group, 1913 &local_cfg, 1914 DPAA2_FLOW_QOS_TYPE); 1915 if (ret) 1916 return ret; 1917 1918 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP, 1919 NH_FLD_IP_PROTO, key, 1920 mask, size, priv, group, 1921 &local_cfg, 1922 DPAA2_FLOW_FS_TYPE); 1923 if (ret) 1924 return ret; 1925 } 1926 1927 (*device_configured) |= local_cfg; 1928 return 0; 1929 } 1930 1931 static int 1932 dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev, 1933 const struct rte_flow_attr *attr, 1934 const struct rte_flow_item *pattern, 1935 const struct rte_flow_action actions[] __rte_unused, 1936 struct rte_flow_error *error __rte_unused, 1937 int *device_configured) 1938 { 1939 int ret, local_cfg = 0; 1940 uint32_t group; 1941 const struct rte_flow_item_ipv6 *spec_ipv6 = 0, *mask_ipv6 = 0; 1942 const void *key, *mask; 1943 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1944 const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0}; 1945 int size; 1946 struct prev_proto_field_id prev_prot; 1947 1948 group = attr->group; 1949 1950 /* Parse pattern list to get the matching parameters */ 1951 spec_ipv6 = pattern->spec; 1952 mask_ipv6 = pattern->mask ? pattern->mask : &dpaa2_flow_item_ipv6_mask; 1953 1954 /* Get traffic class index and flow id to be configured */ 1955 flow->tc_id = group; 1956 flow->tc_index = attr->priority; 1957 1958 prev_prot.prot = NET_PROT_ETH; 1959 prev_prot.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); 1960 1961 ret = dpaa2_flow_identify_by_prev_prot(priv, flow, &prev_prot, 1962 DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE, 1963 group, &local_cfg); 1964 if (ret) { 1965 DPAA2_PMD_ERR("IPv6 identification failed!"); 1966 return ret; 1967 } 1968 1969 if (!spec_ipv6) 1970 return 0; 1971 1972 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6, 1973 RTE_FLOW_ITEM_TYPE_IPV6)) { 1974 DPAA2_PMD_WARN("Extract field(s) of IPv6 not support."); 1975 return -EINVAL; 1976 } 1977 1978 if (memcmp((const char *)&mask_ipv6->hdr.src_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) { 1979 key = &spec_ipv6->hdr.src_addr; 1980 mask = &mask_ipv6->hdr.src_addr; 1981 size = NH_FLD_IPV6_ADDR_SIZE; 1982 1983 ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6, 1984 NH_FLD_IPV6_SRC_IP, 1985 key, mask, size, priv, 1986 group, &local_cfg, 1987 DPAA2_FLOW_QOS_TYPE); 1988 if (ret) 1989 return ret; 1990 1991 ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6, 1992 NH_FLD_IPV6_SRC_IP, 1993 key, mask, size, priv, 1994 group, &local_cfg, 1995 DPAA2_FLOW_FS_TYPE); 1996 if (ret) 1997 return ret; 1998 } 1999 2000 if (memcmp((const char *)&mask_ipv6->hdr.dst_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) { 2001 key = &spec_ipv6->hdr.dst_addr; 2002 mask = &mask_ipv6->hdr.dst_addr; 2003 size = NH_FLD_IPV6_ADDR_SIZE; 2004 2005 ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6, 2006 NH_FLD_IPV6_DST_IP, 2007 key, mask, size, priv, 2008 group, &local_cfg, 2009 DPAA2_FLOW_QOS_TYPE); 2010 if (ret) 2011 return ret; 2012 2013 ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6, 2014 NH_FLD_IPV6_DST_IP, 2015 key, mask, size, priv, 2016 group, &local_cfg, 2017 DPAA2_FLOW_FS_TYPE); 2018 if (ret) 2019 return ret; 2020 } 2021 2022 if (mask_ipv6->hdr.proto) { 2023 key = &spec_ipv6->hdr.proto; 2024 mask = &mask_ipv6->hdr.proto; 2025 size = sizeof(uint8_t); 2026 2027 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP, 2028 NH_FLD_IP_PROTO, key, 2029 mask, size, priv, group, 2030 &local_cfg, 2031 DPAA2_FLOW_QOS_TYPE); 2032 if (ret) 2033 return ret; 2034 2035 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP, 2036 NH_FLD_IP_PROTO, key, 2037 mask, size, priv, group, 2038 &local_cfg, 2039 DPAA2_FLOW_FS_TYPE); 2040 if (ret) 2041 return ret; 2042 } 2043 2044 (*device_configured) |= local_cfg; 2045 return 0; 2046 } 2047 2048 static int 2049 dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow, 2050 struct rte_eth_dev *dev, 2051 const struct rte_flow_attr *attr, 2052 const struct rte_flow_item *pattern, 2053 const struct rte_flow_action actions[] __rte_unused, 2054 struct rte_flow_error *error __rte_unused, 2055 int *device_configured) 2056 { 2057 int ret, local_cfg = 0; 2058 uint32_t group; 2059 const struct rte_flow_item_icmp *spec, *mask; 2060 struct dpaa2_dev_priv *priv = dev->data->dev_private; 2061 2062 group = attr->group; 2063 2064 /* Parse pattern list to get the matching parameters */ 2065 spec = pattern->spec; 2066 mask = pattern->mask ? 2067 pattern->mask : &dpaa2_flow_item_icmp_mask; 2068 2069 /* Get traffic class index and flow id to be configured */ 2070 flow->tc_id = group; 2071 flow->tc_index = attr->priority; 2072 2073 if (!spec) { 2074 /* Next proto of Generical IP is actually used 2075 * for ICMP identification. 2076 * Example: flow create 0 ingress pattern icmp 2077 */ 2078 struct prev_proto_field_id prev_proto; 2079 2080 prev_proto.prot = NET_PROT_IP; 2081 prev_proto.ip_proto = IPPROTO_ICMP; 2082 ret = dpaa2_flow_identify_by_prev_prot(priv, 2083 flow, &prev_proto, 2084 DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE, 2085 group, &local_cfg); 2086 if (ret) 2087 return ret; 2088 2089 (*device_configured) |= local_cfg; 2090 return 0; 2091 } 2092 2093 if (dpaa2_flow_extract_support((const uint8_t *)mask, 2094 RTE_FLOW_ITEM_TYPE_ICMP)) { 2095 DPAA2_PMD_WARN("Extract field(s) of ICMP not support."); 2096 2097 return -EINVAL; 2098 } 2099 2100 if (mask->hdr.icmp_type) { 2101 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP, 2102 NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type, 2103 &mask->hdr.icmp_type, sizeof(uint8_t), 2104 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 2105 if (ret) 2106 return ret; 2107 2108 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP, 2109 NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type, 2110 &mask->hdr.icmp_type, sizeof(uint8_t), 2111 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 2112 if (ret) 2113 return ret; 2114 } 2115 2116 if (mask->hdr.icmp_code) { 2117 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP, 2118 NH_FLD_ICMP_CODE, &spec->hdr.icmp_code, 2119 &mask->hdr.icmp_code, sizeof(uint8_t), 2120 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 2121 if (ret) 2122 return ret; 2123 2124 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP, 2125 NH_FLD_ICMP_CODE, &spec->hdr.icmp_code, 2126 &mask->hdr.icmp_code, sizeof(uint8_t), 2127 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 2128 if (ret) 2129 return ret; 2130 } 2131 2132 (*device_configured) |= local_cfg; 2133 2134 return 0; 2135 } 2136 2137 static int 2138 dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow, 2139 struct rte_eth_dev *dev, 2140 const struct rte_flow_attr *attr, 2141 const struct rte_flow_item *pattern, 2142 const struct rte_flow_action actions[] __rte_unused, 2143 struct rte_flow_error *error __rte_unused, 2144 int *device_configured) 2145 { 2146 int ret, local_cfg = 0; 2147 uint32_t group; 2148 const struct rte_flow_item_udp *spec, *mask; 2149 struct dpaa2_dev_priv *priv = dev->data->dev_private; 2150 2151 group = attr->group; 2152 2153 /* Parse pattern list to get the matching parameters */ 2154 spec = pattern->spec; 2155 mask = pattern->mask ? 2156 pattern->mask : &dpaa2_flow_item_udp_mask; 2157 2158 /* Get traffic class index and flow id to be configured */ 2159 flow->tc_id = group; 2160 flow->tc_index = attr->priority; 2161 2162 if (!spec || !mc_l4_port_identification) { 2163 struct prev_proto_field_id prev_proto; 2164 2165 prev_proto.prot = NET_PROT_IP; 2166 prev_proto.ip_proto = IPPROTO_UDP; 2167 ret = dpaa2_flow_identify_by_prev_prot(priv, 2168 flow, &prev_proto, 2169 DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE, 2170 group, &local_cfg); 2171 if (ret) 2172 return ret; 2173 2174 (*device_configured) |= local_cfg; 2175 2176 if (!spec) 2177 return 0; 2178 } 2179 2180 if (dpaa2_flow_extract_support((const uint8_t *)mask, 2181 RTE_FLOW_ITEM_TYPE_UDP)) { 2182 DPAA2_PMD_WARN("Extract field(s) of UDP not support."); 2183 2184 return -EINVAL; 2185 } 2186 2187 if (mask->hdr.src_port) { 2188 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP, 2189 NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port, 2190 &mask->hdr.src_port, sizeof(rte_be16_t), 2191 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 2192 if (ret) 2193 return ret; 2194 2195 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP, 2196 NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port, 2197 &mask->hdr.src_port, sizeof(rte_be16_t), 2198 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 2199 if (ret) 2200 return ret; 2201 } 2202 2203 if (mask->hdr.dst_port) { 2204 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP, 2205 NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port, 2206 &mask->hdr.dst_port, sizeof(rte_be16_t), 2207 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 2208 if (ret) 2209 return ret; 2210 2211 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP, 2212 NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port, 2213 &mask->hdr.dst_port, sizeof(rte_be16_t), 2214 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 2215 if (ret) 2216 return ret; 2217 } 2218 2219 (*device_configured) |= local_cfg; 2220 2221 return 0; 2222 } 2223 2224 static int 2225 dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow, 2226 struct rte_eth_dev *dev, 2227 const struct rte_flow_attr *attr, 2228 const struct rte_flow_item *pattern, 2229 const struct rte_flow_action actions[] __rte_unused, 2230 struct rte_flow_error *error __rte_unused, 2231 int *device_configured) 2232 { 2233 int ret, local_cfg = 0; 2234 uint32_t group; 2235 const struct rte_flow_item_tcp *spec, *mask; 2236 struct dpaa2_dev_priv *priv = dev->data->dev_private; 2237 2238 group = attr->group; 2239 2240 /* Parse pattern list to get the matching parameters */ 2241 spec = pattern->spec; 2242 mask = pattern->mask ? 2243 pattern->mask : &dpaa2_flow_item_tcp_mask; 2244 2245 /* Get traffic class index and flow id to be configured */ 2246 flow->tc_id = group; 2247 flow->tc_index = attr->priority; 2248 2249 if (!spec || !mc_l4_port_identification) { 2250 struct prev_proto_field_id prev_proto; 2251 2252 prev_proto.prot = NET_PROT_IP; 2253 prev_proto.ip_proto = IPPROTO_TCP; 2254 ret = dpaa2_flow_identify_by_prev_prot(priv, 2255 flow, &prev_proto, 2256 DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE, 2257 group, &local_cfg); 2258 if (ret) 2259 return ret; 2260 2261 (*device_configured) |= local_cfg; 2262 2263 if (!spec) 2264 return 0; 2265 } 2266 2267 if (dpaa2_flow_extract_support((const uint8_t *)mask, 2268 RTE_FLOW_ITEM_TYPE_TCP)) { 2269 DPAA2_PMD_WARN("Extract field(s) of TCP not support."); 2270 2271 return -EINVAL; 2272 } 2273 2274 if (mask->hdr.src_port) { 2275 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP, 2276 NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port, 2277 &mask->hdr.src_port, sizeof(rte_be16_t), 2278 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 2279 if (ret) 2280 return ret; 2281 2282 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP, 2283 NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port, 2284 &mask->hdr.src_port, sizeof(rte_be16_t), 2285 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 2286 if (ret) 2287 return ret; 2288 } 2289 2290 if (mask->hdr.dst_port) { 2291 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP, 2292 NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port, 2293 &mask->hdr.dst_port, sizeof(rte_be16_t), 2294 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 2295 if (ret) 2296 return ret; 2297 2298 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP, 2299 NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port, 2300 &mask->hdr.dst_port, sizeof(rte_be16_t), 2301 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 2302 if (ret) 2303 return ret; 2304 } 2305 2306 (*device_configured) |= local_cfg; 2307 2308 return 0; 2309 } 2310 2311 static int 2312 dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow, 2313 struct rte_eth_dev *dev, 2314 const struct rte_flow_attr *attr, 2315 const struct rte_flow_item *pattern, 2316 const struct rte_flow_action actions[] __rte_unused, 2317 struct rte_flow_error *error __rte_unused, 2318 int *device_configured) 2319 { 2320 int ret, local_cfg = 0; 2321 uint32_t group; 2322 const struct rte_flow_item_sctp *spec, *mask; 2323 struct dpaa2_dev_priv *priv = dev->data->dev_private; 2324 2325 group = attr->group; 2326 2327 /* Parse pattern list to get the matching parameters */ 2328 spec = pattern->spec; 2329 mask = pattern->mask ? 2330 pattern->mask : &dpaa2_flow_item_sctp_mask; 2331 2332 /* Get traffic class index and flow id to be configured */ 2333 flow->tc_id = group; 2334 flow->tc_index = attr->priority; 2335 2336 if (!spec || !mc_l4_port_identification) { 2337 struct prev_proto_field_id prev_proto; 2338 2339 prev_proto.prot = NET_PROT_IP; 2340 prev_proto.ip_proto = IPPROTO_SCTP; 2341 ret = dpaa2_flow_identify_by_prev_prot(priv, 2342 flow, &prev_proto, 2343 DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE, 2344 group, &local_cfg); 2345 if (ret) 2346 return ret; 2347 2348 (*device_configured) |= local_cfg; 2349 2350 if (!spec) 2351 return 0; 2352 } 2353 2354 if (dpaa2_flow_extract_support((const uint8_t *)mask, 2355 RTE_FLOW_ITEM_TYPE_SCTP)) { 2356 DPAA2_PMD_WARN("Extract field(s) of SCTP not support."); 2357 2358 return -1; 2359 } 2360 2361 if (mask->hdr.src_port) { 2362 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP, 2363 NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port, 2364 &mask->hdr.src_port, sizeof(rte_be16_t), 2365 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 2366 if (ret) 2367 return ret; 2368 2369 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP, 2370 NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port, 2371 &mask->hdr.src_port, sizeof(rte_be16_t), 2372 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 2373 if (ret) 2374 return ret; 2375 } 2376 2377 if (mask->hdr.dst_port) { 2378 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP, 2379 NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port, 2380 &mask->hdr.dst_port, sizeof(rte_be16_t), 2381 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 2382 if (ret) 2383 return ret; 2384 2385 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP, 2386 NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port, 2387 &mask->hdr.dst_port, sizeof(rte_be16_t), 2388 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 2389 if (ret) 2390 return ret; 2391 } 2392 2393 (*device_configured) |= local_cfg; 2394 2395 return 0; 2396 } 2397 2398 static int 2399 dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow, 2400 struct rte_eth_dev *dev, 2401 const struct rte_flow_attr *attr, 2402 const struct rte_flow_item *pattern, 2403 const struct rte_flow_action actions[] __rte_unused, 2404 struct rte_flow_error *error __rte_unused, 2405 int *device_configured) 2406 { 2407 int ret, local_cfg = 0; 2408 uint32_t group; 2409 const struct rte_flow_item_gre *spec, *mask; 2410 struct dpaa2_dev_priv *priv = dev->data->dev_private; 2411 2412 group = attr->group; 2413 2414 /* Parse pattern list to get the matching parameters */ 2415 spec = pattern->spec; 2416 mask = pattern->mask ? 2417 pattern->mask : &dpaa2_flow_item_gre_mask; 2418 2419 /* Get traffic class index and flow id to be configured */ 2420 flow->tc_id = group; 2421 flow->tc_index = attr->priority; 2422 2423 if (!spec) { 2424 struct prev_proto_field_id prev_proto; 2425 2426 prev_proto.prot = NET_PROT_IP; 2427 prev_proto.ip_proto = IPPROTO_GRE; 2428 ret = dpaa2_flow_identify_by_prev_prot(priv, 2429 flow, &prev_proto, 2430 DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE, 2431 group, &local_cfg); 2432 if (ret) 2433 return ret; 2434 2435 (*device_configured) |= local_cfg; 2436 2437 if (!spec) 2438 return 0; 2439 } 2440 2441 if (dpaa2_flow_extract_support((const uint8_t *)mask, 2442 RTE_FLOW_ITEM_TYPE_GRE)) { 2443 DPAA2_PMD_WARN("Extract field(s) of GRE not support."); 2444 2445 return -1; 2446 } 2447 2448 if (!mask->protocol) 2449 return 0; 2450 2451 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE, 2452 NH_FLD_GRE_TYPE, &spec->protocol, 2453 &mask->protocol, sizeof(rte_be16_t), 2454 priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); 2455 if (ret) 2456 return ret; 2457 2458 ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE, 2459 NH_FLD_GRE_TYPE, &spec->protocol, 2460 &mask->protocol, sizeof(rte_be16_t), 2461 priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); 2462 if (ret) 2463 return ret; 2464 2465 (*device_configured) |= local_cfg; 2466 2467 return 0; 2468 } 2469 2470 static int 2471 dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow, 2472 struct rte_eth_dev *dev, 2473 const struct rte_flow_attr *attr, 2474 const struct rte_flow_item *pattern, 2475 const struct rte_flow_action actions[] __rte_unused, 2476 struct rte_flow_error *error __rte_unused, 2477 int *device_configured) 2478 { 2479 struct dpaa2_dev_priv *priv = dev->data->dev_private; 2480 const struct rte_flow_item_raw *spec = pattern->spec; 2481 const struct rte_flow_item_raw *mask = pattern->mask; 2482 int local_cfg = 0, ret; 2483 uint32_t group; 2484 struct dpaa2_key_extract *qos_key_extract; 2485 struct dpaa2_key_extract *tc_key_extract; 2486 2487 /* Need both spec and mask */ 2488 if (!spec || !mask) { 2489 DPAA2_PMD_ERR("spec or mask not present."); 2490 return -EINVAL; 2491 } 2492 2493 if (spec->relative) { 2494 /* TBD: relative offset support. 2495 * To support relative offset of previous L3 protocol item, 2496 * extracts should be expanded to identify if the frame is: 2497 * vlan or none-vlan. 2498 * 2499 * To support relative offset of previous L4 protocol item, 2500 * extracts should be expanded to identify if the frame is: 2501 * vlan/IPv4 or vlan/IPv6 or none-vlan/IPv4 or none-vlan/IPv6. 2502 */ 2503 DPAA2_PMD_ERR("relative not supported."); 2504 return -EINVAL; 2505 } 2506 2507 if (spec->search) { 2508 DPAA2_PMD_ERR("search not supported."); 2509 return -EINVAL; 2510 } 2511 2512 /* Spec len and mask len should be same */ 2513 if (spec->length != mask->length) { 2514 DPAA2_PMD_ERR("Spec len and mask len mismatch."); 2515 return -EINVAL; 2516 } 2517 2518 /* Get traffic class index and flow id to be configured */ 2519 group = attr->group; 2520 flow->tc_id = group; 2521 flow->tc_index = attr->priority; 2522 2523 qos_key_extract = &priv->extract.qos_key_extract; 2524 tc_key_extract = &priv->extract.tc_key_extract[group]; 2525 2526 ret = dpaa2_flow_extract_add_raw(priv, 2527 spec->offset, spec->length, 2528 DPAA2_FLOW_QOS_TYPE, 0, &local_cfg); 2529 if (ret) { 2530 DPAA2_PMD_ERR("QoS Extract RAW add failed."); 2531 return -EINVAL; 2532 } 2533 2534 ret = dpaa2_flow_extract_add_raw(priv, 2535 spec->offset, spec->length, 2536 DPAA2_FLOW_FS_TYPE, group, &local_cfg); 2537 if (ret) { 2538 DPAA2_PMD_ERR("FS[%d] Extract RAW add failed.", 2539 group); 2540 return -EINVAL; 2541 } 2542 2543 ret = dpaa2_flow_raw_rule_data_set(flow, 2544 &qos_key_extract->key_profile, 2545 spec->offset, spec->length, 2546 spec->pattern, mask->pattern, 2547 DPAA2_FLOW_QOS_TYPE); 2548 if (ret) { 2549 DPAA2_PMD_ERR("QoS RAW rule data set failed"); 2550 return -EINVAL; 2551 } 2552 2553 ret = dpaa2_flow_raw_rule_data_set(flow, 2554 &tc_key_extract->key_profile, 2555 spec->offset, spec->length, 2556 spec->pattern, mask->pattern, 2557 DPAA2_FLOW_FS_TYPE); 2558 if (ret) { 2559 DPAA2_PMD_ERR("FS RAW rule data set failed"); 2560 return -EINVAL; 2561 } 2562 2563 (*device_configured) |= local_cfg; 2564 2565 return 0; 2566 } 2567 2568 static inline int 2569 dpaa2_flow_verify_attr(struct dpaa2_dev_priv *priv, 2570 const struct rte_flow_attr *attr) 2571 { 2572 struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows); 2573 2574 while (curr) { 2575 if (curr->tc_id == attr->group && 2576 curr->tc_index == attr->priority) { 2577 DPAA2_PMD_ERR("Flow(TC[%d].entry[%d] exists", 2578 attr->group, attr->priority); 2579 2580 return -EINVAL; 2581 } 2582 curr = LIST_NEXT(curr, next); 2583 } 2584 2585 return 0; 2586 } 2587 2588 static inline struct rte_eth_dev * 2589 dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv, 2590 const struct rte_flow_action *action) 2591 { 2592 const struct rte_flow_action_port_id *port_id; 2593 const struct rte_flow_action_ethdev *ethdev; 2594 int idx = -1; 2595 struct rte_eth_dev *dest_dev; 2596 2597 if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) { 2598 port_id = action->conf; 2599 if (!port_id->original) 2600 idx = port_id->id; 2601 } else if (action->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) { 2602 ethdev = action->conf; 2603 idx = ethdev->port_id; 2604 } else { 2605 return NULL; 2606 } 2607 2608 if (idx >= 0) { 2609 if (!rte_eth_dev_is_valid_port(idx)) 2610 return NULL; 2611 if (!rte_pmd_dpaa2_dev_is_dpaa2(idx)) 2612 return NULL; 2613 dest_dev = &rte_eth_devices[idx]; 2614 } else { 2615 dest_dev = priv->eth_dev; 2616 } 2617 2618 return dest_dev; 2619 } 2620 2621 static inline int 2622 dpaa2_flow_verify_action(struct dpaa2_dev_priv *priv, 2623 const struct rte_flow_attr *attr, 2624 const struct rte_flow_action actions[]) 2625 { 2626 int end_of_list = 0, i, j = 0; 2627 const struct rte_flow_action_queue *dest_queue; 2628 const struct rte_flow_action_rss *rss_conf; 2629 struct dpaa2_queue *rxq; 2630 2631 while (!end_of_list) { 2632 switch (actions[j].type) { 2633 case RTE_FLOW_ACTION_TYPE_QUEUE: 2634 dest_queue = actions[j].conf; 2635 rxq = priv->rx_vq[dest_queue->index]; 2636 if (attr->group != rxq->tc_index) { 2637 DPAA2_PMD_ERR("FSQ(%d.%d) not in TC[%d]", 2638 rxq->tc_index, rxq->flow_id, 2639 attr->group); 2640 2641 return -ENOTSUP; 2642 } 2643 break; 2644 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: 2645 case RTE_FLOW_ACTION_TYPE_PORT_ID: 2646 if (!dpaa2_flow_redirect_dev(priv, &actions[j])) { 2647 DPAA2_PMD_ERR("Invalid port id of action"); 2648 return -ENOTSUP; 2649 } 2650 break; 2651 case RTE_FLOW_ACTION_TYPE_RSS: 2652 rss_conf = (const struct rte_flow_action_rss *) 2653 (actions[j].conf); 2654 if (rss_conf->queue_num > priv->dist_queues) { 2655 DPAA2_PMD_ERR("RSS number too large"); 2656 return -ENOTSUP; 2657 } 2658 for (i = 0; i < (int)rss_conf->queue_num; i++) { 2659 if (rss_conf->queue[i] >= priv->nb_rx_queues) { 2660 DPAA2_PMD_ERR("RSS queue not in range"); 2661 return -ENOTSUP; 2662 } 2663 rxq = priv->rx_vq[rss_conf->queue[i]]; 2664 if (rxq->tc_index != attr->group) { 2665 DPAA2_PMD_ERR("RSS queue not in group"); 2666 return -ENOTSUP; 2667 } 2668 } 2669 2670 break; 2671 case RTE_FLOW_ACTION_TYPE_END: 2672 end_of_list = 1; 2673 break; 2674 default: 2675 DPAA2_PMD_ERR("Invalid action type"); 2676 return -ENOTSUP; 2677 } 2678 j++; 2679 } 2680 2681 return 0; 2682 } 2683 2684 static int 2685 dpaa2_configure_flow_fs_action(struct dpaa2_dev_priv *priv, 2686 struct dpaa2_dev_flow *flow, 2687 const struct rte_flow_action *rte_action) 2688 { 2689 struct rte_eth_dev *dest_dev; 2690 struct dpaa2_dev_priv *dest_priv; 2691 const struct rte_flow_action_queue *dest_queue; 2692 struct dpaa2_queue *dest_q; 2693 2694 memset(&flow->fs_action_cfg, 0, 2695 sizeof(struct dpni_fs_action_cfg)); 2696 flow->action_type = rte_action->type; 2697 2698 if (flow->action_type == RTE_FLOW_ACTION_TYPE_QUEUE) { 2699 dest_queue = rte_action->conf; 2700 dest_q = priv->rx_vq[dest_queue->index]; 2701 flow->fs_action_cfg.flow_id = dest_q->flow_id; 2702 } else if (flow->action_type == RTE_FLOW_ACTION_TYPE_PORT_ID || 2703 flow->action_type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) { 2704 dest_dev = dpaa2_flow_redirect_dev(priv, rte_action); 2705 if (!dest_dev) { 2706 DPAA2_PMD_ERR("Invalid device to redirect"); 2707 return -EINVAL; 2708 } 2709 2710 dest_priv = dest_dev->data->dev_private; 2711 dest_q = dest_priv->tx_vq[0]; 2712 flow->fs_action_cfg.options = 2713 DPNI_FS_OPT_REDIRECT_TO_DPNI_TX; 2714 flow->fs_action_cfg.redirect_obj_token = 2715 dest_priv->token; 2716 flow->fs_action_cfg.flow_id = dest_q->flow_id; 2717 } 2718 2719 return 0; 2720 } 2721 2722 static inline uint16_t 2723 dpaa2_flow_entry_size(uint16_t key_max_size) 2724 { 2725 if (key_max_size > DPAA2_FLOW_ENTRY_MAX_SIZE) { 2726 DPAA2_PMD_ERR("Key size(%d) > max(%d)", 2727 key_max_size, 2728 DPAA2_FLOW_ENTRY_MAX_SIZE); 2729 2730 return 0; 2731 } 2732 2733 if (key_max_size > DPAA2_FLOW_ENTRY_MIN_SIZE) 2734 return DPAA2_FLOW_ENTRY_MAX_SIZE; 2735 2736 /* Current MC only support fixed entry size(56)*/ 2737 return DPAA2_FLOW_ENTRY_MAX_SIZE; 2738 } 2739 2740 static inline int 2741 dpaa2_flow_clear_fs_table(struct dpaa2_dev_priv *priv, 2742 uint8_t tc_id) 2743 { 2744 struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows); 2745 int need_clear = 0, ret; 2746 struct fsl_mc_io *dpni = priv->hw; 2747 2748 while (curr) { 2749 if (curr->tc_id == tc_id) { 2750 need_clear = 1; 2751 break; 2752 } 2753 curr = LIST_NEXT(curr, next); 2754 } 2755 2756 if (need_clear) { 2757 ret = dpni_clear_fs_entries(dpni, CMD_PRI_LOW, 2758 priv->token, tc_id); 2759 if (ret) { 2760 DPAA2_PMD_ERR("TC[%d] clear failed", tc_id); 2761 return ret; 2762 } 2763 } 2764 2765 return 0; 2766 } 2767 2768 static int 2769 dpaa2_configure_fs_rss_table(struct dpaa2_dev_priv *priv, 2770 uint8_t tc_id, uint16_t dist_size, int rss_dist) 2771 { 2772 struct dpaa2_key_extract *tc_extract; 2773 uint8_t *key_cfg_buf; 2774 uint64_t key_cfg_iova; 2775 int ret; 2776 struct dpni_rx_dist_cfg tc_cfg; 2777 struct fsl_mc_io *dpni = priv->hw; 2778 uint16_t entry_size; 2779 uint16_t key_max_size; 2780 2781 ret = dpaa2_flow_clear_fs_table(priv, tc_id); 2782 if (ret < 0) { 2783 DPAA2_PMD_ERR("TC[%d] clear failed", tc_id); 2784 return ret; 2785 } 2786 2787 tc_extract = &priv->extract.tc_key_extract[tc_id]; 2788 key_cfg_buf = priv->extract.tc_extract_param[tc_id]; 2789 key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf); 2790 2791 key_max_size = tc_extract->key_profile.key_max_size; 2792 entry_size = dpaa2_flow_entry_size(key_max_size); 2793 2794 dpaa2_flow_fs_extracts_log(priv, tc_id); 2795 ret = dpkg_prepare_key_cfg(&tc_extract->dpkg, 2796 key_cfg_buf); 2797 if (ret < 0) { 2798 DPAA2_PMD_ERR("TC[%d] prepare key failed", tc_id); 2799 return ret; 2800 } 2801 2802 memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg)); 2803 tc_cfg.dist_size = dist_size; 2804 tc_cfg.key_cfg_iova = key_cfg_iova; 2805 if (rss_dist) 2806 tc_cfg.enable = true; 2807 else 2808 tc_cfg.enable = false; 2809 tc_cfg.tc = tc_id; 2810 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, 2811 priv->token, &tc_cfg); 2812 if (ret < 0) { 2813 if (rss_dist) { 2814 DPAA2_PMD_ERR("RSS TC[%d] set failed", 2815 tc_id); 2816 } else { 2817 DPAA2_PMD_ERR("FS TC[%d] hash disable failed", 2818 tc_id); 2819 } 2820 2821 return ret; 2822 } 2823 2824 if (rss_dist) 2825 return 0; 2826 2827 tc_cfg.enable = true; 2828 tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id; 2829 ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW, 2830 priv->token, &tc_cfg); 2831 if (ret < 0) { 2832 DPAA2_PMD_ERR("TC[%d] FS configured failed", tc_id); 2833 return ret; 2834 } 2835 2836 ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_FS_TYPE, 2837 entry_size, tc_id); 2838 if (ret) 2839 return ret; 2840 2841 return 0; 2842 } 2843 2844 static int 2845 dpaa2_configure_qos_table(struct dpaa2_dev_priv *priv, 2846 int rss_dist) 2847 { 2848 struct dpaa2_key_extract *qos_extract; 2849 uint8_t *key_cfg_buf; 2850 uint64_t key_cfg_iova; 2851 int ret; 2852 struct dpni_qos_tbl_cfg qos_cfg; 2853 struct fsl_mc_io *dpni = priv->hw; 2854 uint16_t entry_size; 2855 uint16_t key_max_size; 2856 2857 if (!rss_dist && priv->num_rx_tc <= 1) { 2858 /* QoS table is effecitive for FS multiple TCs or RSS.*/ 2859 return 0; 2860 } 2861 2862 if (LIST_FIRST(&priv->flows)) { 2863 ret = dpni_clear_qos_table(dpni, CMD_PRI_LOW, 2864 priv->token); 2865 if (ret < 0) { 2866 DPAA2_PMD_ERR("QoS table clear failed"); 2867 return ret; 2868 } 2869 } 2870 2871 qos_extract = &priv->extract.qos_key_extract; 2872 key_cfg_buf = priv->extract.qos_extract_param; 2873 key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf); 2874 2875 key_max_size = qos_extract->key_profile.key_max_size; 2876 entry_size = dpaa2_flow_entry_size(key_max_size); 2877 2878 dpaa2_flow_qos_extracts_log(priv); 2879 2880 ret = dpkg_prepare_key_cfg(&qos_extract->dpkg, 2881 key_cfg_buf); 2882 if (ret < 0) { 2883 DPAA2_PMD_ERR("QoS prepare extract failed"); 2884 return ret; 2885 } 2886 memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg)); 2887 qos_cfg.keep_entries = true; 2888 qos_cfg.key_cfg_iova = key_cfg_iova; 2889 if (rss_dist) { 2890 qos_cfg.discard_on_miss = true; 2891 } else { 2892 qos_cfg.discard_on_miss = false; 2893 qos_cfg.default_tc = 0; 2894 } 2895 2896 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, 2897 priv->token, &qos_cfg); 2898 if (ret < 0) { 2899 DPAA2_PMD_ERR("QoS table set failed"); 2900 return ret; 2901 } 2902 2903 ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_QOS_TYPE, 2904 entry_size, 0); 2905 if (ret) 2906 return ret; 2907 2908 return 0; 2909 } 2910 2911 static int 2912 dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow, 2913 struct rte_eth_dev *dev, 2914 const struct rte_flow_attr *attr, 2915 const struct rte_flow_item pattern[], 2916 const struct rte_flow_action actions[], 2917 struct rte_flow_error *error) 2918 { 2919 const struct rte_flow_action_rss *rss_conf; 2920 int is_keycfg_configured = 0, end_of_list = 0; 2921 int ret = 0, i = 0, j = 0; 2922 struct dpaa2_dev_priv *priv = dev->data->dev_private; 2923 struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows); 2924 uint16_t dist_size, key_size; 2925 struct dpaa2_key_extract *qos_key_extract; 2926 struct dpaa2_key_extract *tc_key_extract; 2927 2928 ret = dpaa2_flow_verify_attr(priv, attr); 2929 if (ret) 2930 return ret; 2931 2932 ret = dpaa2_flow_verify_action(priv, attr, actions); 2933 if (ret) 2934 return ret; 2935 2936 /* Parse pattern list to get the matching parameters */ 2937 while (!end_of_list) { 2938 switch (pattern[i].type) { 2939 case RTE_FLOW_ITEM_TYPE_ETH: 2940 ret = dpaa2_configure_flow_eth(flow, 2941 dev, attr, &pattern[i], actions, error, 2942 &is_keycfg_configured); 2943 if (ret) { 2944 DPAA2_PMD_ERR("ETH flow config failed!"); 2945 return ret; 2946 } 2947 break; 2948 case RTE_FLOW_ITEM_TYPE_VLAN: 2949 ret = dpaa2_configure_flow_vlan(flow, 2950 dev, attr, &pattern[i], actions, error, 2951 &is_keycfg_configured); 2952 if (ret) { 2953 DPAA2_PMD_ERR("vLan flow config failed!"); 2954 return ret; 2955 } 2956 break; 2957 case RTE_FLOW_ITEM_TYPE_IPV4: 2958 ret = dpaa2_configure_flow_ipv4(flow, 2959 dev, attr, &pattern[i], actions, error, 2960 &is_keycfg_configured); 2961 if (ret) { 2962 DPAA2_PMD_ERR("IPV4 flow config failed!"); 2963 return ret; 2964 } 2965 break; 2966 case RTE_FLOW_ITEM_TYPE_IPV6: 2967 ret = dpaa2_configure_flow_ipv6(flow, 2968 dev, attr, &pattern[i], actions, error, 2969 &is_keycfg_configured); 2970 if (ret) { 2971 DPAA2_PMD_ERR("IPV6 flow config failed!"); 2972 return ret; 2973 } 2974 break; 2975 case RTE_FLOW_ITEM_TYPE_ICMP: 2976 ret = dpaa2_configure_flow_icmp(flow, 2977 dev, attr, &pattern[i], actions, error, 2978 &is_keycfg_configured); 2979 if (ret) { 2980 DPAA2_PMD_ERR("ICMP flow config failed!"); 2981 return ret; 2982 } 2983 break; 2984 case RTE_FLOW_ITEM_TYPE_UDP: 2985 ret = dpaa2_configure_flow_udp(flow, 2986 dev, attr, &pattern[i], actions, error, 2987 &is_keycfg_configured); 2988 if (ret) { 2989 DPAA2_PMD_ERR("UDP flow config failed!"); 2990 return ret; 2991 } 2992 break; 2993 case RTE_FLOW_ITEM_TYPE_TCP: 2994 ret = dpaa2_configure_flow_tcp(flow, 2995 dev, attr, &pattern[i], actions, error, 2996 &is_keycfg_configured); 2997 if (ret) { 2998 DPAA2_PMD_ERR("TCP flow config failed!"); 2999 return ret; 3000 } 3001 break; 3002 case RTE_FLOW_ITEM_TYPE_SCTP: 3003 ret = dpaa2_configure_flow_sctp(flow, 3004 dev, attr, &pattern[i], actions, error, 3005 &is_keycfg_configured); 3006 if (ret) { 3007 DPAA2_PMD_ERR("SCTP flow config failed!"); 3008 return ret; 3009 } 3010 break; 3011 case RTE_FLOW_ITEM_TYPE_GRE: 3012 ret = dpaa2_configure_flow_gre(flow, 3013 dev, attr, &pattern[i], actions, error, 3014 &is_keycfg_configured); 3015 if (ret) { 3016 DPAA2_PMD_ERR("GRE flow config failed!"); 3017 return ret; 3018 } 3019 break; 3020 case RTE_FLOW_ITEM_TYPE_RAW: 3021 ret = dpaa2_configure_flow_raw(flow, 3022 dev, attr, &pattern[i], 3023 actions, error, 3024 &is_keycfg_configured); 3025 if (ret) { 3026 DPAA2_PMD_ERR("RAW flow config failed!"); 3027 return ret; 3028 } 3029 break; 3030 case RTE_FLOW_ITEM_TYPE_END: 3031 end_of_list = 1; 3032 break; /*End of List*/ 3033 default: 3034 DPAA2_PMD_ERR("Invalid action type"); 3035 ret = -ENOTSUP; 3036 break; 3037 } 3038 i++; 3039 } 3040 3041 qos_key_extract = &priv->extract.qos_key_extract; 3042 key_size = qos_key_extract->key_profile.key_max_size; 3043 flow->qos_rule.key_size = dpaa2_flow_entry_size(key_size); 3044 3045 tc_key_extract = &priv->extract.tc_key_extract[flow->tc_id]; 3046 key_size = tc_key_extract->key_profile.key_max_size; 3047 flow->fs_rule.key_size = dpaa2_flow_entry_size(key_size); 3048 3049 /* Let's parse action on matching traffic */ 3050 end_of_list = 0; 3051 while (!end_of_list) { 3052 switch (actions[j].type) { 3053 case RTE_FLOW_ACTION_TYPE_QUEUE: 3054 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: 3055 case RTE_FLOW_ACTION_TYPE_PORT_ID: 3056 ret = dpaa2_configure_flow_fs_action(priv, flow, 3057 &actions[j]); 3058 if (ret) 3059 return ret; 3060 3061 /* Configure FS table first*/ 3062 dist_size = priv->nb_rx_queues / priv->num_rx_tc; 3063 if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) { 3064 ret = dpaa2_configure_fs_rss_table(priv, 3065 flow->tc_id, 3066 dist_size, 3067 false); 3068 if (ret) 3069 return ret; 3070 } 3071 3072 /* Configure QoS table then.*/ 3073 if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) { 3074 ret = dpaa2_configure_qos_table(priv, false); 3075 if (ret) 3076 return ret; 3077 } 3078 3079 if (priv->num_rx_tc > 1) { 3080 ret = dpaa2_flow_add_qos_rule(priv, flow); 3081 if (ret) 3082 return ret; 3083 } 3084 3085 if (flow->tc_index >= priv->fs_entries) { 3086 DPAA2_PMD_ERR("FS table with %d entries full", 3087 priv->fs_entries); 3088 return -1; 3089 } 3090 3091 ret = dpaa2_flow_add_fs_rule(priv, flow); 3092 if (ret) 3093 return ret; 3094 3095 break; 3096 case RTE_FLOW_ACTION_TYPE_RSS: 3097 rss_conf = actions[j].conf; 3098 flow->action_type = RTE_FLOW_ACTION_TYPE_RSS; 3099 3100 ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types, 3101 &tc_key_extract->dpkg); 3102 if (ret < 0) { 3103 DPAA2_PMD_ERR("TC[%d] distset RSS failed", 3104 flow->tc_id); 3105 return ret; 3106 } 3107 3108 dist_size = rss_conf->queue_num; 3109 if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) { 3110 ret = dpaa2_configure_fs_rss_table(priv, 3111 flow->tc_id, 3112 dist_size, 3113 true); 3114 if (ret) 3115 return ret; 3116 } 3117 3118 if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) { 3119 ret = dpaa2_configure_qos_table(priv, true); 3120 if (ret) 3121 return ret; 3122 } 3123 3124 ret = dpaa2_flow_add_qos_rule(priv, flow); 3125 if (ret) 3126 return ret; 3127 3128 ret = dpaa2_flow_add_fs_rule(priv, flow); 3129 if (ret) 3130 return ret; 3131 3132 break; 3133 case RTE_FLOW_ACTION_TYPE_END: 3134 end_of_list = 1; 3135 break; 3136 default: 3137 DPAA2_PMD_ERR("Invalid action type"); 3138 ret = -ENOTSUP; 3139 break; 3140 } 3141 j++; 3142 } 3143 3144 if (!ret) { 3145 /* New rules are inserted. */ 3146 if (!curr) { 3147 LIST_INSERT_HEAD(&priv->flows, flow, next); 3148 } else { 3149 while (LIST_NEXT(curr, next)) 3150 curr = LIST_NEXT(curr, next); 3151 LIST_INSERT_AFTER(curr, flow, next); 3152 } 3153 } 3154 return ret; 3155 } 3156 3157 static inline int 3158 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr, 3159 const struct rte_flow_attr *attr) 3160 { 3161 int ret = 0; 3162 3163 if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) { 3164 DPAA2_PMD_ERR("Priority group is out of range"); 3165 ret = -ENOTSUP; 3166 } 3167 if (unlikely(attr->priority >= dpni_attr->fs_entries)) { 3168 DPAA2_PMD_ERR("Priority within the group is out of range"); 3169 ret = -ENOTSUP; 3170 } 3171 if (unlikely(attr->egress)) { 3172 DPAA2_PMD_ERR( 3173 "Flow configuration is not supported on egress side"); 3174 ret = -ENOTSUP; 3175 } 3176 if (unlikely(!attr->ingress)) { 3177 DPAA2_PMD_ERR("Ingress flag must be configured"); 3178 ret = -EINVAL; 3179 } 3180 return ret; 3181 } 3182 3183 static inline int 3184 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[]) 3185 { 3186 unsigned int i, j, is_found = 0; 3187 int ret = 0; 3188 3189 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) { 3190 for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) { 3191 if (dpaa2_supported_pattern_type[i] 3192 == pattern[j].type) { 3193 is_found = 1; 3194 break; 3195 } 3196 } 3197 if (!is_found) { 3198 ret = -ENOTSUP; 3199 break; 3200 } 3201 } 3202 /* Lets verify other combinations of given pattern rules */ 3203 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) { 3204 if (!pattern[j].spec) { 3205 ret = -EINVAL; 3206 break; 3207 } 3208 } 3209 3210 return ret; 3211 } 3212 3213 static inline int 3214 dpaa2_dev_verify_actions(const struct rte_flow_action actions[]) 3215 { 3216 unsigned int i, j, is_found = 0; 3217 int ret = 0; 3218 3219 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) { 3220 for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) { 3221 if (dpaa2_supported_action_type[i] == actions[j].type) { 3222 is_found = 1; 3223 break; 3224 } 3225 } 3226 if (!is_found) { 3227 ret = -ENOTSUP; 3228 break; 3229 } 3230 } 3231 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) { 3232 if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP && 3233 !actions[j].conf) 3234 ret = -EINVAL; 3235 } 3236 return ret; 3237 } 3238 3239 static int 3240 dpaa2_flow_validate(struct rte_eth_dev *dev, 3241 const struct rte_flow_attr *flow_attr, 3242 const struct rte_flow_item pattern[], 3243 const struct rte_flow_action actions[], 3244 struct rte_flow_error *error) 3245 { 3246 struct dpaa2_dev_priv *priv = dev->data->dev_private; 3247 struct dpni_attr dpni_attr; 3248 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; 3249 uint16_t token = priv->token; 3250 int ret = 0; 3251 3252 memset(&dpni_attr, 0, sizeof(struct dpni_attr)); 3253 ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr); 3254 if (ret < 0) { 3255 DPAA2_PMD_ERR( 3256 "Failure to get dpni@%p attribute, err code %d", 3257 dpni, ret); 3258 rte_flow_error_set(error, EPERM, 3259 RTE_FLOW_ERROR_TYPE_ATTR, 3260 flow_attr, "invalid"); 3261 return ret; 3262 } 3263 3264 /* Verify input attributes */ 3265 ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr); 3266 if (ret < 0) { 3267 DPAA2_PMD_ERR( 3268 "Invalid attributes are given"); 3269 rte_flow_error_set(error, EPERM, 3270 RTE_FLOW_ERROR_TYPE_ATTR, 3271 flow_attr, "invalid"); 3272 goto not_valid_params; 3273 } 3274 /* Verify input pattern list */ 3275 ret = dpaa2_dev_verify_patterns(pattern); 3276 if (ret < 0) { 3277 DPAA2_PMD_ERR( 3278 "Invalid pattern list is given"); 3279 rte_flow_error_set(error, EPERM, 3280 RTE_FLOW_ERROR_TYPE_ITEM, 3281 pattern, "invalid"); 3282 goto not_valid_params; 3283 } 3284 /* Verify input action list */ 3285 ret = dpaa2_dev_verify_actions(actions); 3286 if (ret < 0) { 3287 DPAA2_PMD_ERR( 3288 "Invalid action list is given"); 3289 rte_flow_error_set(error, EPERM, 3290 RTE_FLOW_ERROR_TYPE_ACTION, 3291 actions, "invalid"); 3292 goto not_valid_params; 3293 } 3294 not_valid_params: 3295 return ret; 3296 } 3297 3298 static struct rte_flow * 3299 dpaa2_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, 3300 const struct rte_flow_item pattern[], 3301 const struct rte_flow_action actions[], 3302 struct rte_flow_error *error) 3303 { 3304 struct dpaa2_dev_flow *flow = NULL; 3305 struct dpaa2_dev_priv *priv = dev->data->dev_private; 3306 int ret; 3307 3308 dpaa2_flow_control_log = 3309 getenv("DPAA2_FLOW_CONTROL_LOG"); 3310 3311 if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) { 3312 dpaa2_flow_miss_flow_id = 3313 (uint16_t)atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")); 3314 if (dpaa2_flow_miss_flow_id >= priv->dist_queues) { 3315 DPAA2_PMD_ERR("Missed flow ID %d >= dist size(%d)", 3316 dpaa2_flow_miss_flow_id, 3317 priv->dist_queues); 3318 return NULL; 3319 } 3320 } 3321 3322 flow = rte_zmalloc(NULL, sizeof(struct dpaa2_dev_flow), 3323 RTE_CACHE_LINE_SIZE); 3324 if (!flow) { 3325 DPAA2_PMD_ERR("Failure to allocate memory for flow"); 3326 goto mem_failure; 3327 } 3328 3329 /* Allocate DMA'ble memory to write the qos rules */ 3330 flow->qos_key_addr = rte_zmalloc(NULL, 256, 64); 3331 if (!flow->qos_key_addr) { 3332 DPAA2_PMD_ERR("Memory allocation failed"); 3333 goto mem_failure; 3334 } 3335 flow->qos_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->qos_key_addr); 3336 3337 flow->qos_mask_addr = rte_zmalloc(NULL, 256, 64); 3338 if (!flow->qos_mask_addr) { 3339 DPAA2_PMD_ERR("Memory allocation failed"); 3340 goto mem_failure; 3341 } 3342 flow->qos_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->qos_mask_addr); 3343 3344 /* Allocate DMA'ble memory to write the FS rules */ 3345 flow->fs_key_addr = rte_zmalloc(NULL, 256, 64); 3346 if (!flow->fs_key_addr) { 3347 DPAA2_PMD_ERR("Memory allocation failed"); 3348 goto mem_failure; 3349 } 3350 flow->fs_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->fs_key_addr); 3351 3352 flow->fs_mask_addr = rte_zmalloc(NULL, 256, 64); 3353 if (!flow->fs_mask_addr) { 3354 DPAA2_PMD_ERR("Memory allocation failed"); 3355 goto mem_failure; 3356 } 3357 flow->fs_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->fs_mask_addr); 3358 3359 priv->curr = flow; 3360 3361 ret = dpaa2_generic_flow_set(flow, dev, attr, pattern, actions, error); 3362 if (ret < 0) { 3363 if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION) 3364 rte_flow_error_set(error, EPERM, 3365 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3366 attr, "unknown"); 3367 DPAA2_PMD_ERR("Create flow failed (%d)", ret); 3368 goto creation_error; 3369 } 3370 3371 priv->curr = NULL; 3372 return (struct rte_flow *)flow; 3373 3374 mem_failure: 3375 rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 3376 "memory alloc"); 3377 3378 creation_error: 3379 if (flow) { 3380 if (flow->qos_key_addr) 3381 rte_free(flow->qos_key_addr); 3382 if (flow->qos_mask_addr) 3383 rte_free(flow->qos_mask_addr); 3384 if (flow->fs_key_addr) 3385 rte_free(flow->fs_key_addr); 3386 if (flow->fs_mask_addr) 3387 rte_free(flow->fs_mask_addr); 3388 rte_free(flow); 3389 } 3390 priv->curr = NULL; 3391 3392 return NULL; 3393 } 3394 3395 static int 3396 dpaa2_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *_flow, 3397 struct rte_flow_error *error) 3398 { 3399 int ret = 0; 3400 struct dpaa2_dev_flow *flow; 3401 struct dpaa2_dev_priv *priv = dev->data->dev_private; 3402 struct fsl_mc_io *dpni = priv->hw; 3403 3404 flow = (struct dpaa2_dev_flow *)_flow; 3405 3406 switch (flow->action_type) { 3407 case RTE_FLOW_ACTION_TYPE_QUEUE: 3408 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: 3409 case RTE_FLOW_ACTION_TYPE_PORT_ID: 3410 if (priv->num_rx_tc > 1) { 3411 /* Remove entry from QoS table first */ 3412 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, 3413 priv->token, 3414 &flow->qos_rule); 3415 if (ret < 0) { 3416 DPAA2_PMD_ERR("Remove FS QoS entry failed"); 3417 dpaa2_flow_qos_entry_log("Delete failed", flow, 3418 -1); 3419 abort(); 3420 goto error; 3421 } 3422 } 3423 3424 /* Then remove entry from FS table */ 3425 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token, 3426 flow->tc_id, &flow->fs_rule); 3427 if (ret < 0) { 3428 DPAA2_PMD_ERR("Remove entry from FS[%d] failed", 3429 flow->tc_id); 3430 goto error; 3431 } 3432 break; 3433 case RTE_FLOW_ACTION_TYPE_RSS: 3434 if (priv->num_rx_tc > 1) { 3435 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, 3436 priv->token, 3437 &flow->qos_rule); 3438 if (ret < 0) { 3439 DPAA2_PMD_ERR("Remove RSS QoS entry failed"); 3440 goto error; 3441 } 3442 } 3443 break; 3444 default: 3445 DPAA2_PMD_ERR("Action(%d) not supported", flow->action_type); 3446 ret = -ENOTSUP; 3447 break; 3448 } 3449 3450 LIST_REMOVE(flow, next); 3451 if (flow->qos_key_addr) 3452 rte_free(flow->qos_key_addr); 3453 if (flow->qos_mask_addr) 3454 rte_free(flow->qos_mask_addr); 3455 if (flow->fs_key_addr) 3456 rte_free(flow->fs_key_addr); 3457 if (flow->fs_mask_addr) 3458 rte_free(flow->fs_mask_addr); 3459 /* Now free the flow */ 3460 rte_free(flow); 3461 3462 error: 3463 if (ret) 3464 rte_flow_error_set(error, EPERM, 3465 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 3466 NULL, "unknown"); 3467 return ret; 3468 } 3469 3470 /** 3471 * Destroy user-configured flow rules. 3472 * 3473 * This function skips internal flows rules. 3474 * 3475 * @see rte_flow_flush() 3476 * @see rte_flow_ops 3477 */ 3478 static int 3479 dpaa2_flow_flush(struct rte_eth_dev *dev, 3480 struct rte_flow_error *error) 3481 { 3482 struct dpaa2_dev_priv *priv = dev->data->dev_private; 3483 struct dpaa2_dev_flow *flow = LIST_FIRST(&priv->flows); 3484 3485 while (flow) { 3486 struct dpaa2_dev_flow *next = LIST_NEXT(flow, next); 3487 3488 dpaa2_flow_destroy(dev, (struct rte_flow *)flow, error); 3489 flow = next; 3490 } 3491 return 0; 3492 } 3493 3494 static int 3495 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused, 3496 struct rte_flow *_flow __rte_unused, 3497 const struct rte_flow_action *actions __rte_unused, 3498 void *data __rte_unused, 3499 struct rte_flow_error *error __rte_unused) 3500 { 3501 return 0; 3502 } 3503 3504 /** 3505 * Clean up all flow rules. 3506 * 3507 * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow 3508 * rules regardless of whether they are internal or user-configured. 3509 * 3510 * @param priv 3511 * Pointer to private structure. 3512 */ 3513 void 3514 dpaa2_flow_clean(struct rte_eth_dev *dev) 3515 { 3516 struct dpaa2_dev_flow *flow; 3517 struct dpaa2_dev_priv *priv = dev->data->dev_private; 3518 3519 while ((flow = LIST_FIRST(&priv->flows))) 3520 dpaa2_flow_destroy(dev, (struct rte_flow *)flow, NULL); 3521 } 3522 3523 const struct rte_flow_ops dpaa2_flow_ops = { 3524 .create = dpaa2_flow_create, 3525 .validate = dpaa2_flow_validate, 3526 .destroy = dpaa2_flow_destroy, 3527 .flush = dpaa2_flow_flush, 3528 .query = dpaa2_flow_query, 3529 }; 3530