/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 NXP
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_eth_ctrl.h>
#include <rte_malloc.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include <fsl_dpni.h>
#include <fsl_dpkg.h>

#include <dpaa2_ethdev.h>
#include <dpaa2_pmd_logs.h>

struct rte_flow {
    LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
    struct dpni_rule_cfg rule;
    uint8_t key_size;
    uint8_t tc_id;
    uint8_t flow_type;
    uint8_t index;
    enum rte_flow_action_type action;
    uint16_t flow_id;
};

/* Layout of rule compositions for the supported patterns.
 * TODO: The current design only supports Ethernet + IPv4 based
 * classification, so only the corresponding offset macros are valid.
 * The rest are placeholders for now; once support for other network
 * headers is added, they will be updated with correct values.
 */
#define DPAA2_CLS_RULE_OFFSET_ETH   0  /* Start of buffer */
#define DPAA2_CLS_RULE_OFFSET_VLAN  14 /* OFFSET_ETH + size of Eth fields */
#define DPAA2_CLS_RULE_OFFSET_IPV4  14 /* OFFSET_VLAN + size of VLAN fields */
#define DPAA2_CLS_RULE_OFFSET_IPV6  25 /* OFFSET_IPV4 + size of IPv4 fields */
#define DPAA2_CLS_RULE_OFFSET_ICMP  58 /* OFFSET_IPV6 + size of IPv6 fields */
#define DPAA2_CLS_RULE_OFFSET_UDP   60 /* OFFSET_ICMP + size of ICMP fields */
#define DPAA2_CLS_RULE_OFFSET_TCP   64 /* OFFSET_UDP + size of UDP fields */
#define DPAA2_CLS_RULE_OFFSET_SCTP  68 /* OFFSET_TCP + size of TCP fields */
#define DPAA2_CLS_RULE_OFFSET_GRE   72 /* OFFSET_SCTP + size of SCTP fields */

static const
enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
    RTE_FLOW_ITEM_TYPE_END,
    RTE_FLOW_ITEM_TYPE_ETH,
    RTE_FLOW_ITEM_TYPE_VLAN,
    RTE_FLOW_ITEM_TYPE_IPV4,
    RTE_FLOW_ITEM_TYPE_IPV6,
    RTE_FLOW_ITEM_TYPE_ICMP,
    RTE_FLOW_ITEM_TYPE_UDP,
    RTE_FLOW_ITEM_TYPE_TCP,
    RTE_FLOW_ITEM_TYPE_SCTP,
    RTE_FLOW_ITEM_TYPE_GRE,
};

static const
enum rte_flow_action_type dpaa2_supported_action_type[] = {
    RTE_FLOW_ACTION_TYPE_END,
    RTE_FLOW_ACTION_TYPE_QUEUE,
    RTE_FLOW_ACTION_TYPE_RSS
};

enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
static const void *default_mask;
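/*
 * Each dpaa2_configure_flow_*() helper below follows the same sequence:
 * record the item type against the QoS table slot (priv->pattern[8]) and
 * against the FS table slot of the flow's traffic class, extend the DPKG
 * extract configuration whenever a new item type is seen, and finally copy
 * the item's spec and mask bytes into the rule's key and mask buffers at
 * the fixed offset defined for that header.
 */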
static int
dpaa2_configure_flow_eth(struct rte_flow *flow,
                         struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_item *pattern,
                         const struct rte_flow_action actions[] __rte_unused,
                         struct rte_flow_error *error __rte_unused)
{
    int index, j = 0;
    size_t key_iova;
    size_t mask_iova;
    int device_configured = 0, entry_found = 0;
    uint32_t group;
    const struct rte_flow_item_eth *spec, *mask;

    /* TODO: The upper bound of the range parameter is not implemented yet */
    const struct rte_flow_item_eth *last __rte_unused;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;

    group = attr->group;

    /* The DPAA2 platform cannot extract more than DPKG_MAX_NUM_OF_EXTRACTS
     * parameters, so verify that limit as well.
     */
    /* TODO: pattern is an array of 9 elements where the 9th element is for
     * the QoS table and elements 1-8 are for the FS tables.
     * This could be replaced with a macro.
     */
    if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    for (j = 0; j < priv->pattern[8].item_count; j++) {
        if (priv->pattern[8].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[8].pattern_type[j] = pattern->type;
        priv->pattern[8].item_count++;
        device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
    }

    entry_found = 0;
    for (j = 0; j < priv->pattern[group].item_count; j++) {
        if (priv->pattern[group].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[group].pattern_type[j] = pattern->type;
        priv->pattern[group].item_count++;
        device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
    }

    /* Get traffic class index and flow id to be configured */
    flow->tc_id = group;
    flow->index = attr->priority;

    if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
        index = priv->extract.qos_key_cfg.num_extracts;
        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
        index++;

        priv->extract.qos_key_cfg.num_extracts = index;
    }

    if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
        index = priv->extract.fs_key_cfg[group].num_extracts;
        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
        index++;

        priv->extract.fs_key_cfg[group].num_extracts = index;
    }

    /* Parse pattern list to get the matching parameters */
    spec = (const struct rte_flow_item_eth *)pattern->spec;
    last = (const struct rte_flow_item_eth *)pattern->last;
    mask = (const struct rte_flow_item_eth *)
           (pattern->mask ? pattern->mask : default_mask);

    /* Key rule */
    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ETH;
    memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes),
           sizeof(struct ether_addr));
    key_iova += sizeof(struct ether_addr);
    memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes),
           sizeof(struct ether_addr));
    key_iova += sizeof(struct ether_addr);
    memcpy((void *)key_iova, (const void *)(&spec->type),
           sizeof(rte_be16_t));

    /* Key mask */
    mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ETH;
    memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes),
           sizeof(struct ether_addr));
    mask_iova += sizeof(struct ether_addr);
    memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes),
           sizeof(struct ether_addr));
    mask_iova += sizeof(struct ether_addr);
    memcpy((void *)mask_iova, (const void *)(&mask->type),
           sizeof(rte_be16_t));

    flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ETH +
                           (2 * sizeof(struct ether_addr)) +
                           sizeof(rte_be16_t));
    return device_configured;
}
static int
dpaa2_configure_flow_vlan(struct rte_flow *flow,
                          struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item *pattern,
                          const struct rte_flow_action actions[] __rte_unused,
                          struct rte_flow_error *error __rte_unused)
{
    int index, j = 0;
    size_t key_iova;
    size_t mask_iova;
    int device_configured = 0, entry_found = 0;
    uint32_t group;
    const struct rte_flow_item_vlan *spec, *mask;

    const struct rte_flow_item_vlan *last __rte_unused;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;

    group = attr->group;

    /* The DPAA2 platform cannot extract more than DPKG_MAX_NUM_OF_EXTRACTS
     * parameters, so verify that limit as well.
     */
    if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    for (j = 0; j < priv->pattern[8].item_count; j++) {
        if (priv->pattern[8].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[8].pattern_type[j] = pattern->type;
        priv->pattern[8].item_count++;
        device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
    }

    entry_found = 0;
    for (j = 0; j < priv->pattern[group].item_count; j++) {
        if (priv->pattern[group].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[group].pattern_type[j] = pattern->type;
        priv->pattern[group].item_count++;
        device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
    }

    /* Get traffic class index and flow id to be configured */
    flow->tc_id = group;
    flow->index = attr->priority;

    if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
        index = priv->extract.qos_key_cfg.num_extracts;
        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
        priv->extract.qos_key_cfg.num_extracts++;
    }

    if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
        index = priv->extract.fs_key_cfg[group].num_extracts;
        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
        priv->extract.fs_key_cfg[group].num_extracts++;
    }

    /* Parse pattern list to get the matching parameters */
    spec = (const struct rte_flow_item_vlan *)pattern->spec;
    last = (const struct rte_flow_item_vlan *)pattern->last;
    mask = (const struct rte_flow_item_vlan *)
           (pattern->mask ? pattern->mask : default_mask);

    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
    memcpy((void *)key_iova, (const void *)(&spec->tci),
           sizeof(rte_be16_t));

    mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
    memcpy((void *)mask_iova, (const void *)(&mask->tci),
           sizeof(rte_be16_t));

    flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_VLAN + sizeof(rte_be16_t));
    return device_configured;
}
static int
dpaa2_configure_flow_ipv4(struct rte_flow *flow,
                          struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item *pattern,
                          const struct rte_flow_action actions[] __rte_unused,
                          struct rte_flow_error *error __rte_unused)
{
    int index, j = 0;
    size_t key_iova;
    size_t mask_iova;
    int device_configured = 0, entry_found = 0;
    uint32_t group;
    const struct rte_flow_item_ipv4 *spec, *mask;

    const struct rte_flow_item_ipv4 *last __rte_unused;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;

    group = attr->group;

    /* The DPAA2 platform cannot extract more than DPKG_MAX_NUM_OF_EXTRACTS
     * parameters, so verify that limit as well.
     */
    if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    for (j = 0; j < priv->pattern[8].item_count; j++) {
        if (priv->pattern[8].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[8].pattern_type[j] = pattern->type;
        priv->pattern[8].item_count++;
        device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
    }

    entry_found = 0;
    for (j = 0; j < priv->pattern[group].item_count; j++) {
        if (priv->pattern[group].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[group].pattern_type[j] = pattern->type;
        priv->pattern[group].item_count++;
        device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
    }

    /* Get traffic class index and flow id to be configured */
    flow->tc_id = group;
    flow->index = attr->priority;

    if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
        index = priv->extract.qos_key_cfg.num_extracts;
        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
        index++;

        priv->extract.qos_key_cfg.num_extracts = index;
    }

    if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
        index = priv->extract.fs_key_cfg[group].num_extracts;
        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
        index++;

        priv->extract.fs_key_cfg[group].num_extracts = index;
    }

    /* Parse pattern list to get the matching parameters */
    spec = (const struct rte_flow_item_ipv4 *)pattern->spec;
    last = (const struct rte_flow_item_ipv4 *)pattern->last;
    mask = (const struct rte_flow_item_ipv4 *)
           (pattern->mask ? pattern->mask : default_mask);

    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
    memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr,
           sizeof(uint32_t));
    key_iova += sizeof(uint32_t);
    memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr,
           sizeof(uint32_t));
    key_iova += sizeof(uint32_t);
    memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id,
           sizeof(uint8_t));

    mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
    memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr,
           sizeof(uint32_t));
    mask_iova += sizeof(uint32_t);
    memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr,
           sizeof(uint32_t));
    mask_iova += sizeof(uint32_t);
    memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id,
           sizeof(uint8_t));

    flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV4 +
                           (2 * sizeof(uint32_t)) + sizeof(uint8_t));

    return device_configured;
}
static int
dpaa2_configure_flow_ipv6(struct rte_flow *flow,
                          struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item *pattern,
                          const struct rte_flow_action actions[] __rte_unused,
                          struct rte_flow_error *error __rte_unused)
{
    int index, j = 0;
    size_t key_iova;
    size_t mask_iova;
    int device_configured = 0, entry_found = 0;
    uint32_t group;
    const struct rte_flow_item_ipv6 *spec, *mask;

    const struct rte_flow_item_ipv6 *last __rte_unused;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;

    group = attr->group;

    /* The DPAA2 platform cannot extract more than DPKG_MAX_NUM_OF_EXTRACTS
     * parameters, so verify that limit as well.
     */
    if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    for (j = 0; j < priv->pattern[8].item_count; j++) {
        if (priv->pattern[8].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[8].pattern_type[j] = pattern->type;
        priv->pattern[8].item_count++;
        device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
    }

    entry_found = 0;
    for (j = 0; j < priv->pattern[group].item_count; j++) {
        if (priv->pattern[group].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[group].pattern_type[j] = pattern->type;
        priv->pattern[group].item_count++;
        device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
    }

    /* Get traffic class index and flow id to be configured */
    flow->tc_id = group;
    flow->index = attr->priority;

    if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
        index = priv->extract.qos_key_cfg.num_extracts;
        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
        index++;

        priv->extract.qos_key_cfg.num_extracts = index;
    }

    if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
        index = priv->extract.fs_key_cfg[group].num_extracts;
        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
        index++;

        priv->extract.fs_key_cfg[group].num_extracts = index;
    }

    /* Parse pattern list to get the matching parameters */
    spec = (const struct rte_flow_item_ipv6 *)pattern->spec;
    last = (const struct rte_flow_item_ipv6 *)pattern->last;
    mask = (const struct rte_flow_item_ipv6 *)
           (pattern->mask ? pattern->mask : default_mask);

    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
    memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr),
           sizeof(spec->hdr.src_addr));
    key_iova += sizeof(spec->hdr.src_addr);
    memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr),
           sizeof(spec->hdr.dst_addr));

    mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
    memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr),
           sizeof(mask->hdr.src_addr));
    mask_iova += sizeof(mask->hdr.src_addr);
    memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr),
           sizeof(mask->hdr.dst_addr));

    flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV6 +
                           sizeof(spec->hdr.src_addr) +
                           sizeof(spec->hdr.dst_addr));
    return device_configured;
}
static int
dpaa2_configure_flow_icmp(struct rte_flow *flow,
                          struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item *pattern,
                          const struct rte_flow_action actions[] __rte_unused,
                          struct rte_flow_error *error __rte_unused)
{
    int index, j = 0;
    size_t key_iova;
    size_t mask_iova;
    int device_configured = 0, entry_found = 0;
    uint32_t group;
    const struct rte_flow_item_icmp *spec, *mask;

    const struct rte_flow_item_icmp *last __rte_unused;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;

    group = attr->group;

    /* The DPAA2 platform cannot extract more than DPKG_MAX_NUM_OF_EXTRACTS
     * parameters, so verify that limit as well.
     */
    if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    for (j = 0; j < priv->pattern[8].item_count; j++) {
        if (priv->pattern[8].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[8].pattern_type[j] = pattern->type;
        priv->pattern[8].item_count++;
        device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
    }

    entry_found = 0;
    for (j = 0; j < priv->pattern[group].item_count; j++) {
        if (priv->pattern[group].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[group].pattern_type[j] = pattern->type;
        priv->pattern[group].item_count++;
        device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
    }

    /* Get traffic class index and flow id to be configured */
    flow->tc_id = group;
    flow->index = attr->priority;

    if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
        index = priv->extract.qos_key_cfg.num_extracts;
        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
        index++;

        priv->extract.qos_key_cfg.num_extracts = index;
    }

    if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
        index = priv->extract.fs_key_cfg[group].num_extracts;
        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
        index++;

        priv->extract.fs_key_cfg[group].num_extracts = index;
    }

    /* Parse pattern list to get the matching parameters */
    spec = (const struct rte_flow_item_icmp *)pattern->spec;
    last = (const struct rte_flow_item_icmp *)pattern->last;
    mask = (const struct rte_flow_item_icmp *)
           (pattern->mask ? pattern->mask : default_mask);

    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
    memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type,
           sizeof(uint8_t));
    key_iova += sizeof(uint8_t);
    memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code,
           sizeof(uint8_t));

    mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
    memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type,
           sizeof(uint8_t));
    mask_iova += sizeof(uint8_t);
    memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code,
           sizeof(uint8_t));

    flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ICMP +
                           (2 * sizeof(uint8_t)));

    return device_configured;
}

static int
dpaa2_configure_flow_udp(struct rte_flow *flow,
                         struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_item *pattern,
                         const struct rte_flow_action actions[] __rte_unused,
                         struct rte_flow_error *error __rte_unused)
{
    int index, j = 0;
    size_t key_iova;
    size_t mask_iova;
    int device_configured = 0, entry_found = 0;
    uint32_t group;
    const struct rte_flow_item_udp *spec, *mask;

    const struct rte_flow_item_udp *last __rte_unused;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;

    group = attr->group;

    /* The DPAA2 platform cannot extract more than DPKG_MAX_NUM_OF_EXTRACTS
     * parameters, so verify that limit as well.
     */
    if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    for (j = 0; j < priv->pattern[8].item_count; j++) {
        if (priv->pattern[8].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[8].pattern_type[j] = pattern->type;
        priv->pattern[8].item_count++;
        device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
    }

    entry_found = 0;
    for (j = 0; j < priv->pattern[group].item_count; j++) {
        if (priv->pattern[group].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[group].pattern_type[j] = pattern->type;
        priv->pattern[group].item_count++;
        device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
    }

    /* Get traffic class index and flow id to be configured */
    flow->tc_id = group;
    flow->index = attr->priority;

    if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
        index = priv->extract.qos_key_cfg.num_extracts;
        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
        index++;

        priv->extract.qos_key_cfg.num_extracts = index;
    }

    if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
        index = priv->extract.fs_key_cfg[group].num_extracts;
        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
        index++;

        priv->extract.fs_key_cfg[group].num_extracts = index;
    }

    /* Parse pattern list to get the matching parameters */
    spec = (const struct rte_flow_item_udp *)pattern->spec;
    last = (const struct rte_flow_item_udp *)pattern->last;
    mask = (const struct rte_flow_item_udp *)
           (pattern->mask ? pattern->mask : default_mask);

    /* Pin the IP protocol byte of the key to IPPROTO_UDP (0x11) */
    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
               (2 * sizeof(uint32_t));
    memset((void *)key_iova, 0x11, sizeof(uint8_t));
    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_UDP;
    memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
           sizeof(uint16_t));
    key_iova += sizeof(uint16_t);
    memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
           sizeof(uint16_t));

    mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_UDP;
    memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
           sizeof(uint16_t));
    mask_iova += sizeof(uint16_t);
    memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
           sizeof(uint16_t));

    flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_UDP +
                           (2 * sizeof(uint16_t)));

    return device_configured;
}
static int
dpaa2_configure_flow_tcp(struct rte_flow *flow,
                         struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_item *pattern,
                         const struct rte_flow_action actions[] __rte_unused,
                         struct rte_flow_error *error __rte_unused)
{
    int index, j = 0;
    size_t key_iova;
    size_t mask_iova;
    int device_configured = 0, entry_found = 0;
    uint32_t group;
    const struct rte_flow_item_tcp *spec, *mask;

    const struct rte_flow_item_tcp *last __rte_unused;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;

    group = attr->group;

    /* The DPAA2 platform cannot extract more than DPKG_MAX_NUM_OF_EXTRACTS
     * parameters, so verify that limit as well.
     */
    if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    for (j = 0; j < priv->pattern[8].item_count; j++) {
        if (priv->pattern[8].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[8].pattern_type[j] = pattern->type;
        priv->pattern[8].item_count++;
        device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
    }

    entry_found = 0;
    for (j = 0; j < priv->pattern[group].item_count; j++) {
        if (priv->pattern[group].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[group].pattern_type[j] = pattern->type;
        priv->pattern[group].item_count++;
        device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
    }

    /* Get traffic class index and flow id to be configured */
    flow->tc_id = group;
    flow->index = attr->priority;

    if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
        index = priv->extract.qos_key_cfg.num_extracts;
        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
        index++;

        priv->extract.qos_key_cfg.num_extracts = index;
    }

    if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
        index = priv->extract.fs_key_cfg[group].num_extracts;
        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
        index++;

        priv->extract.fs_key_cfg[group].num_extracts = index;
    }

    /* Parse pattern list to get the matching parameters */
    spec = (const struct rte_flow_item_tcp *)pattern->spec;
    last = (const struct rte_flow_item_tcp *)pattern->last;
    mask = (const struct rte_flow_item_tcp *)
           (pattern->mask ? pattern->mask : default_mask);

    /* Pin the IP protocol byte of the key to IPPROTO_TCP (0x06) */
    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
               (2 * sizeof(uint32_t));
    memset((void *)key_iova, 0x06, sizeof(uint8_t));
    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_TCP;
    memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
           sizeof(uint16_t));
    key_iova += sizeof(uint16_t);
    memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
           sizeof(uint16_t));

    mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_TCP;
    memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
           sizeof(uint16_t));
    mask_iova += sizeof(uint16_t);
    memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
           sizeof(uint16_t));

    flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_TCP +
                           (2 * sizeof(uint16_t)));

    return device_configured;
}
static int
dpaa2_configure_flow_sctp(struct rte_flow *flow,
                          struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item *pattern,
                          const struct rte_flow_action actions[] __rte_unused,
                          struct rte_flow_error *error __rte_unused)
{
    int index, j = 0;
    size_t key_iova;
    size_t mask_iova;
    int device_configured = 0, entry_found = 0;
    uint32_t group;
    const struct rte_flow_item_sctp *spec, *mask;

    const struct rte_flow_item_sctp *last __rte_unused;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;

    group = attr->group;

    /* The DPAA2 platform cannot extract more than DPKG_MAX_NUM_OF_EXTRACTS
     * parameters, so verify that limit as well.
     */
    if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    for (j = 0; j < priv->pattern[8].item_count; j++) {
        if (priv->pattern[8].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[8].pattern_type[j] = pattern->type;
        priv->pattern[8].item_count++;
        device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
    }

    entry_found = 0;
    for (j = 0; j < priv->pattern[group].item_count; j++) {
        if (priv->pattern[group].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[group].pattern_type[j] = pattern->type;
        priv->pattern[group].item_count++;
        device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
    }

    /* Get traffic class index and flow id to be configured */
    flow->tc_id = group;
    flow->index = attr->priority;

    if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
        index = priv->extract.qos_key_cfg.num_extracts;
        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
        index++;

        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
        index++;

        priv->extract.qos_key_cfg.num_extracts = index;
    }

    if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
        index = priv->extract.fs_key_cfg[group].num_extracts;
        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
        index++;

        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
        index++;

        priv->extract.fs_key_cfg[group].num_extracts = index;
    }

    /* Parse pattern list to get the matching parameters */
    spec = (const struct rte_flow_item_sctp *)pattern->spec;
    last = (const struct rte_flow_item_sctp *)pattern->last;
    mask = (const struct rte_flow_item_sctp *)
           (pattern->mask ? pattern->mask : default_mask);

    /* Pin the IP protocol byte of the key to IPPROTO_SCTP (0x84) */
    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
               (2 * sizeof(uint32_t));
    memset((void *)key_iova, 0x84, sizeof(uint8_t));
    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
    memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
           sizeof(uint16_t));
    key_iova += sizeof(uint16_t);
    memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
           sizeof(uint16_t));

    mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
    memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
           sizeof(uint16_t));
    mask_iova += sizeof(uint16_t);
    memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
           sizeof(uint16_t));

    flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_SCTP +
                           (2 * sizeof(uint16_t)));
    return device_configured;
}
static int
dpaa2_configure_flow_gre(struct rte_flow *flow,
                         struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_item *pattern,
                         const struct rte_flow_action actions[] __rte_unused,
                         struct rte_flow_error *error __rte_unused)
{
    int index, j = 0;
    size_t key_iova;
    size_t mask_iova;
    int device_configured = 0, entry_found = 0;
    uint32_t group;
    const struct rte_flow_item_gre *spec, *mask;

    const struct rte_flow_item_gre *last __rte_unused;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;

    group = attr->group;

    /* The DPAA2 platform cannot extract more than DPKG_MAX_NUM_OF_EXTRACTS
     * parameters, so verify that limit as well.
     */
    if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
        DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
                      DPKG_MAX_NUM_OF_EXTRACTS);
        return -ENOTSUP;
    }

    for (j = 0; j < priv->pattern[8].item_count; j++) {
        if (priv->pattern[8].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[8].pattern_type[j] = pattern->type;
        priv->pattern[8].item_count++;
        device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
    }

    entry_found = 0;
    for (j = 0; j < priv->pattern[group].item_count; j++) {
        if (priv->pattern[group].pattern_type[j] == pattern->type) {
            entry_found = 1;
            break;
        }
    }

    if (!entry_found) {
        priv->pattern[group].pattern_type[j] = pattern->type;
        priv->pattern[group].item_count++;
        device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
    }

    /* Get traffic class index and flow id to be configured */
    flow->tc_id = group;
    flow->index = attr->priority;

    if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
        index = priv->extract.qos_key_cfg.num_extracts;
        priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
        priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
        index++;

        priv->extract.qos_key_cfg.num_extracts = index;
    }

    if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
        index = priv->extract.fs_key_cfg[group].num_extracts;
        priv->extract.fs_key_cfg[group].extracts[index].type = DPKG_EXTRACT_FROM_HDR;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
        priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
        index++;

        priv->extract.fs_key_cfg[group].num_extracts = index;
    }

    /* Parse pattern list to get the matching parameters */
    spec = (const struct rte_flow_item_gre *)pattern->spec;
    last = (const struct rte_flow_item_gre *)pattern->last;
    mask = (const struct rte_flow_item_gre *)
           (pattern->mask ? pattern->mask : default_mask);

    key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_GRE;
    memcpy((void *)key_iova, (const void *)(&spec->protocol),
           sizeof(rte_be16_t));

    mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_GRE;
    memcpy((void *)mask_iova, (const void *)(&mask->protocol),
           sizeof(rte_be16_t));

    flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_GRE + sizeof(rte_be16_t));

    return device_configured;
}
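/*
 * Walk the pattern list to build the rule key/mask and the extract
 * configuration, then apply the requested action: program the QoS and FS
 * tables for RTE_FLOW_ACTION_TYPE_QUEUE, or the hash distribution for
 * RTE_FLOW_ACTION_TYPE_RSS. On success the flow is appended to the
 * device's flow list.
 */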
static int
dpaa2_generic_flow_set(struct rte_flow *flow,
                       struct rte_eth_dev *dev,
                       const struct rte_flow_attr *attr,
                       const struct rte_flow_item pattern[],
                       const struct rte_flow_action actions[],
                       struct rte_flow_error *error)
{
    const struct rte_flow_action_queue *dest_queue;
    const struct rte_flow_action_rss *rss_conf;
    uint16_t index;
    int is_keycfg_configured = 0, end_of_list = 0;
    int ret = 0, i = 0, j = 0;
    struct dpni_attr nic_attr;
    struct dpni_rx_tc_dist_cfg tc_cfg;
    struct dpni_qos_tbl_cfg qos_cfg;
    struct dpkg_profile_cfg key_cfg;
    struct dpni_fs_action_cfg action;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;
    struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
    size_t param;
    struct rte_flow *curr = LIST_FIRST(&priv->flows);

    /* Parse pattern list to get the matching parameters */
    while (!end_of_list) {
        switch (pattern[i].type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
            is_keycfg_configured = dpaa2_configure_flow_eth(flow,
                    dev, attr, &pattern[i], actions, error);
            break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
            is_keycfg_configured = dpaa2_configure_flow_vlan(flow,
                    dev, attr, &pattern[i], actions, error);
            break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
            is_keycfg_configured = dpaa2_configure_flow_ipv4(flow,
                    dev, attr, &pattern[i], actions, error);
            break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
            is_keycfg_configured = dpaa2_configure_flow_ipv6(flow,
                    dev, attr, &pattern[i], actions, error);
            break;
        case RTE_FLOW_ITEM_TYPE_ICMP:
            is_keycfg_configured = dpaa2_configure_flow_icmp(flow,
                    dev, attr, &pattern[i], actions, error);
            break;
        case RTE_FLOW_ITEM_TYPE_UDP:
            is_keycfg_configured = dpaa2_configure_flow_udp(flow,
                    dev, attr, &pattern[i], actions, error);
            break;
        case RTE_FLOW_ITEM_TYPE_TCP:
            is_keycfg_configured = dpaa2_configure_flow_tcp(flow,
                    dev, attr, &pattern[i], actions, error);
            break;
        case RTE_FLOW_ITEM_TYPE_SCTP:
            is_keycfg_configured = dpaa2_configure_flow_sctp(flow,
                    dev, attr, &pattern[i], actions, error);
            break;
        case RTE_FLOW_ITEM_TYPE_GRE:
            is_keycfg_configured = dpaa2_configure_flow_gre(flow,
                    dev, attr, &pattern[i], actions, error);
            break;
        case RTE_FLOW_ITEM_TYPE_END:
            end_of_list = 1;
            break; /* end of list */
        default:
            DPAA2_PMD_ERR("Invalid pattern type");
            ret = -ENOTSUP;
            break;
        }
        i++;
    }

    /* Now parse the action(s) for the matching traffic */
    end_of_list = 0;
    while (!end_of_list) {
        switch (actions[j].type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
            dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
            flow->flow_id = dest_queue->index;
            flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
            memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
            action.flow_id = flow->flow_id;
            if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
                if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
                    (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
                    DPAA2_PMD_ERR(
                        "Unable to prepare extract parameters");
                    return -1;
                }

                memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
                qos_cfg.discard_on_miss = true;
                qos_cfg.keep_entries = true;
                qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
                ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
                                         priv->token, &qos_cfg);
                if (ret < 0) {
                    DPAA2_PMD_ERR(
                        "Distribution cannot be configured (%d)", ret);
                    return -1;
                }
            }
            if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
                if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id],
                    (uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) {
                    DPAA2_PMD_ERR(
                        "Unable to prepare extract parameters");
                    return -1;
                }

                memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
                tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
                tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
                tc_cfg.key_cfg_iova =
                    (uint64_t)priv->extract.fs_extract_param[flow->tc_id];
                tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
                tc_cfg.fs_cfg.keep_entries = true;
                ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
                                          priv->token,
                                          flow->tc_id, &tc_cfg);
                if (ret < 0) {
                    DPAA2_PMD_ERR(
                        "Distribution cannot be configured (%d)", ret);
                    return -1;
                }
            }
            /* Configure the QoS table first */
            memset(&nic_attr, 0, sizeof(struct dpni_attr));
            ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
                                      priv->token, &nic_attr);
            if (ret < 0) {
                DPAA2_PMD_ERR(
                    "Failure to get attribute. dpni@%p err code(%d)\n",
                    dpni, ret);
                return ret;
            }

            action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
            index = flow->index + (flow->tc_id * nic_attr.fs_entries);
            ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
                                     priv->token, &flow->rule,
                                     flow->tc_id, index);
            if (ret < 0) {
                DPAA2_PMD_ERR(
                    "Error in adding entry to QoS table(%d)", ret);
                return ret;
            }

            /* Then configure the FS table */
            ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
                                    flow->tc_id, flow->index,
                                    &flow->rule, &action);
            if (ret < 0) {
                DPAA2_PMD_ERR(
                    "Error in adding entry to FS table(%d)", ret);
                return ret;
            }
            break;
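        /*
         * For RSS, a distribution key is built from the requested RSS
         * types and programmed via dpni_set_rx_tc_dist(); every queue in
         * the action must belong to the flow's traffic class.
         */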
        case RTE_FLOW_ACTION_TYPE_RSS:
            ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
                                      priv->token, &nic_attr);
            if (ret < 0) {
                DPAA2_PMD_ERR(
                    "Failure to get attribute. dpni@%p err code(%d)\n",
                    dpni, ret);
                return ret;
            }
            rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
            for (i = 0; i < (int)rss_conf->queue_num; i++) {
                if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
                    rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
                    DPAA2_PMD_ERR(
                        "Queue/Group combination is not supported\n");
                    return -ENOTSUP;
                }
            }

            flow->action = RTE_FLOW_ACTION_TYPE_RSS;
            ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
                                                    &key_cfg);
            if (ret < 0) {
                DPAA2_PMD_ERR(
                    "Unable to set flow distribution. Please check queue config\n");
                return ret;
            }

            /* Allocate DMA'ble memory to write the rules */
            param = (size_t)rte_malloc(NULL, 256, 64);
            if (!param) {
                DPAA2_PMD_ERR("Memory allocation failure\n");
                return -1;
            }

            if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) {
                DPAA2_PMD_ERR(
                    "Unable to prepare extract parameters");
                rte_free((void *)param);
                return -1;
            }

            memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
            tc_cfg.dist_size = rss_conf->queue_num;
            tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
            tc_cfg.key_cfg_iova = (size_t)param;
            tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;

            ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
                                      priv->token, flow->tc_id,
                                      &tc_cfg);
            if (ret < 0) {
                DPAA2_PMD_ERR(
                    "Distribution cannot be configured: %d\n", ret);
                rte_free((void *)param);
                return -1;
            }

            rte_free((void *)param);
            if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
                if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
                    (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
                    DPAA2_PMD_ERR(
                        "Unable to prepare extract parameters");
                    return -1;
                }
                memset(&qos_cfg, 0,
                       sizeof(struct dpni_qos_tbl_cfg));
                qos_cfg.discard_on_miss = true;
                qos_cfg.keep_entries = true;
                qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
                ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
                                         priv->token, &qos_cfg);
                if (ret < 0) {
                    DPAA2_PMD_ERR(
                        "Distribution cannot be configured (%d)\n",
                        ret);
                    return -1;
                }
            }

            /* Add the rule to the QoS table */
            index = flow->index + (flow->tc_id * nic_attr.fs_entries);
            ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
                                     &flow->rule, flow->tc_id,
                                     index);
            if (ret < 0) {
                DPAA2_PMD_ERR(
                    "Error in entry addition in QoS table(%d)",
                    ret);
                return ret;
            }
            break;
        case RTE_FLOW_ACTION_TYPE_END:
            end_of_list = 1;
            break;
        default:
            DPAA2_PMD_ERR("Invalid action type");
            ret = -ENOTSUP;
            break;
        }
        j++;
    }

    if (!ret) {
        /* New rules are appended to the end of the flow list. */
        if (!curr) {
            LIST_INSERT_HEAD(&priv->flows, flow, next);
        } else {
            while (LIST_NEXT(curr, next))
                curr = LIST_NEXT(curr, next);
            LIST_INSERT_AFTER(curr, flow, next);
        }
    }
    return ret;
}
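/*
 * The helpers below validate a flow's attributes, pattern list and action
 * list against the device capabilities before a rule is created.
 */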
static inline int
dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
		      const struct rte_flow_attr *attr)
{
	int ret = 0;

	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
		DPAA2_PMD_ERR("Priority group is out of range\n");
		ret = -ENOTSUP;
	}
	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
		DPAA2_PMD_ERR("Priority within the group is out of range\n");
		ret = -ENOTSUP;
	}
	if (unlikely(attr->egress)) {
		DPAA2_PMD_ERR(
			"Flow configuration is not supported on egress side\n");
		ret = -ENOTSUP;
	}
	if (unlikely(!attr->ingress)) {
		DPAA2_PMD_ERR("Ingress flag must be configured\n");
		ret = -EINVAL;
	}
	return ret;
}

static inline void
dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern)
{
	switch (pattern->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		default_mask = (const void *)&rte_flow_item_eth_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		default_mask = (const void *)&rte_flow_item_vlan_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		default_mask = (const void *)&rte_flow_item_ipv4_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		default_mask = (const void *)&rte_flow_item_ipv6_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_ICMP:
		default_mask = (const void *)&rte_flow_item_icmp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		default_mask = (const void *)&rte_flow_item_udp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		default_mask = (const void *)&rte_flow_item_tcp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		default_mask = (const void *)&rte_flow_item_sctp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		default_mask = (const void *)&rte_flow_item_gre_mask;
		break;
	default:
		DPAA2_PMD_ERR("Invalid pattern type");
	}
}
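/**
 * Check the pattern list before programming the hardware.
 *
 * Each item type must appear in dpaa2_supported_pattern_type[], each item
 * must carry a spec (a range given without a mask falls back to the item
 * type's default mask), and no QoS/FS table may grow beyond
 * DPKG_MAX_NUM_OF_EXTRACTS key extracts.
 */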
static inline int
dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv,
			  const struct rte_flow_item pattern[])
{
	unsigned int i, j, k, is_found;
	int ret = 0;

	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
		is_found = 0;
		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
			if (dpaa2_supported_pattern_type[i] == pattern[j].type) {
				is_found = 1;
				break;
			}
		}
		if (!is_found) {
			ret = -ENOTSUP;
			break;
		}
	}
	/* Let's verify other combinations of given pattern rules */
	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
		if (!pattern[j].spec) {
			ret = -EINVAL;
			break;
		}
		if ((pattern[j].last) && (!pattern[j].mask))
			dpaa2_dev_update_default_mask(&pattern[j]);
	}

	/* DPAA2 platform has a limitation that extract parameter can not be
	 * more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.
	 */
	for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
		for (j = 0; j < MAX_TCS + 1; j++) {
			for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; k++) {
				if (dev_priv->pattern[j].pattern_type[k] ==
				    pattern[i].type)
					break;
			}
			if (dev_priv->pattern[j].item_count >=
			    DPKG_MAX_NUM_OF_EXTRACTS)
				ret = -ENOTSUP;
		}
	}
	return ret;
}

static inline int
dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
{
	unsigned int i, j, is_found;
	int ret = 0;

	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
		is_found = 0;
		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
			if (dpaa2_supported_action_type[i] == actions[j].type) {
				is_found = 1;
				break;
			}
		}
		if (!is_found) {
			ret = -ENOTSUP;
			break;
		}
	}
	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
		if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) &&
		    (!actions[j].conf))
			ret = -EINVAL;
	}
	return ret;
}

static
int dpaa2_flow_validate(struct rte_eth_dev *dev,
			const struct rte_flow_attr *flow_attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpni_attr dpni_attr;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint16_t token = priv->token;
	int ret = 0;

	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
	if (ret < 0) {
		DPAA2_PMD_ERR(
			"Failure to get dpni@%p attribute, err code %d\n",
			dpni, ret);
		rte_flow_error_set(error, EPERM,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   flow_attr, "invalid");
		return ret;
	}

	/* Verify input attributes */
	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
	if (ret < 0) {
		DPAA2_PMD_ERR(
			"Invalid attributes are given\n");
		rte_flow_error_set(error, EPERM,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   flow_attr, "invalid");
		goto not_valid_params;
	}
	/* Verify input pattern list */
	ret = dpaa2_dev_verify_patterns(priv, pattern);
	if (ret < 0) {
		DPAA2_PMD_ERR(
			"Invalid pattern list is given\n");
		rte_flow_error_set(error, EPERM,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pattern, "invalid");
		goto not_valid_params;
	}
	/* Verify input action list */
	ret = dpaa2_dev_verify_actions(actions);
	if (ret < 0) {
		DPAA2_PMD_ERR(
			"Invalid action list is given\n");
		rte_flow_error_set(error, EPERM,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   actions, "invalid");
		goto not_valid_params;
	}
not_valid_params:
	return ret;
}
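/**
 * Create a flow rule and program it into the QoS/FS tables.
 *
 * A minimal illustrative sketch of application-side usage through the
 * generic flow API (port_id, the queue index and the EtherType match are
 * placeholders):
 *
 * @code
 * struct rte_flow_attr attr = { .group = 0, .priority = 0, .ingress = 1 };
 * struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x0800) };
 * struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xffff) };
 * struct rte_flow_action_queue queue = { .index = 1 };
 * struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec,
 *	  .mask = &eth_mask },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *				        actions, &err);
 * @endcode
 *
 * Note that this driver requires a spec on every pattern item and maps
 * attr->group to a traffic class and attr->priority to an FS entry index.
 */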
static
struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
				   const struct rte_flow_attr *attr,
				   const struct rte_flow_item pattern[],
				   const struct rte_flow_action actions[],
				   struct rte_flow_error *error)
{
	struct rte_flow *flow = NULL;
	size_t key_iova = 0, mask_iova = 0;
	int ret;

	flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
	if (!flow) {
		DPAA2_PMD_ERR("Failure to allocate memory for flow");
		goto mem_failure;
	}
	/* Allocate DMA'ble memory to write the rules */
	key_iova = (size_t)rte_malloc(NULL, 256, 64);
	if (!key_iova) {
		DPAA2_PMD_ERR(
			"Memory allocation failure for rule configuration\n");
		goto mem_failure;
	}
	mask_iova = (size_t)rte_malloc(NULL, 256, 64);
	if (!mask_iova) {
		DPAA2_PMD_ERR(
			"Memory allocation failure for rule configuration\n");
		goto mem_failure;
	}

	flow->rule.key_iova = key_iova;
	flow->rule.mask_iova = mask_iova;
	flow->rule.key_size = 0;

	switch (dpaa2_filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
					     actions, error);
		if (ret < 0) {
			if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
				rte_flow_error_set(error, EPERM,
						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						   attr, "unknown");
			DPAA2_PMD_ERR(
				"Failure to create flow, return code (%d)", ret);
			goto creation_error;
		}
		break;
	default:
		DPAA2_PMD_ERR("Filter type (%d) not supported",
			      dpaa2_filter_type);
		break;
	}

	return flow;
mem_failure:
	rte_flow_error_set(error, EPERM,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, "memory alloc");
creation_error:
	rte_free((void *)flow);
	rte_free((void *)key_iova);
	rte_free((void *)mask_iova);

	return NULL;
}

static
int dpaa2_flow_destroy(struct rte_eth_dev *dev,
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int ret = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	switch (flow->action) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Remove entry from QoS table first */
		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
					    &flow->rule);
		if (ret < 0) {
			DPAA2_PMD_ERR(
				"Error in removing entry from QoS table(%d)", ret);
			goto error;
		}

		/* Then remove entry from FS table */
		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
					   flow->tc_id, &flow->rule);
		if (ret < 0) {
			DPAA2_PMD_ERR(
				"Error in removing entry from FS table(%d)", ret);
			goto error;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
					    &flow->rule);
		if (ret < 0) {
			DPAA2_PMD_ERR(
				"Error in removing entry from QoS table(%d)", ret);
			goto error;
		}
		break;
	default:
		DPAA2_PMD_ERR(
			"Action type (%d) is not supported", flow->action);
		ret = -ENOTSUP;
		break;
	}

	LIST_REMOVE(flow, next);
	/* Now free the flow along with its DMA'ble rule memory */
	rte_free((void *)(size_t)flow->rule.key_iova);
	rte_free((void *)(size_t)flow->rule.mask_iova);
	rte_free(flow);

error:
	if (ret)
		rte_flow_error_set(error, EPERM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "unknown");
	return ret;
}
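/*
 * dpaa2_flow_destroy() above also serves dpaa2_flow_flush() and
 * dpaa2_flow_clean() below; both simply walk the per-device flow list and
 * destroy each entry in turn.
 */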
/**
 * Destroy user-configured flow rules.
 *
 * This function skips internal flow rules.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
dpaa2_flow_flush(struct rte_eth_dev *dev,
		 struct rte_flow_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = LIST_FIRST(&priv->flows);

	while (flow) {
		struct rte_flow *next = LIST_NEXT(flow, next);

		dpaa2_flow_destroy(dev, flow, error);
		flow = next;
	}
	return 0;
}

static int
dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
		 struct rte_flow *flow __rte_unused,
		 const struct rte_flow_action *actions __rte_unused,
		 void *data __rte_unused,
		 struct rte_flow_error *error __rte_unused)
{
	return 0;
}

/**
 * Clean up all flow rules.
 *
 * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
 * rules regardless of whether they are internal or user-configured.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
dpaa2_flow_clean(struct rte_eth_dev *dev)
{
	struct rte_flow *flow;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	while ((flow = LIST_FIRST(&priv->flows)))
		dpaa2_flow_destroy(dev, flow, NULL);
}

const struct rte_flow_ops dpaa2_flow_ops = {
	.create = dpaa2_flow_create,
	.validate = dpaa2_flow_validate,
	.destroy = dpaa2_flow_destroy,
	.flush = dpaa2_flow_flush,
	.query = dpaa2_flow_query,
};
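/*
 * Applications reach the handlers above through the generic rte_flow API
 * (rte_flow_validate(), rte_flow_create(), rte_flow_destroy(),
 * rte_flow_flush(), rte_flow_query()). Presumably the PMD returns this
 * table from its filter_ctrl ethdev op for RTE_ETH_FILTER_GENERIC, which
 * is also where dpaa2_filter_type is expected to be set before
 * dpaa2_flow_create() consults it.
 */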