/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <cnxk_flow.h>
#include <cnxk_rep.h>

/* Bit set in rep_pattern when the pattern's port item refers to a representor */
#define IS_REP_BIT 7

/* Mark-ID encoding for representor flows:
 * tunnel decap flows use (tunnel item type << 6) | TNL_DCP_MATCH_ID,
 * non-tunnel flows use NRML_MATCH_ID (see append_mark_action()).
 */
#define TNL_DCP_MATCH_ID 5
#define NRML_MATCH_ID 1

/* Map of rte_flow item types to ROC NPC item types and their spec sizes */
const struct cnxk_rte_flow_term_info term[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {ROC_NPC_ITEM_TYPE_ETH, sizeof(struct rte_flow_item_eth)},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {ROC_NPC_ITEM_TYPE_VLAN, sizeof(struct rte_flow_item_vlan)},
	[RTE_FLOW_ITEM_TYPE_E_TAG] = {ROC_NPC_ITEM_TYPE_E_TAG, sizeof(struct rte_flow_item_e_tag)},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {ROC_NPC_ITEM_TYPE_IPV4, sizeof(struct rte_flow_item_ipv4)},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {ROC_NPC_ITEM_TYPE_IPV6, sizeof(struct rte_flow_item_ipv6)},
	[RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT] = {ROC_NPC_ITEM_TYPE_IPV6_FRAG_EXT,
					      sizeof(struct rte_flow_item_ipv6_frag_ext)},
	[RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4] = {ROC_NPC_ITEM_TYPE_ARP_ETH_IPV4,
					     sizeof(struct rte_flow_item_arp_eth_ipv4)},
	[RTE_FLOW_ITEM_TYPE_MPLS] = {ROC_NPC_ITEM_TYPE_MPLS, sizeof(struct rte_flow_item_mpls)},
	[RTE_FLOW_ITEM_TYPE_ICMP] = {ROC_NPC_ITEM_TYPE_ICMP, sizeof(struct rte_flow_item_icmp)},
	[RTE_FLOW_ITEM_TYPE_UDP] = {ROC_NPC_ITEM_TYPE_UDP, sizeof(struct rte_flow_item_udp)},
	[RTE_FLOW_ITEM_TYPE_TCP] = {ROC_NPC_ITEM_TYPE_TCP, sizeof(struct rte_flow_item_tcp)},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {ROC_NPC_ITEM_TYPE_SCTP, sizeof(struct rte_flow_item_sctp)},
	[RTE_FLOW_ITEM_TYPE_ESP] = {ROC_NPC_ITEM_TYPE_ESP, sizeof(struct rte_flow_item_esp)},
	[RTE_FLOW_ITEM_TYPE_GRE] = {ROC_NPC_ITEM_TYPE_GRE, sizeof(struct rte_flow_item_gre)},
	[RTE_FLOW_ITEM_TYPE_NVGRE] = {ROC_NPC_ITEM_TYPE_NVGRE, sizeof(struct rte_flow_item_nvgre)},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {ROC_NPC_ITEM_TYPE_VXLAN, sizeof(struct rte_flow_item_vxlan)},
	[RTE_FLOW_ITEM_TYPE_GTPC] = {ROC_NPC_ITEM_TYPE_GTPC, sizeof(struct rte_flow_item_gtp)},
	[RTE_FLOW_ITEM_TYPE_GTPU] = {ROC_NPC_ITEM_TYPE_GTPU, sizeof(struct rte_flow_item_gtp)},
	[RTE_FLOW_ITEM_TYPE_GENEVE] = {ROC_NPC_ITEM_TYPE_GENEVE,
				       sizeof(struct rte_flow_item_geneve)},
	[RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = {ROC_NPC_ITEM_TYPE_VXLAN_GPE,
					  sizeof(struct rte_flow_item_vxlan_gpe)},
	[RTE_FLOW_ITEM_TYPE_IPV6_EXT] = {ROC_NPC_ITEM_TYPE_IPV6_EXT,
					 sizeof(struct rte_flow_item_ipv6_ext)},
	[RTE_FLOW_ITEM_TYPE_VOID] = {ROC_NPC_ITEM_TYPE_VOID, 0},
	[RTE_FLOW_ITEM_TYPE_ANY] = {ROC_NPC_ITEM_TYPE_ANY, 0},
	[RTE_FLOW_ITEM_TYPE_GRE_KEY] = {ROC_NPC_ITEM_TYPE_GRE_KEY, sizeof(uint32_t)},
	[RTE_FLOW_ITEM_TYPE_HIGIG2] = {ROC_NPC_ITEM_TYPE_HIGIG2,
				       sizeof(struct rte_flow_item_higig2_hdr)},
	[RTE_FLOW_ITEM_TYPE_RAW] = {ROC_NPC_ITEM_TYPE_RAW, sizeof(struct rte_flow_item_raw)},
	[RTE_FLOW_ITEM_TYPE_MARK] = {ROC_NPC_ITEM_TYPE_MARK, sizeof(struct rte_flow_item_mark)},
	[RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT] = {ROC_NPC_ITEM_TYPE_IPV6_ROUTING_EXT,
						 sizeof(struct rte_flow_item_ipv6_routing_ext)},
	[RTE_FLOW_ITEM_TYPE_TX_QUEUE] = {ROC_NPC_ITEM_TYPE_TX_QUEUE,
					 sizeof(struct rte_flow_item_tx_queue)},
	[RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT] = {ROC_NPC_ITEM_TYPE_REPRESENTED_PORT,
						 sizeof(struct rte_flow_item_ethdev)},
	[RTE_FLOW_ITEM_TYPE_PPPOES] = {ROC_NPC_ITEM_TYPE_PPPOES,
				       sizeof(struct rte_flow_item_pppoe)}
};

/* Validate an RSS flow action against the flow attributes and the device
 * RX configuration. Returns 0 on success, -EINVAL/-ENOTSUP on failure.
 */
static int
npc_rss_action_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
			const struct rte_flow_action *act)
{
	const struct rte_flow_action_rss *rss;

	rss = (const struct rte_flow_action_rss *)act->conf;

	/* RSS spreads to RX queues; it cannot apply to egress rules */
	if (attr->egress) {
		plt_err("No support of RSS in egress");
		return -EINVAL;
	}

	if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
		plt_err("multi-queue mode is disabled");
		return -ENOTSUP;
	}

	if (!rss || !rss->queue_num) {
		plt_err("no valid queues");
		return -EINVAL;
	}

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
		plt_err("non-default RSS hash functions are not supported");
		return -ENOTSUP;
	}

	if (rss->key_len && rss->key_len >
ROC_NIX_RSS_KEY_LEN) {
		plt_err("RSS hash key too large");
		return -ENOTSUP;
	}

	return 0;
}

/* Compute the NIX RSS flowkey configuration from an RSS action's RSS type
 * bits; falls back to default_rss_types when the action specifies none.
 */
static void
npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, const struct roc_npc_action *rss_action,
		    uint32_t *flowkey_cfg, uint64_t default_rss_types)
{
	const struct roc_npc_action_rss *rss;
	uint64_t rss_types;

	rss = (const struct roc_npc_action_rss *)rss_action->conf;
	rss_types = rss->types;
	/* If no RSS types are specified, use default one */
	if (rss_types == 0)
		rss_types = default_rss_types;

	*flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss_types, rss->level);
}

/* Resolve a PORT_ID action to the destination pf_func and RX channel.
 * The destination port must be bound to the same driver as eth_dev.
 * Returns 0 on success, -EINVAL on any lookup/driver mismatch.
 */
static int
npc_parse_port_id_action(struct rte_eth_dev *eth_dev, const struct rte_flow_action *action,
			 uint16_t *dst_pf_func, uint16_t *dst_channel)
{
	const struct rte_flow_action_port_id *port_act;
	struct rte_eth_dev *portid_eth_dev;
	char if_name[RTE_ETH_NAME_MAX_LEN];
	struct cnxk_eth_dev *hw_dst;
	struct roc_npc *roc_npc_dst;
	int rc = 0;

	port_act = (const struct rte_flow_action_port_id *)action->conf;

	rc = rte_eth_dev_get_name_by_port(port_act->id, if_name);
	if (rc) {
		plt_err("Name not found for output port id");
		goto err_exit;
	}
	portid_eth_dev = rte_eth_dev_allocated(if_name);
	if (!portid_eth_dev) {
		plt_err("eth_dev not found for output port id");
		goto err_exit;
	}
	if (strcmp(portid_eth_dev->device->driver->name, eth_dev->device->driver->name) != 0) {
		plt_err("Output port not under same driver");
		goto err_exit;
	}
	hw_dst = portid_eth_dev->data->dev_private;
	roc_npc_dst = &hw_dst->npc;
	*dst_pf_func = roc_npc_dst->pf_func;
	*dst_channel = hw_dst->npc.channel;

	return 0;

err_exit:
	return -EINVAL;
}

/* Parse the sub-action list of a SAMPLE action. Exactly one mirror
 * destination (PF, VF or PORT_ID) must be present and the sample ratio
 * must be 1; other sub-action types are silently skipped.
 */
static int
roc_npc_parse_sample_subaction(struct rte_eth_dev *eth_dev, const struct rte_flow_action actions[],
			       struct roc_npc_action_sample *sample_action)
{
	uint16_t dst_pf_func
= 0, dst_channel = 0;
	const struct roc_npc_action_vf *vf_act;
	int rc = 0, count = 0;
	bool is_empty = true;

	if (sample_action->ratio != 1) {
		plt_err("Sample ratio must be 1");
		return -EINVAL;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		is_empty = false;
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_PF:
			count++;
			sample_action->action_type |= ROC_NPC_ACTION_TYPE_PF;
			break;
		case RTE_FLOW_ACTION_TYPE_VF:
			count++;
			vf_act = (const struct roc_npc_action_vf *)actions->conf;
			sample_action->action_type |= ROC_NPC_ACTION_TYPE_VF;
			sample_action->pf_func = vf_act->id & NPC_PFVF_FUNC_MASK;
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			rc = npc_parse_port_id_action(eth_dev, actions, &dst_pf_func, &dst_channel);
			if (rc)
				return -EINVAL;

			count++;
			sample_action->action_type |= ROC_NPC_ACTION_TYPE_PORT_ID;
			sample_action->pf_func = dst_pf_func;
			sample_action->channel = dst_channel;
			break;
		default:
			/* Other sub-action types are ignored, not rejected */
			continue;
		}
	}

	/* Exactly one mirror destination must have been found */
	if (count > 1 || is_empty)
		return -EINVAL;

	return 0;
}

/* Append a MARK action after position *act_cnt so that flows installed on
 * behalf of representor ports can be identified on RX. The allocated conf
 * is recorded in free_allocs for the caller's cleanup loop to free.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
append_mark_action(struct roc_npc_action *in_actions, uint8_t has_tunnel_pattern,
		   uint64_t *free_allocs, int *act_cnt)
{
	struct rte_flow_action_mark *act_mark;
	int i = *act_cnt, j = 0;

	/* Add Mark action */
	i++;
	act_mark = plt_zmalloc(sizeof(struct rte_flow_action_mark), 0);
	if (!act_mark) {
		plt_err("Error allocation memory");
		return -ENOMEM;
	}

	/* Record the allocation in the first free slot for later cleanup */
	while (free_allocs[j] != 0)
		j++;
	free_allocs[j] = (uint64_t)act_mark;
	/* Mark ID format: (tunnel type - VxLAN, Geneve << 6) | Tunnel decap */
	act_mark->id =
		has_tunnel_pattern ?
((has_tunnel_pattern << 6) | TNL_DCP_MATCH_ID) : NRML_MATCH_ID; 214 in_actions[i].type = ROC_NPC_ACTION_TYPE_MARK; 215 in_actions[i].conf = (struct rte_flow_action_mark *)act_mark; 216 217 plt_rep_dbg("Assigned mark ID %x", act_mark->id); 218 219 *act_cnt = i; 220 221 return 0; 222 } 223 224 static int 225 append_rss_action(struct cnxk_eth_dev *dev, struct roc_npc_action *in_actions, uint16_t nb_rxq, 226 uint32_t *flowkey_cfg, uint64_t *free_allocs, uint16_t rss_repte_pf_func, 227 int *act_cnt) 228 { 229 struct roc_npc_action_rss *rss_conf; 230 int i = *act_cnt, j = 0, l, rc = 0; 231 uint16_t *queue_arr; 232 233 rss_conf = plt_zmalloc(sizeof(struct roc_npc_action_rss), 0); 234 if (!rss_conf) { 235 plt_err("Failed to allocate memory for rss conf"); 236 rc = -ENOMEM; 237 goto fail; 238 } 239 240 /* Add RSS action */ 241 rss_conf->queue_num = nb_rxq; 242 queue_arr = calloc(1, rss_conf->queue_num * sizeof(uint16_t)); 243 if (!queue_arr) { 244 plt_err("Failed to allocate memory for rss queue"); 245 rc = -ENOMEM; 246 goto free_rss; 247 } 248 249 for (l = 0; l < nb_rxq; l++) 250 queue_arr[l] = l; 251 rss_conf->queue = queue_arr; 252 rss_conf->key = NULL; 253 rss_conf->types = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP; 254 255 i++; 256 257 in_actions[i].type = ROC_NPC_ACTION_TYPE_RSS; 258 in_actions[i].conf = (struct roc_npc_action_rss *)rss_conf; 259 in_actions[i].rss_repte_pf_func = rss_repte_pf_func; 260 261 npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg, 262 RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP); 263 264 *act_cnt = i; 265 266 while (free_allocs[j] != 0) 267 j++; 268 free_allocs[j] = (uint64_t)rss_conf; 269 270 return 0; 271 free_rss: 272 rte_free(rss_conf); 273 fail: 274 return rc; 275 } 276 277 static int 278 representor_rep_portid_action(struct roc_npc_action *in_actions, struct rte_eth_dev *eth_dev, 279 struct rte_eth_dev *portid_eth_dev, 280 enum rte_flow_action_type act_type, uint8_t rep_pattern, 281 uint16_t *dst_pf_func, bool is_rep, 
uint8_t has_tunnel_pattern,
			      uint64_t *free_allocs, int *act_cnt, uint32_t *flowkey_cfg)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev *rep_eth_dev = portid_eth_dev;
	struct rte_flow_action_of_set_vlan_vid *vlan_vid;
	struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
	struct rte_flow_action_of_push_vlan *push_vlan;
	struct rte_flow_action_queue *act_q = NULL;
	struct cnxk_rep_dev *rep_dev;
	struct roc_npc *npc;
	uint16_t vlan_tci;
	int j = 0, rc;

	/* For inserting an action in the list */
	int i = *act_cnt;

	rep_dev = cnxk_rep_pmd_priv(rep_eth_dev);
	/* NPC context comes from the device itself, or from the
	 * representor's parent eswitch device.
	 */
	if (!is_rep) {
		dev = cnxk_eth_pmd_priv(eth_dev);
		npc = &dev->npc;
	} else {
		npc = &rep_dev->parent_dev->npc;
	}
	if (rep_pattern >> IS_REP_BIT) { /* Check for normal/representor port as action */
		if ((rep_pattern & 0x7f) == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
			/* Case: Repr port pattern -> Default TX rule -> LBK ->
			 * Pattern RX LBK rule hit -> Action: send to new pf_func
			 */
			if (act_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR) {
				/* New pf_func corresponds to ESW + queue corresponding to rep_id */
				act_q = plt_zmalloc(sizeof(struct rte_flow_action_queue), 0);
				if (!act_q) {
					plt_err("Error allocation memory");
					return -ENOMEM;
				}
				act_q->index = rep_dev->rep_id;

				while (free_allocs[j] != 0)
					j++;
				free_allocs[j] = (uint64_t)act_q;
				in_actions[i].type = ROC_NPC_ACTION_TYPE_QUEUE;
				in_actions[i].conf = (struct rte_flow_action_queue *)act_q;
				npc->rep_act_pf_func = rep_dev->parent_dev->npc.pf_func;
			} else {
				/* New pf_func corresponds to hw_func of representee */
				in_actions[i].type = ROC_NPC_ACTION_TYPE_PORT_ID;
				npc->rep_act_pf_func = rep_dev->hw_func;
				*dst_pf_func = rep_dev->hw_func;
			}
			/* Additional action to strip the VLAN from packets received by LBK */
			i++;
			in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_STRIP;
			goto done;
		}
		/* Case: Repd port pattern -> TX Rule with VLAN -> LBK -> Default RX LBK rule hit
		 * base on vlan, if packet goes to ESW or actual pf_func -> Action :
		 * act port_representor: send to ESW respective using 1<<8 | rep_id as tci value
		 * act represented_port: send to actual port using rep_id as tci value.
		 */
		/* Add RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN action */
		push_vlan = plt_zmalloc(sizeof(struct rte_flow_action_of_push_vlan), 0);
		if (!push_vlan) {
			plt_err("Error allocation memory");
			return -ENOMEM;
		}

		while (free_allocs[j] != 0)
			j++;
		free_allocs[j] = (uint64_t)push_vlan;
		push_vlan->ethertype = ntohs(ROC_ESWITCH_VLAN_TPID);
		in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_ETHTYPE_INSERT;
		in_actions[i].conf = (struct rte_flow_action_of_push_vlan *)push_vlan;
		i++;

		/* Add RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP action */
		vlan_pcp = plt_zmalloc(sizeof(struct rte_flow_action_of_set_vlan_pcp), 0);
		if (!vlan_pcp) {
			plt_err("Error allocation memory");
			return -ENOMEM;
		}

		/* Slots j+1/j+2 directly follow the push_vlan entry above */
		free_allocs[j + 1] = (uint64_t)vlan_pcp;
		vlan_pcp->vlan_pcp = 0;
		in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_PCP_INSERT;
		in_actions[i].conf = (struct rte_flow_action_of_set_vlan_pcp *)vlan_pcp;
		i++;

		/* Add RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID action */
		vlan_vid = plt_zmalloc(sizeof(struct rte_flow_action_of_set_vlan_vid), 0);
		if (!vlan_vid) {
			plt_err("Error allocation memory");
			return -ENOMEM;
		}

		free_allocs[j + 2] = (uint64_t)vlan_vid;
		if (act_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR)
			vlan_tci = rep_dev->rep_id | (1ULL << CNXK_ESWITCH_VFPF_SHIFT);
		else
			vlan_tci = rep_dev->rep_id;
		vlan_vid->vlan_vid = ntohs(vlan_tci);
		in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_INSERT;
		in_actions[i].conf = (struct rte_flow_action_of_set_vlan_vid *)vlan_vid;

		/* Change default channel to UCAST_CHAN (63) while
sending */
		npc->rep_act_rep = true;
	} else {
		if (act_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR) {
			/* Case: Pattern wire port -> Pattern RX rule->
			 * Action: pf_func = ESW. queue = rep_id
			 */
			act_q = plt_zmalloc(sizeof(struct rte_flow_action_queue), 0);
			if (!act_q) {
				plt_err("Error allocation memory");
				return -ENOMEM;
			}
			while (free_allocs[j] != 0)
				j++;
			free_allocs[j] = (uint64_t)act_q;
			act_q->index = rep_dev->rep_id;

			in_actions[i].type = ROC_NPC_ACTION_TYPE_QUEUE;
			in_actions[i].conf = (struct rte_flow_action_queue *)act_q;
			npc->rep_act_pf_func = rep_dev->parent_dev->npc.pf_func;
		} else {
			/* Case: Pattern wire port -> Pattern RX rule->
			 * Action: Receive at actual hw_func
			 */
			in_actions[i].type = ROC_NPC_ACTION_TYPE_PORT_ID;
			npc->rep_act_pf_func = rep_dev->hw_func;
			*dst_pf_func = rep_dev->hw_func;

			/* Append a mark action - needed to identify the flow */
			rc = append_mark_action(in_actions, has_tunnel_pattern, free_allocs, &i);
			if (rc)
				return rc;
			/* Append RSS action if representee has RSS enabled */
			if (rep_dev->nb_rxq > 1) {
				/* PF can install rule for only its VF acting as representee */
				if (rep_dev->hw_func &&
				    roc_eswitch_is_repte_pfs_vf(rep_dev->hw_func,
								roc_nix_get_pf_func(npc->roc_nix))) {
					rc = append_rss_action(dev, in_actions, rep_dev->nb_rxq,
							       flowkey_cfg, free_allocs,
							       rep_dev->hw_func, &i);
					if (rc)
						return rc;
				}
			}
		}
	}
done:
	*act_cnt = i;

	return 0;
}

/* Map a PORT_ID/REPRESENTED_PORT action targeting a representor port:
 * steer to the representee's hw_func and append an identifying MARK action.
 * Updates *act_cnt; allocations are recorded in free_allocs.
 */
static int
representor_portid_action(struct roc_npc_action *in_actions, struct rte_eth_dev *portid_eth_dev,
			  uint16_t *dst_pf_func, uint8_t has_tunnel_pattern, uint64_t *free_allocs,
			  int *act_cnt)
{
	struct rte_eth_dev *rep_eth_dev = portid_eth_dev;
	struct cnxk_rep_dev *rep_dev;
	/* For inserting an action in the list */
	int i = *act_cnt, rc;

	rep_dev =
cnxk_rep_pmd_priv(rep_eth_dev);

	*dst_pf_func = rep_dev->hw_func;

	rc = append_mark_action(in_actions, has_tunnel_pattern, free_allocs, &i);
	if (rc)
		return rc;

	*act_cnt = i;
	plt_rep_dbg("Rep port %d ID %d rep_dev->hw_func 0x%x", rep_dev->port_id, rep_dev->rep_id,
		    rep_dev->hw_func);

	return 0;
}

/* Translate an rte_flow action list into the ROC NPC action array
 * in_actions[], terminating it with ROC_NPC_ACTION_TYPE_END. Extra
 * allocations made while mapping are recorded in free_allocs for the
 * caller to free. Returns 0 on success, negative errno on failure.
 */
static int
cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_action actions[], struct roc_npc_action in_actions[],
		 struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg,
		 uint16_t *dst_pf_func, uint8_t has_tunnel_pattern, bool is_rep,
		 uint8_t rep_pattern, uint64_t *free_allocs)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct rte_flow_action_queue *act_q = NULL;
	const struct rte_flow_action_ethdev *act_ethdev;
	const struct rte_flow_action_sample *act_sample;
	const struct rte_flow_action_port_id *port_act;
	struct rte_eth_dev *portid_eth_dev;
	char if_name[RTE_ETH_NAME_MAX_LEN];
	struct cnxk_eth_dev *hw_dst;
	struct roc_npc *roc_npc_dst;
	bool is_vf_action = false;
	int i = 0, rc = 0;
	int rq;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_VOID;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_MARK;
			in_actions[i].conf = actions->conf;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_COUNT:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_COUNT;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_PF:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_PF;
			break;

		case RTE_FLOW_ACTION_TYPE_VF:
in_actions[i].type = ROC_NPC_ACTION_TYPE_VF;
			in_actions[i].conf = actions->conf;
			is_vf_action = true;
			break;

		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
			in_actions[i].conf = actions->conf;
			act_ethdev = (const struct rte_flow_action_ethdev *)actions->conf;
			if (rte_eth_dev_get_name_by_port(act_ethdev->port_id, if_name)) {
				plt_err("Name not found for output port id");
				goto err_exit;
			}
			portid_eth_dev = rte_eth_dev_allocated(if_name);
			if (!portid_eth_dev) {
				plt_err("eth_dev not found for output port id");
				goto err_exit;
			}

			plt_rep_dbg("Rule installed by port %d if_name %s act_ethdev->port_id %d",
				    eth_dev->data->port_id, if_name, act_ethdev->port_id);
			if (cnxk_ethdev_is_representor(if_name)) {
				if (representor_rep_portid_action(in_actions, eth_dev,
								  portid_eth_dev, actions->type,
								  rep_pattern, dst_pf_func, is_rep,
								  has_tunnel_pattern, free_allocs,
								  &i, flowkey_cfg)) {
					plt_err("Representor port action set failed");
					goto err_exit;
				}
			} else {
				/* "continue" skips the i++ below: the action is dropped */
				if (actions->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT)
					continue;
				/* Normal port as represented_port as action not supported*/
				return -ENOTSUP;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			/* No port ID action on representor ethdevs */
			if (is_rep)
				continue;
			in_actions[i].type = ROC_NPC_ACTION_TYPE_PORT_ID;
			in_actions[i].conf = actions->conf;
			act_ethdev = (const struct rte_flow_action_ethdev *)actions->conf;
			port_act = (const struct rte_flow_action_port_id *)actions->conf;
			if (rte_eth_dev_get_name_by_port(
				    actions->type != RTE_FLOW_ACTION_TYPE_PORT_ID ?
					    act_ethdev->port_id :
					    port_act->id,
				    if_name)) {
				plt_err("Name not found for output port id");
				goto err_exit;
			}
			portid_eth_dev = rte_eth_dev_allocated(if_name);
			if (!portid_eth_dev) {
				plt_err("eth_dev not found for output port id");
				goto err_exit;
			}

			if (cnxk_ethdev_is_representor(if_name)) {
				plt_rep_dbg("Representor port %d act port %d", port_act->id,
					    act_ethdev->port_id);
				if (representor_portid_action(in_actions, portid_eth_dev,
							      dst_pf_func, has_tunnel_pattern,
							      free_allocs, &i)) {
					plt_err("Representor port action set failed");
					goto err_exit;
				}
			} else {
				if (strcmp(portid_eth_dev->device->driver->name,
					   eth_dev->device->driver->name) != 0) {
					plt_err("Output port not under same driver");
					goto err_exit;
				}

				hw_dst = portid_eth_dev->data->dev_private;
				roc_npc_dst = &hw_dst->npc;
				*dst_pf_func = roc_npc_dst->pf_func;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			/* act_q kept for post-loop queue index validation */
			act_q = (const struct rte_flow_action_queue *)actions->conf;
			in_actions[i].type = ROC_NPC_ACTION_TYPE_QUEUE;
			in_actions[i].conf = actions->conf;
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			/* No RSS action on representor ethdevs */
			if (is_rep)
				continue;
			rc = npc_rss_action_validate(eth_dev, attr, actions);
			if (rc)
				goto err_exit;

			in_actions[i].type = ROC_NPC_ACTION_TYPE_RSS;
			in_actions[i].conf = actions->conf;
			npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg,
					    eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
			break;

		case RTE_FLOW_ACTION_TYPE_SECURITY:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_SEC;
			in_actions[i].conf = actions->conf;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_STRIP;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_INSERT;
			in_actions[i].conf = actions->conf;
break;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_ETHTYPE_INSERT;
			in_actions[i].conf = actions->conf;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_PCP_INSERT;
			in_actions[i].conf = actions->conf;
			break;
		case RTE_FLOW_ACTION_TYPE_METER:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_METER;
			in_actions[i].conf = actions->conf;
			break;
		case RTE_FLOW_ACTION_TYPE_AGE:
			in_actions[i].type = ROC_NPC_ACTION_TYPE_AGE;
			in_actions[i].conf = actions->conf;
			break;
		case RTE_FLOW_ACTION_TYPE_SAMPLE:
			act_sample = actions->conf;
			in_sample_actions->ratio = act_sample->ratio;
			rc = roc_npc_parse_sample_subaction(eth_dev, act_sample->actions,
							    in_sample_actions);
			if (rc) {
				plt_err("Sample subaction parsing failed.");
				goto err_exit;
			}

			in_actions[i].type = ROC_NPC_ACTION_TYPE_SAMPLE;
			in_actions[i].conf = in_sample_actions;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
			/* Nothing to map; skips the i++ below */
			continue;
		default:
			plt_npc_dbg("Action is not supported = %d", actions->type);
			goto err_exit;
		}
		i++;
	}

	/* Validate a QUEUE action's index against the configured RX queues */
	if (!is_vf_action && act_q) {
		rq = act_q->index;
		if (rq >= eth_dev->data->nb_rx_queues) {
			plt_npc_dbg("Invalid queue index");
			goto err_exit;
		}
	}
	in_actions[i].type = ROC_NPC_ACTION_TYPE_END;
	return 0;

err_exit:
	return -EINVAL;
}

/* Translate an rte_flow pattern list into the ROC NPC item array
 * in_pattern[], terminating it with ROC_NPC_ITEM_TYPE_END. Records the
 * last tunnel item type in *has_tunnel_pattern and the port item kind in
 * *rep_pattern (OR-ed with 1 << IS_REP_BIT for representor ports).
 */
static int
cnxk_map_pattern(struct rte_eth_dev *eth_dev, const struct rte_flow_item pattern[],
		 struct roc_npc_item_info in_pattern[], uint8_t *has_tunnel_pattern, bool is_rep,
		 uint8_t *rep_pattern, uint64_t *free_allocs)
{
	const struct rte_flow_item_ethdev *rep_eth_dev;
	struct rte_eth_dev *portid_eth_dev;
	char if_name[RTE_ETH_NAME_MAX_LEN];
	struct cnxk_eth_dev *hw_dst;
	struct cnxk_rep_dev *rdev;
	struct cnxk_eth_dev *dev;
	struct roc_npc *npc;
	int i = 0, j
= 0;

	/* NPC context comes from the device itself, or from the
	 * representor's parent eswitch device.
	 */
	if (!is_rep) {
		dev = cnxk_eth_pmd_priv(eth_dev);
		npc = &dev->npc;
	} else {
		rdev = cnxk_rep_pmd_priv(eth_dev);
		npc = &rdev->parent_dev->npc;

		npc->rep_npc = npc;
		npc->rep_port_id = rdev->port_id;
		npc->rep_pf_func = rdev->hw_func;
	}

	while (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
		/* Default 1:1 mapping via the term[] translation table */
		in_pattern[i].spec = pattern->spec;
		in_pattern[i].last = pattern->last;
		in_pattern[i].mask = pattern->mask;
		in_pattern[i].type = term[pattern->type].item_type;
		in_pattern[i].size = term[pattern->type].item_size;
		if (pattern->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT ||
		    pattern->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
			rep_eth_dev = (const struct rte_flow_item_ethdev *)pattern->spec;
			if (rte_eth_dev_get_name_by_port(rep_eth_dev->port_id, if_name)) {
				plt_err("Name not found for output port id");
				goto fail;
			}
			portid_eth_dev = rte_eth_dev_allocated(if_name);
			if (!portid_eth_dev) {
				plt_err("eth_dev not found for output port id");
				goto fail;
			}
			*rep_pattern = pattern->type;
			if (cnxk_ethdev_is_representor(if_name)) {
				/* Case where represented port not part of same
				 * app and represented by a representor port.
				 */
				struct cnxk_rep_dev *rep_dev;
				struct cnxk_eswitch_dev *eswitch_dev;

				rep_dev = cnxk_rep_pmd_priv(portid_eth_dev);
				eswitch_dev = rep_dev->parent_dev;
				npc->rep_npc = &eswitch_dev->npc;
				npc->rep_port_id = rep_eth_dev->port_id;
				npc->rep_pf_func = rep_dev->hw_func;

				if (pattern->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
					struct rte_flow_item_vlan *vlan;

					npc->rep_pf_func = eswitch_dev->npc.pf_func;
					/* Add VLAN pattern corresponding to rep_id */
					i++;
					vlan = plt_zmalloc(sizeof(struct rte_flow_item_vlan), 0);
					if (!vlan) {
						plt_err("error allocation memory");
						return -ENOMEM;
					}

					/* Record allocation for caller's cleanup loop */
					while (free_allocs[j] != 0)
						j++;
					free_allocs[j] = (uint64_t)vlan;

					npc->rep_rx_channel = ROC_ESWITCH_LBK_CHAN;
					vlan->hdr.vlan_tci = RTE_BE16(rep_dev->rep_id);
					in_pattern[i].spec = (struct rte_flow_item_vlan *)vlan;
					in_pattern[i].last = NULL;
					in_pattern[i].mask = &rte_flow_item_vlan_mask;
					in_pattern[i].type =
						term[RTE_FLOW_ITEM_TYPE_VLAN].item_type;
					in_pattern[i].size =
						term[RTE_FLOW_ITEM_TYPE_VLAN].item_size;
				}
				*rep_pattern |= 1 << IS_REP_BIT;
				plt_rep_dbg("Represented port %d act port %d rep_dev->hw_func 0x%x",
					    rep_eth_dev->port_id, eth_dev->data->port_id,
					    rep_dev->hw_func);
			} else {
				if (strcmp(portid_eth_dev->device->driver->name,
					   eth_dev->device->driver->name) != 0) {
					plt_err("Output port not under same driver");
					goto fail;
				}
				/* Normal port as port_representor pattern can't be supported */
				if (pattern->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR)
					return -ENOTSUP;
				/* Case where represented port part of same app
				 * as PF.
*/
				hw_dst = portid_eth_dev->data->dev_private;
				npc->rep_npc = &hw_dst->npc;
				npc->rep_port_id = rep_eth_dev->port_id;
				npc->rep_pf_func = hw_dst->npc.pf_func;
			}
		}

		/* Remember the last tunnel item type for mark-ID encoding */
		if (pattern->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
		    pattern->type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
		    pattern->type == RTE_FLOW_ITEM_TYPE_GRE)
			*has_tunnel_pattern = pattern->type;

		pattern++;
		i++;
	}
	in_pattern[i].type = ROC_NPC_ITEM_TYPE_END;
	return 0;
fail:
	return -EINVAL;
}

/* Map rte_flow attr/pattern/actions to their ROC NPC equivalents; for
 * transfer rules, derive the ingress/egress direction from the port item
 * kind captured in rep_pattern. Returns 0 on success, negative on failure.
 */
static int
cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[], const struct rte_flow_action actions[],
		   struct roc_npc_attr *in_attr, struct roc_npc_item_info in_pattern[],
		   struct roc_npc_action in_actions[],
		   struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg,
		   uint16_t *dst_pf_func, bool is_rep, uint64_t *free_allocs)
{
	uint8_t has_tunnel_pattern = 0, rep_pattern = 0;
	int rc;

	in_attr->priority = attr->priority;
	in_attr->ingress = attr->ingress;
	in_attr->egress = attr->egress;

	rc = cnxk_map_pattern(eth_dev, pattern, in_pattern, &has_tunnel_pattern, is_rep,
			      &rep_pattern, free_allocs);
	if (rc) {
		plt_err("Failed to map pattern list");
		return rc;
	}

	if (attr->transfer) {
		/* rep_pattern is used to identify if RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT
		 * OR RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR is defined + if pattern's portid is
		 * normal port or representor port.
		 * For normal port_id, rep_pattern = pattern-> type
		 * For representor port, rep_pattern = pattern-> type | 1 << IS_REP_BIT
		 */
		if (is_rep || rep_pattern) {
			if (rep_pattern == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT ||
			    ((rep_pattern & 0x7f) == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR))
				/* If pattern is port_representor or pattern has normal port as
				 * represented port, install ingress rule.
				 */
				in_attr->ingress = attr->transfer;
			else
				in_attr->egress = attr->transfer;
		} else {
			in_attr->ingress = attr->transfer;
		}
	}

	return cnxk_map_actions(eth_dev, attr, actions, in_actions, in_sample_actions, flowkey_cfg,
				dst_pf_func, has_tunnel_pattern, is_rep, rep_pattern, free_allocs);
}

/* Validate a flow rule without installing it (rte_flow validate op,
 * shared between normal and representor ports via is_rep).
 */
int
cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[], struct rte_flow_error *error,
			  bool is_rep)
{
	struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1];
	struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT];
	struct roc_npc_action_sample in_sample_action;
	struct cnxk_rep_dev *rep_dev;
	struct roc_npc_attr in_attr;
	uint64_t *free_allocs, sz;
	struct cnxk_eth_dev *dev;
	struct roc_npc_flow flow;
	uint32_t flowkey_cfg = 0;
	uint16_t dst_pf_func = 0;
	struct roc_npc *npc;
	int rc, j;

	/* is_rep set for operation performed via representor ports */
	if (!is_rep) {
		dev = cnxk_eth_pmd_priv(eth_dev);
		npc = &dev->npc;
		/* Skip flow validation for MACsec.
*/
		if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
		    cnxk_eth_macsec_sess_get_by_sess(dev, actions[0].conf) != NULL)
			return 0;
	} else {
		rep_dev = cnxk_rep_pmd_priv(eth_dev);
		npc = &rep_dev->parent_dev->npc;
	}

	memset(&flow, 0, sizeof(flow));
	memset(&in_sample_action, 0, sizeof(in_sample_action));
	flow.is_validate = true;

	/* Slot array for every allocation made while mapping */
	sz = ROC_NPC_MAX_ACTION_COUNT + ROC_NPC_ITEM_TYPE_END + 1;
	free_allocs = plt_zmalloc(sz * sizeof(uint64_t), 0);
	if (!free_allocs) {
		rte_flow_error_set(error, -ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "Failed to map flow data");
		return -ENOMEM;
	}
	rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr, in_pattern, in_actions,
				&in_sample_action, &flowkey_cfg, &dst_pf_func, is_rep, free_allocs);
	if (rc) {
		rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "Failed to map flow data");
		goto clean;
	}

	rc = roc_npc_flow_parse(npc, &in_attr, in_pattern, in_actions, &flow);

	if (rc) {
		rte_flow_error_set(error, 0, rc, NULL,
				   "Flow validation failed");
		goto clean;
	}
clean:
	/* Freeing the allocations done for additional patterns/actions */
	for (j = 0; (j < (int)sz) && free_allocs[j]; j++)
		plt_free((void *)free_allocs[j]);
	plt_free(free_allocs);

	return rc;
}

/* rte_flow_ops validate callback for normal (non-representor) ports */
static int
cnxk_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[], const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, false);
}

/* Create and install a flow rule (rte_flow create op, shared between
 * normal and representor ports via is_rep). Returns the created flow or
 * NULL on failure with error populated.
 */
struct roc_npc_flow *
cnxk_flow_create_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[], struct rte_flow_error *error,
			bool is_rep)
{
	struct roc_npc_item_info
in_pattern[ROC_NPC_ITEM_TYPE_END + 1] = {0};
	struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT] = {0};
	struct roc_npc_action_sample in_sample_action;
	struct cnxk_rep_dev *rep_dev = NULL;
	struct roc_npc_flow *flow = NULL;
	struct cnxk_eth_dev *dev = NULL;
	struct roc_npc_attr in_attr;
	uint64_t *free_allocs, sz;
	uint16_t dst_pf_func = 0;
	struct roc_npc *npc;
	int errcode = 0;
	int rc, j;

	/* is_rep set for operation performed via representor ports */
	if (!is_rep) {
		dev = cnxk_eth_pmd_priv(eth_dev);
		npc = &dev->npc;
	} else {
		rep_dev = cnxk_rep_pmd_priv(eth_dev);
		npc = &rep_dev->parent_dev->npc;
	}

	/* Slot array for every allocation made while mapping */
	sz = ROC_NPC_MAX_ACTION_COUNT + ROC_NPC_ITEM_TYPE_END + 1;
	free_allocs = plt_zmalloc(sz * sizeof(uint64_t), 0);
	if (!free_allocs) {
		rte_flow_error_set(error, -ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "Failed to map flow data");
		return NULL;
	}
	memset(&in_sample_action, 0, sizeof(in_sample_action));
	memset(&in_attr, 0, sizeof(struct roc_npc_attr));
	rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr, in_pattern, in_actions,
				&in_sample_action, &npc->flowkey_cfg_state, &dst_pf_func, is_rep,
				free_allocs);
	if (rc) {
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "Failed to map flow data");
		goto clean;
	}

	flow = roc_npc_flow_create(npc, &in_attr, in_pattern, in_actions, dst_pf_func, &errcode);
	if (errcode != 0) {
		rte_flow_error_set(error, errcode, errcode, NULL, roc_error_msg_get(errcode));
		goto clean;
	}

clean:
	/* Freeing the allocations done for additional patterns/actions */
	for (j = 0; (j < (int)sz) && free_allocs[j]; j++)
		plt_free((void *)free_allocs[j]);
	plt_free(free_allocs);

	return flow;
}

/* rte_flow_ops create callback for normal (non-representor) ports */
struct roc_npc_flow *
cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
		 const struct
rte_flow_item pattern[], const struct rte_flow_action actions[], 983 struct rte_flow_error *error) 984 { 985 return cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false); 986 } 987 988 int 989 cnxk_flow_destroy_common(struct rte_eth_dev *eth_dev, struct roc_npc_flow *flow, 990 struct rte_flow_error *error, bool is_rep) 991 { 992 struct cnxk_rep_dev *rep_dev; 993 struct cnxk_eth_dev *dev; 994 struct roc_npc *npc; 995 int rc; 996 997 /* is_rep set for operation performed via representor ports */ 998 if (!is_rep) { 999 dev = cnxk_eth_pmd_priv(eth_dev); 1000 npc = &dev->npc; 1001 } else { 1002 rep_dev = cnxk_rep_pmd_priv(eth_dev); 1003 npc = &rep_dev->parent_dev->npc; 1004 } 1005 1006 rc = roc_npc_flow_destroy(npc, flow); 1007 if (rc) 1008 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1009 "Flow Destroy failed"); 1010 return rc; 1011 } 1012 1013 int 1014 cnxk_flow_destroy(struct rte_eth_dev *eth_dev, struct roc_npc_flow *flow, 1015 struct rte_flow_error *error) 1016 { 1017 return cnxk_flow_destroy_common(eth_dev, flow, error, false); 1018 } 1019 1020 int 1021 cnxk_flow_flush_common(struct rte_eth_dev *eth_dev, struct rte_flow_error *error, bool is_rep) 1022 { 1023 struct cnxk_rep_dev *rep_dev; 1024 struct cnxk_eth_dev *dev; 1025 struct roc_npc *npc; 1026 int rc; 1027 1028 /* is_rep set for operation performed via representor ports */ 1029 if (!is_rep) { 1030 dev = cnxk_eth_pmd_priv(eth_dev); 1031 npc = &dev->npc; 1032 } else { 1033 rep_dev = cnxk_rep_pmd_priv(eth_dev); 1034 npc = &rep_dev->parent_dev->npc; 1035 } 1036 1037 rc = roc_npc_mcam_free_all_resources(npc); 1038 if (rc) { 1039 rte_flow_error_set(error, EIO, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1040 "Failed to flush filter"); 1041 return -rte_errno; 1042 } 1043 1044 return 0; 1045 } 1046 1047 static int 1048 cnxk_flow_flush(struct rte_eth_dev *eth_dev, struct rte_flow_error *error) 1049 { 1050 return cnxk_flow_flush_common(eth_dev, error, false); 1051 } 1052 1053 int 
1054 cnxk_flow_query_common(struct rte_eth_dev *eth_dev, struct rte_flow *flow, 1055 const struct rte_flow_action *action, void *data, 1056 struct rte_flow_error *error, bool is_rep) 1057 { 1058 struct roc_npc_flow *in_flow = (struct roc_npc_flow *)flow; 1059 struct rte_flow_query_count *query = data; 1060 struct cnxk_rep_dev *rep_dev; 1061 struct cnxk_eth_dev *dev; 1062 struct roc_npc *npc; 1063 const char *errmsg = NULL; 1064 int errcode = ENOTSUP; 1065 int rc; 1066 1067 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) { 1068 errmsg = "Only COUNT is supported in query"; 1069 goto err_exit; 1070 } 1071 1072 if (in_flow->ctr_id == NPC_COUNTER_NONE) { 1073 errmsg = "Counter is not available"; 1074 goto err_exit; 1075 } 1076 1077 /* is_rep set for operation performed via representor ports */ 1078 if (!is_rep) { 1079 dev = cnxk_eth_pmd_priv(eth_dev); 1080 npc = &dev->npc; 1081 } else { 1082 rep_dev = cnxk_rep_pmd_priv(eth_dev); 1083 npc = &rep_dev->parent_dev->npc; 1084 } 1085 1086 if (in_flow->use_pre_alloc) 1087 rc = roc_npc_inl_mcam_read_counter(in_flow->ctr_id, &query->hits); 1088 else 1089 rc = roc_npc_mcam_read_counter(npc, in_flow->ctr_id, &query->hits); 1090 if (rc != 0) { 1091 errcode = EIO; 1092 errmsg = "Error reading flow counter"; 1093 goto err_exit; 1094 } 1095 query->hits_set = 1; 1096 query->bytes_set = 0; 1097 1098 if (query->reset) { 1099 if (in_flow->use_pre_alloc) 1100 rc = roc_npc_inl_mcam_clear_counter(in_flow->ctr_id); 1101 else 1102 rc = roc_npc_mcam_clear_counter(npc, in_flow->ctr_id); 1103 } 1104 if (rc != 0) { 1105 errcode = EIO; 1106 errmsg = "Error clearing flow counter"; 1107 goto err_exit; 1108 } 1109 1110 return 0; 1111 1112 err_exit: 1113 rte_flow_error_set(error, errcode, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1114 NULL, errmsg); 1115 return -rte_errno; 1116 } 1117 1118 static int 1119 cnxk_flow_query(struct rte_eth_dev *eth_dev, struct rte_flow *flow, 1120 const struct rte_flow_action *action, void *data, struct rte_flow_error *error) 1121 
{ 1122 return cnxk_flow_query_common(eth_dev, flow, action, data, error, false); 1123 } 1124 1125 static int 1126 cnxk_flow_isolate(struct rte_eth_dev *eth_dev __rte_unused, int enable __rte_unused, 1127 struct rte_flow_error *error) 1128 { 1129 /* If we support, we need to un-install the default mcam 1130 * entry for this port. 1131 */ 1132 1133 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 1134 NULL, "Flow isolation not supported"); 1135 1136 return -rte_errno; 1137 } 1138 1139 int 1140 cnxk_flow_dev_dump_common(struct rte_eth_dev *eth_dev, struct rte_flow *flow, FILE *file, 1141 struct rte_flow_error *error, bool is_rep) 1142 { 1143 struct cnxk_rep_dev *rep_dev; 1144 struct cnxk_eth_dev *dev; 1145 struct roc_npc *npc; 1146 1147 /* is_rep set for operation performed via representor ports */ 1148 if (!is_rep) { 1149 dev = cnxk_eth_pmd_priv(eth_dev); 1150 npc = &dev->npc; 1151 } else { 1152 rep_dev = cnxk_rep_pmd_priv(eth_dev); 1153 npc = &rep_dev->parent_dev->npc; 1154 } 1155 1156 if (file == NULL) { 1157 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, 1158 "Invalid file"); 1159 return -rte_errno; 1160 } 1161 1162 if (flow != NULL) { 1163 roc_npc_flow_mcam_dump(file, npc, (struct roc_npc_flow *)flow); 1164 return 0; 1165 } 1166 1167 roc_npc_flow_dump(file, npc, -1); 1168 1169 return 0; 1170 } 1171 1172 static int 1173 cnxk_flow_dev_dump(struct rte_eth_dev *eth_dev, struct rte_flow *flow, FILE *file, 1174 struct rte_flow_error *error) 1175 { 1176 return cnxk_flow_dev_dump_common(eth_dev, flow, file, error, false); 1177 } 1178 1179 static int 1180 cnxk_flow_get_aged_flows(struct rte_eth_dev *eth_dev, void **context, uint32_t nb_contexts, 1181 struct rte_flow_error *err) 1182 { 1183 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); 1184 struct roc_npc *roc_npc = &dev->npc; 1185 struct roc_npc_flow_age *flow_age; 1186 uint32_t start_id; 1187 uint32_t end_id; 1188 int cnt = 0; 1189 uint32_t sn; 1190 uint32_t i; 1191 
1192 RTE_SET_USED(err); 1193 1194 flow_age = &roc_npc->flow_age; 1195 1196 if (!flow_age->age_flow_refcnt) 1197 return 0; 1198 1199 do { 1200 sn = plt_seqcount_read_begin(&flow_age->seq_cnt); 1201 1202 if (nb_contexts == 0) { 1203 cnt = flow_age->aged_flows_cnt; 1204 } else { 1205 start_id = flow_age->start_id; 1206 end_id = flow_age->end_id; 1207 for (i = start_id; i <= end_id; i++) { 1208 if ((int)nb_contexts == cnt) 1209 break; 1210 if (plt_bitmap_get(flow_age->aged_flows, i)) { 1211 context[cnt] = 1212 roc_npc_aged_flow_ctx_get(roc_npc, i); 1213 cnt++; 1214 } 1215 } 1216 } 1217 } while (plt_seqcount_read_retry(&flow_age->seq_cnt, sn)); 1218 1219 return cnt; 1220 } 1221 1222 static int 1223 cnxk_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev, struct rte_flow_tunnel *tunnel, 1224 struct rte_flow_action **pmd_actions, uint32_t *num_of_actions, 1225 __rte_unused struct rte_flow_error *err) 1226 { 1227 struct rte_flow_action *nfp_action; 1228 1229 nfp_action = rte_zmalloc("nfp_tun_action", sizeof(struct rte_flow_action), 0); 1230 if (nfp_action == NULL) { 1231 plt_err("Alloc memory for nfp tunnel action failed."); 1232 return -ENOMEM; 1233 } 1234 1235 if (tunnel->is_ipv6) 1236 nfp_action->conf = (void *)~0; 1237 1238 switch (tunnel->type) { 1239 case RTE_FLOW_ITEM_TYPE_VXLAN: 1240 nfp_action->type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP; 1241 *pmd_actions = nfp_action; 1242 *num_of_actions = 1; 1243 break; 1244 default: 1245 *pmd_actions = NULL; 1246 *num_of_actions = 0; 1247 rte_free(nfp_action); 1248 break; 1249 } 1250 1251 return 0; 1252 } 1253 1254 static int 1255 cnxk_flow_tunnel_action_decap_release(__rte_unused struct rte_eth_dev *dev, 1256 struct rte_flow_action *pmd_actions, uint32_t num_of_actions, 1257 __rte_unused struct rte_flow_error *err) 1258 { 1259 uint32_t i; 1260 struct rte_flow_action *nfp_action; 1261 1262 for (i = 0; i < num_of_actions; i++) { 1263 nfp_action = &pmd_actions[i]; 1264 nfp_action->conf = NULL; 1265 rte_free(nfp_action); 
1266 } 1267 1268 return 0; 1269 } 1270 1271 static int 1272 cnxk_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev, 1273 __rte_unused struct rte_flow_tunnel *tunnel, 1274 __rte_unused struct rte_flow_item **pmd_items, uint32_t *num_of_items, 1275 __rte_unused struct rte_flow_error *err) 1276 { 1277 *num_of_items = 0; 1278 1279 return 0; 1280 } 1281 1282 static int 1283 cnxk_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev, 1284 __rte_unused struct rte_flow_item *pmd_items, 1285 __rte_unused uint32_t num_of_items, 1286 __rte_unused struct rte_flow_error *err) 1287 { 1288 return 0; 1289 } 1290 1291 struct rte_flow_ops cnxk_flow_ops = { 1292 .validate = cnxk_flow_validate, 1293 .flush = cnxk_flow_flush, 1294 .query = cnxk_flow_query, 1295 .isolate = cnxk_flow_isolate, 1296 .dev_dump = cnxk_flow_dev_dump, 1297 .get_aged_flows = cnxk_flow_get_aged_flows, 1298 .tunnel_match = cnxk_flow_tunnel_match, 1299 .tunnel_item_release = cnxk_flow_tunnel_item_release, 1300 .tunnel_decap_set = cnxk_flow_tunnel_decap_set, 1301 .tunnel_action_decap_release = cnxk_flow_tunnel_action_decap_release, 1302 }; 1303