/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2021,2023 NXP
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include <bus_fslmc_driver.h>
#include <fsl_dpdmux.h>
#include <fsl_dpkg.h>

#include <dpaa2_ethdev.h>
#include <dpaa2_pmd_logs.h>

struct dpaa2_dpdmux_dev {
	TAILQ_ENTRY(dpaa2_dpdmux_dev) next;
		/**< Pointer to next device instance */
	struct fsl_mc_io dpdmux; /**< Handle to DPDMUX portal object */
	uint16_t token;
	uint32_t dpdmux_id; /**< HW ID for DPDMUX object */
	uint8_t num_ifs; /**< Number of interfaces in DPDMUX */
};

#define DPAA2_MUX_FLOW_MAX_RULE_NUM 8
struct dpaa2_mux_flow {
	struct dpdmux_rule_cfg rule[DPAA2_MUX_FLOW_MAX_RULE_NUM];
};

TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev);
static struct dpdmux_dev_list dpdmux_dev_list =
	TAILQ_HEAD_INITIALIZER(dpdmux_dev_list); /**< DPDMUX device list */

static struct dpaa2_dpdmux_dev *
get_dpdmux_from_id(uint32_t dpdmux_id)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev = NULL;

	/* Get DPDMUX dev handle from list using index */
	TAILQ_FOREACH(dpdmux_dev, &dpdmux_dev_list, next) {
		if (dpdmux_dev->dpdmux_id == dpdmux_id)
			break;
	}

	return dpdmux_dev;
}

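/*
 * Usage sketch (illustrative, not part of this file): steer UDP traffic
 * with destination port 4789 to DPDMUX interface 1. The dpdmux_id (0),
 * port and interface numbers are assumptions for the example. Both
 * arrays must be terminated by *_TYPE_END entries, and every item needs
 * a non-NULL spec.
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(4789),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vf vf = { .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	int ret = rte_pmd_dpaa2_mux_flow_create(0, pattern, actions);
 */
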
int
rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
	struct rte_flow_item pattern[],
	struct rte_flow_action actions[])
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	static struct dpkg_profile_cfg s_kg_cfg;
	struct dpkg_profile_cfg kg_cfg;
	const struct rte_flow_action_vf *vf_conf;
	struct dpdmux_cls_action dpdmux_action;
	uint8_t *key_va = NULL, *mask_va = NULL;
	void *key_cfg_va = NULL;
	uint64_t key_iova, mask_iova, key_cfg_iova;
	uint8_t key_size = 0;
	int ret = 0, loop = 0;
	static int s_i;
	struct dpkg_extract *extract;
	struct dpdmux_rule_cfg rule;

	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		ret = -ENODEV;
		goto creation_error;
	}

	key_cfg_va = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE,
		RTE_CACHE_LINE_SIZE);
	if (!key_cfg_va) {
		DPAA2_PMD_ERR("Unable to allocate key configure buffer");
		ret = -ENOMEM;
		goto creation_error;
	}

	key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_cfg_va,
		DIST_PARAM_IOVA_SIZE);
	if (key_cfg_iova == RTE_BAD_IOVA) {
		DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)",
			__func__, key_cfg_va);
		ret = -ENOBUFS;
		goto creation_error;
	}

	key_va = rte_zmalloc(NULL, (2 * DIST_PARAM_IOVA_SIZE),
		RTE_CACHE_LINE_SIZE);
	if (!key_va) {
		DPAA2_PMD_ERR("Unable to allocate flow dist parameter");
		ret = -ENOMEM;
		goto creation_error;
	}

	key_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_va,
		(2 * DIST_PARAM_IOVA_SIZE));
	if (key_iova == RTE_BAD_IOVA) {
		DPAA2_PMD_ERR("%s: No IOMMU mapping for address(%p)",
			__func__, key_va);
		ret = -ENOBUFS;
		goto creation_error;
	}

	/* Key and mask share one allocation: the mask occupies the
	 * upper half of the buffer.
	 */
	mask_va = key_va + DIST_PARAM_IOVA_SIZE;
	mask_iova = key_iova + DIST_PARAM_IOVA_SIZE;

	/* Currently only a few header fields are supported as extract
	 * types; this can be extended to other fields using pattern->type.
	 */
	while (pattern[loop].type != RTE_FLOW_ITEM_TYPE_END) {
		if (kg_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			DPAA2_PMD_ERR("Too many extracts(%d)",
				kg_cfg.num_extracts);
			ret = -ENOTSUP;
			goto creation_error;
		}
		/* Every supported item dereferences spec below */
		if (!pattern[loop].spec) {
			DPAA2_PMD_ERR("pattern[%d] has no spec", loop);
			ret = -EINVAL;
			goto creation_error;
		}
		switch (pattern[loop].type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
		{
			const struct rte_flow_item_ipv4 *spec;
			const struct rte_flow_item_ipv4 *mask;

			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
			extract->type = DPKG_EXTRACT_FROM_HDR;
			extract->extract.from_hdr.prot = NET_PROT_IP;
			extract->extract.from_hdr.field = NH_FLD_IP_PROTO;
			extract->extract.from_hdr.type = DPKG_FULL_FIELD;

			kg_cfg.num_extracts++;

			spec = pattern[loop].spec;
			mask = pattern[loop].mask;
			rte_memcpy(&key_va[key_size],
				&spec->hdr.next_proto_id, sizeof(uint8_t));
			if (mask) {
				rte_memcpy(&mask_va[key_size],
					&mask->hdr.next_proto_id,
					sizeof(uint8_t));
			} else {
				mask_va[key_size] = 0xff;
			}
			key_size += sizeof(uint8_t);
		}
		break;

		case RTE_FLOW_ITEM_TYPE_VLAN:
		{
			const struct rte_flow_item_vlan *spec;
			const struct rte_flow_item_vlan *mask;

			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
			extract->type = DPKG_EXTRACT_FROM_HDR;
			extract->extract.from_hdr.prot = NET_PROT_VLAN;
			extract->extract.from_hdr.field = NH_FLD_VLAN_TCI;
			extract->extract.from_hdr.type = DPKG_FULL_FIELD;

			kg_cfg.num_extracts++;

			spec = pattern[loop].spec;
			mask = pattern[loop].mask;
			rte_memcpy(&key_va[key_size],
				&spec->tci, sizeof(rte_be16_t));
			if (mask) {
				rte_memcpy(&mask_va[key_size],
					&mask->tci, sizeof(rte_be16_t));
			} else {
				memset(&mask_va[key_size], 0xff,
					sizeof(rte_be16_t));
			}
			key_size += sizeof(rte_be16_t);
		}
		break;

		case RTE_FLOW_ITEM_TYPE_UDP:
		{
			const struct rte_flow_item_udp *spec;
			const struct rte_flow_item_udp *mask;

			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
			extract->type = DPKG_EXTRACT_FROM_HDR;
			extract->extract.from_hdr.prot = NET_PROT_UDP;
			extract->extract.from_hdr.type = DPKG_FULL_FIELD;
			extract->extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
			kg_cfg.num_extracts++;

			spec = pattern[loop].spec;
			mask = pattern[loop].mask;
			rte_memcpy(&key_va[key_size],
				&spec->hdr.dst_port, sizeof(rte_be16_t));
			if (mask) {
				rte_memcpy(&mask_va[key_size],
					&mask->hdr.dst_port,
					sizeof(rte_be16_t));
			} else {
				memset(&mask_va[key_size], 0xff,
					sizeof(rte_be16_t));
			}
			key_size += sizeof(rte_be16_t);
		}
		break;

		case RTE_FLOW_ITEM_TYPE_ETH:
		{
			const struct rte_flow_item_eth *spec;
			const struct rte_flow_item_eth *mask;

			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
			extract->type = DPKG_EXTRACT_FROM_HDR;
			extract->extract.from_hdr.prot = NET_PROT_ETH;
			extract->extract.from_hdr.type = DPKG_FULL_FIELD;
			extract->extract.from_hdr.field = NH_FLD_ETH_TYPE;
			kg_cfg.num_extracts++;

			spec = pattern[loop].spec;
			mask = pattern[loop].mask;
			rte_memcpy(&key_va[key_size],
				&spec->type, sizeof(rte_be16_t));
			if (mask) {
				rte_memcpy(&mask_va[key_size],
					&mask->type, sizeof(rte_be16_t));
			} else {
				memset(&mask_va[key_size], 0xff,
					sizeof(rte_be16_t));
			}
			key_size += sizeof(rte_be16_t);
		}
		break;

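		/* A RAW item bypasses the header parser: key bytes are
		 * extracted directly from frame data at spec->offset for
		 * spec->length bytes.
		 */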
		case RTE_FLOW_ITEM_TYPE_RAW:
		{
			const struct rte_flow_item_raw *spec;
			const struct rte_flow_item_raw *mask;

			spec = pattern[loop].spec;
			mask = pattern[loop].mask;
			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
			extract->type = DPKG_EXTRACT_FROM_DATA;
			extract->extract.from_data.offset = spec->offset;
			extract->extract.from_data.size = spec->length;
			kg_cfg.num_extracts++;

			rte_memcpy(&key_va[key_size],
				spec->pattern, spec->length);
			if (mask && mask->pattern) {
				rte_memcpy(&mask_va[key_size],
					mask->pattern, spec->length);
			} else {
				memset(&mask_va[key_size], 0xff, spec->length);
			}

			key_size += spec->length;
		}
		break;

		default:
			DPAA2_PMD_ERR("Unsupported pattern[%d] type: %d",
				loop, pattern[loop].type);
			ret = -ENOTSUP;
			goto creation_error;
		}
		loop++;
	}

	ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_va);
	if (ret) {
		DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret);
		goto creation_error;
	}

	/* The key composition is programmed once, on the first flow;
	 * every later flow must use an identical extract profile.
	 */
	if (!s_i) {
		ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux,
			CMD_PRI_LOW, dpdmux_dev->token, key_cfg_iova);
		if (ret) {
			DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)",
				ret);
			goto creation_error;
		}
		rte_memcpy(&s_kg_cfg, &kg_cfg, sizeof(struct dpkg_profile_cfg));
	} else {
		if (memcmp(&s_kg_cfg, &kg_cfg,
			sizeof(struct dpkg_profile_cfg))) {
			DPAA2_PMD_ERR("%s: Single flow support only.",
				__func__);
			ret = -ENOTSUP;
			goto creation_error;
		}
	}

	vf_conf = actions[0].conf;
	if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) {
		DPAA2_PMD_ERR("Invalid destination id(%d)", vf_conf->id);
		ret = -EINVAL;
		goto creation_error;
	}
	dpdmux_action.dest_if = vf_conf->id;

	rule.key_iova = key_iova;
	rule.mask_iova = mask_iova;
	rule.key_size = key_size;
	rule.entry_index = s_i;
	s_i++;

	/* The key extract parameters are now set; configure the rule. */
	ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux,
		CMD_PRI_LOW, dpdmux_dev->token,
		&rule, &dpdmux_action);
	if (ret) {
		DPAA2_PMD_ERR("Add classification entry failed: err(%d)", ret);
		goto creation_error;
	}

creation_error:
	rte_free(key_cfg_va);
	rte_free(key_va);

	return ret;
}

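/*
 * Usage sketch (illustrative): forward frames matching a given MAC
 * address and VLAN 100 to DPDMUX interface 2. The MAC address, VLAN id,
 * dpdmux_id and interface number are assumptions for the example.
 *
 *	uint8_t mac[6] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};
 *	int ret = rte_pmd_dpaa2_mux_flow_l2(0, mac, 100, 2);
 */
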
int
rte_pmd_dpaa2_mux_flow_l2(uint32_t dpdmux_id,
	uint8_t mac_addr[6], uint16_t vlan_id, int dest_if)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	struct dpdmux_l2_rule rule;
	int ret, i;

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		return -ENODEV;
	}

	for (i = 0; i < 6; i++)
		rule.mac_addr[i] = mac_addr[i];
	rule.vlan_id = vlan_id;

	ret = dpdmux_if_add_l2_rule(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
		dpdmux_dev->token, dest_if, &rule);
	if (ret) {
		DPAA2_PMD_ERR("dpdmux_if_add_l2_rule failed: err(%d)", ret);
		return ret;
	}

	return 0;
}

int
rte_pmd_dpaa2_mux_rx_frame_len(uint32_t dpdmux_id, uint16_t max_rx_frame_len)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	int ret;

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		return -ENODEV;
	}

	ret = dpdmux_set_max_frame_length(&dpdmux_dev->dpdmux,
		CMD_PRI_LOW, dpdmux_dev->token, max_rx_frame_len);
	if (ret) {
		DPAA2_PMD_ERR("DPDMUX: Unable to set max frame length: err(%d)",
			ret);
		return ret;
	}

	DPAA2_PMD_INFO("dpdmux max Rx frame length set to %u",
		max_rx_frame_len);

	return 0;
}

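/*
 * Example (illustrative): dump the counters of both interfaces of DPDMUX
 * object 0 to stdout, assuming the object has two interfaces.
 *
 *	rte_pmd_dpaa2_mux_dump_counter(stdout, 0, 2);
 */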
/* Dump the status of the dpaa2_mux counters on the console */
void
rte_pmd_dpaa2_mux_dump_counter(FILE *f, uint32_t dpdmux_id, int num_if)
{
	static const struct {
		enum dpdmux_counter_type type;
		const char *name;
	} counters[] = {
		{DPDMUX_CNT_ING_FRAME, "DPDMUX_CNT_ING_FRAME"},
		{DPDMUX_CNT_ING_BYTE, "DPDMUX_CNT_ING_BYTE"},
		{DPDMUX_CNT_ING_FLTR_FRAME, "DPDMUX_CNT_ING_FLTR_FRAME"},
		{DPDMUX_CNT_ING_FRAME_DISCARD, "DPDMUX_CNT_ING_FRAME_DISCARD"},
		{DPDMUX_CNT_ING_MCAST_FRAME, "DPDMUX_CNT_ING_MCAST_FRAME"},
		{DPDMUX_CNT_ING_MCAST_BYTE, "DPDMUX_CNT_ING_MCAST_BYTE"},
		{DPDMUX_CNT_ING_BCAST_FRAME, "DPDMUX_CNT_ING_BCAST_FRAME"},
		{DPDMUX_CNT_ING_BCAST_BYTES, "DPDMUX_CNT_ING_BCAST_BYTES"},
		{DPDMUX_CNT_EGR_FRAME, "DPDMUX_CNT_EGR_FRAME"},
		{DPDMUX_CNT_EGR_BYTE, "DPDMUX_CNT_EGR_BYTE"},
		{DPDMUX_CNT_EGR_FRAME_DISCARD, "DPDMUX_CNT_EGR_FRAME_DISCARD"},
	};
	struct dpaa2_dpdmux_dev *dpdmux;
	uint64_t counter;
	unsigned int i;
	int ret;
	int if_id;

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		return;
	}

	for (if_id = 0; if_id < num_if; if_id++) {
		fprintf(f, "dpdmux.%d\n", if_id);

		/* Print each counter; silently skip any counter the
		 * query fails for.
		 */
		for (i = 0; i < RTE_DIM(counters); i++) {
			ret = dpdmux_if_get_counter(&dpdmux->dpdmux,
				CMD_PRI_LOW, dpdmux->token, if_id,
				counters[i].type, &counter);
			if (!ret)
				fprintf(f, "%s %" PRIu64 "\n",
					counters[i].name, counter);
		}
	}
}

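/*
 * Probe callback invoked by the fslmc bus for each DPDMUX object found
 * during scanning: opens the object through the MC portal, programs the
 * default interface, reset and error behavior where the API version
 * allows, and adds the device to the list used by the
 * rte_pmd_dpaa2_mux_* APIs above.
 */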
static int
dpaa2_create_dpdmux_device(int vdev_fd __rte_unused,
	struct vfio_device_info *obj_info __rte_unused,
	struct rte_dpaa2_device *obj)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	struct dpdmux_attr attr;
	int ret, dpdmux_id = obj->object_id;
	uint16_t maj_ver;
	uint16_t min_ver;
	uint8_t skip_reset_flags;

	PMD_INIT_FUNC_TRACE();

	/* Allocate DPAA2 dpdmux handle */
	dpdmux_dev = rte_zmalloc(NULL,
		sizeof(struct dpaa2_dpdmux_dev), RTE_CACHE_LINE_SIZE);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device");
		return -ENOMEM;
	}

	/* Open the dpdmux object */
	dpdmux_dev->dpdmux.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
	ret = dpdmux_open(&dpdmux_dev->dpdmux, CMD_PRI_LOW, dpdmux_id,
		&dpdmux_dev->token);
	if (ret) {
		DPAA2_PMD_ERR("Unable to open dpdmux object: err(%d)", ret);
		goto init_err;
	}

	ret = dpdmux_get_attributes(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
		dpdmux_dev->token, &attr);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get dpdmux attr: err(%d)", ret);
		goto init_err;
	}

	if (attr.method != DPDMUX_METHOD_C_VLAN_MAC) {
		ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
			dpdmux_dev->token, attr.default_if);
		if (ret) {
			DPAA2_PMD_ERR("Setting default interface failed in %s",
				__func__);
			goto init_err;
		}
		skip_reset_flags = DPDMUX_SKIP_DEFAULT_INTERFACE
			| DPDMUX_SKIP_UNICAST_RULES
			| DPDMUX_SKIP_MULTICAST_RULES;
	} else {
		skip_reset_flags = DPDMUX_SKIP_DEFAULT_INTERFACE;
	}

	ret = dpdmux_get_api_version(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
		&maj_ver, &min_ver);
	if (ret) {
		DPAA2_PMD_ERR("Getting API version failed in %s",
			__func__);
		goto init_err;
	}

	/* The dpdmux_set/get_resetable() APIs are available starting with
	 * DPDMUX API version 6.6.
	 */
	if (maj_ver > 6 || (maj_ver == 6 && min_ver >= 6)) {
		ret = dpdmux_set_resetable(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
			dpdmux_dev->token, skip_reset_flags);
		if (ret) {
			DPAA2_PMD_ERR("Setting resetable flags failed in %s",
				__func__);
			goto init_err;
		}
	}

	/* Error behavior configuration is available starting with
	 * DPDMUX API version 6.9.
	 */
	if (maj_ver > 6 || (maj_ver == 6 && min_ver >= 9)) {
		struct dpdmux_error_cfg mux_err_cfg;

		memset(&mux_err_cfg, 0, sizeof(mux_err_cfg));
		/* Note: the discard flag (DPDMUX_ERROR_DISC) has effect only
		 * when ERROR_ACTION is set to
		 * DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE.
		 */
		mux_err_cfg.errors = DPDMUX_ALL_ERRORS;
		mux_err_cfg.error_action = DPDMUX_ERROR_ACTION_CONTINUE;

		ret = dpdmux_if_set_errors_behavior(&dpdmux_dev->dpdmux,
			CMD_PRI_LOW,
			dpdmux_dev->token, DPAA2_DPDMUX_DPMAC_IDX,
			&mux_err_cfg);
		if (ret) {
			DPAA2_PMD_ERR("%s: dpdmux_if_set_errors_behavior failed: err(%d)",
				__func__, ret);
			goto init_err;
		}
	}

	dpdmux_dev->dpdmux_id = dpdmux_id;
	dpdmux_dev->num_ifs = attr.num_ifs;

	TAILQ_INSERT_TAIL(&dpdmux_dev_list, dpdmux_dev, next);

	return 0;

init_err:
	rte_free(dpdmux_dev);

	return -1;
}

static void
dpaa2_close_dpdmux_device(int object_id)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;

	dpdmux_dev = get_dpdmux_from_id((uint32_t)object_id);

	if (dpdmux_dev) {
		dpdmux_close(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
			dpdmux_dev->token);
		TAILQ_REMOVE(&dpdmux_dev_list, dpdmux_dev, next);
		rte_free(dpdmux_dev);
	}
}

static struct rte_dpaa2_object rte_dpaa2_dpdmux_obj = {
	.dev_type = DPAA2_MUX,
	.create = dpaa2_create_dpdmux_device,
	.close = dpaa2_close_dpdmux_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpdmux, rte_dpaa2_dpdmux_obj);