/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2021,2023 NXP
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include <bus_fslmc_driver.h>
#include <fsl_dpdmux.h>
#include <fsl_dpkg.h>

#include <dpaa2_ethdev.h>
#include <dpaa2_pmd_logs.h>

struct dpaa2_dpdmux_dev {
	TAILQ_ENTRY(dpaa2_dpdmux_dev) next;
	/**< Pointer to next device instance */
	struct fsl_mc_io dpdmux; /**< Handle to DPDMUX portal object */
	uint16_t token;
	uint32_t dpdmux_id; /**< HW ID for DPDMUX object */
	uint8_t num_ifs; /**< Number of interfaces in DPDMUX */
};

#define DPAA2_MUX_FLOW_MAX_RULE_NUM 8
struct dpaa2_mux_flow {
	struct dpdmux_rule_cfg rule[DPAA2_MUX_FLOW_MAX_RULE_NUM];
};

TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev);
static struct dpdmux_dev_list dpdmux_dev_list =
	TAILQ_HEAD_INITIALIZER(dpdmux_dev_list); /*!< DPDMUX device list */

static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev = NULL;

	/* Get DPDMUX dev handle from list using index */
	TAILQ_FOREACH(dpdmux_dev, &dpdmux_dev_list, next) {
		if (dpdmux_dev->dpdmux_id == dpdmux_id)
			break;
	}

	return dpdmux_dev;
}

int
rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
	struct rte_flow_item pattern[],
	struct rte_flow_action actions[])
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	/* s_kg_cfg and s_i persist across calls: a single key profile is
	 * shared by all flows and rule entries are indexed in creation
	 * order. This also makes the API non-thread-safe.
	 */
	static struct dpkg_profile_cfg s_kg_cfg;
	struct dpkg_profile_cfg kg_cfg;
	const struct rte_flow_action_vf *vf_conf;
	struct dpdmux_cls_action dpdmux_action;
	uint8_t *key_va = NULL, *mask_va = NULL;
	void *key_cfg_va = NULL;
	uint64_t key_iova, mask_iova, key_cfg_iova;
	uint8_t key_size = 0;
	int ret = 0, loop = 0;
	static int s_i;
	struct dpkg_extract *extract;
	struct dpdmux_rule_cfg rule;

	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		ret = -ENODEV;
		goto creation_error;
	}

	key_cfg_va = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE,
		RTE_CACHE_LINE_SIZE);
	if (!key_cfg_va) {
		DPAA2_PMD_ERR("Unable to allocate key configure buffer");
		ret = -ENOMEM;
		goto creation_error;
	}

	key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_cfg_va,
		DIST_PARAM_IOVA_SIZE);
	if (key_cfg_iova == RTE_BAD_IOVA) {
		DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)",
			__func__, key_cfg_va);
		ret = -ENOBUFS;
		goto creation_error;
	}

	key_va = rte_zmalloc(NULL, (2 * DIST_PARAM_IOVA_SIZE),
		RTE_CACHE_LINE_SIZE);
	if (!key_va) {
		DPAA2_PMD_ERR("Unable to allocate flow dist parameter");
		ret = -ENOMEM;
		goto creation_error;
	}

	key_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_va,
		(2 * DIST_PARAM_IOVA_SIZE));
	if (key_iova == RTE_BAD_IOVA) {
		DPAA2_PMD_ERR("%s: No IOMMU mapping for address(%p)",
			__func__, key_va);
		ret = -ENOBUFS;
		goto creation_error;
	}

	/* One allocation holds both key and mask, split at half size */
	mask_va = key_va + DIST_PARAM_IOVA_SIZE;
	mask_iova = key_iova + DIST_PARAM_IOVA_SIZE;

	/* Build the key-generation profile and the key/mask contents from
	 * the pattern items. Support for more fields can be added via new
	 * cases on pattern->type below.
	 */
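	/*
	 * Each matched item appends its field at offset key_size in both
	 * key_va and mask_va; a missing item mask defaults to all-ones,
	 * i.e. an exact match on that field.
	 */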
	while (pattern[loop].type != RTE_FLOW_ITEM_TYPE_END) {
		if (kg_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			DPAA2_PMD_ERR("Too many extracts(%d)",
				kg_cfg.num_extracts);
			ret = -ENOTSUP;
			goto creation_error;
		}
		if (!pattern[loop].spec) {
			/* Every case below reads match data from the spec */
			DPAA2_PMD_ERR("pattern[%d]: missing spec", loop);
			ret = -EINVAL;
			goto creation_error;
		}
		switch (pattern[loop].type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
		{
			const struct rte_flow_item_ipv4 *spec;
			const struct rte_flow_item_ipv4 *mask;

			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
			extract->type = DPKG_EXTRACT_FROM_HDR;
			extract->extract.from_hdr.prot = NET_PROT_IP;
			extract->extract.from_hdr.field = NH_FLD_IP_PROTO;
			extract->extract.from_hdr.type = DPKG_FULL_FIELD;

			kg_cfg.num_extracts++;

			spec = pattern[loop].spec;
			mask = pattern[loop].mask;
			rte_memcpy(&key_va[key_size],
				&spec->hdr.next_proto_id, sizeof(uint8_t));
			if (mask) {
				rte_memcpy(&mask_va[key_size],
					&mask->hdr.next_proto_id,
					sizeof(uint8_t));
			} else {
				mask_va[key_size] = 0xff;
			}
			key_size += sizeof(uint8_t);
		}
		break;

		case RTE_FLOW_ITEM_TYPE_VLAN:
		{
			const struct rte_flow_item_vlan *spec;
			const struct rte_flow_item_vlan *mask;

			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
			extract->type = DPKG_EXTRACT_FROM_HDR;
			extract->extract.from_hdr.prot = NET_PROT_VLAN;
			extract->extract.from_hdr.field = NH_FLD_VLAN_TCI;
			extract->extract.from_hdr.type = DPKG_FULL_FIELD;

			kg_cfg.num_extracts++;

			spec = pattern[loop].spec;
			mask = pattern[loop].mask;
			rte_memcpy(&key_va[key_size],
				&spec->tci, sizeof(rte_be16_t));
			if (mask) {
				rte_memcpy(&mask_va[key_size],
					&mask->tci, sizeof(rte_be16_t));
			} else {
				memset(&mask_va[key_size], 0xff,
					sizeof(rte_be16_t));
			}
			key_size += sizeof(rte_be16_t);
		}
		break;

		case RTE_FLOW_ITEM_TYPE_UDP:
		{
			const struct rte_flow_item_udp *spec;
			const struct rte_flow_item_udp *mask;

			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
			extract->type = DPKG_EXTRACT_FROM_HDR;
			extract->extract.from_hdr.prot = NET_PROT_UDP;
			extract->extract.from_hdr.type = DPKG_FULL_FIELD;
			extract->extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
			kg_cfg.num_extracts++;

			spec = pattern[loop].spec;
			mask = pattern[loop].mask;
			rte_memcpy(&key_va[key_size],
				&spec->hdr.dst_port, sizeof(rte_be16_t));
			if (mask) {
				rte_memcpy(&mask_va[key_size],
					&mask->hdr.dst_port,
					sizeof(rte_be16_t));
			} else {
				memset(&mask_va[key_size], 0xff,
					sizeof(rte_be16_t));
			}
			key_size += sizeof(rte_be16_t);
		}
		break;

		case RTE_FLOW_ITEM_TYPE_ETH:
		{
			const struct rte_flow_item_eth *spec;
			const struct rte_flow_item_eth *mask;

			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
			extract->type = DPKG_EXTRACT_FROM_HDR;
			extract->extract.from_hdr.prot = NET_PROT_ETH;
			extract->extract.from_hdr.type = DPKG_FULL_FIELD;
			extract->extract.from_hdr.field = NH_FLD_ETH_TYPE;
			kg_cfg.num_extracts++;

			spec = pattern[loop].spec;
			mask = pattern[loop].mask;
			rte_memcpy(&key_va[key_size],
				&spec->type, sizeof(rte_be16_t));
			if (mask) {
				rte_memcpy(&mask_va[key_size],
					&mask->type, sizeof(rte_be16_t));
			} else {
				memset(&mask_va[key_size], 0xff,
					sizeof(rte_be16_t));
			}
			key_size += sizeof(rte_be16_t);
		}
		break;
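		/*
		 * RAW extracts match arbitrary bytes at a fixed offset from
		 * the start of the frame: offset and length are taken from
		 * the item spec, and the bytes to match from spec->pattern.
		 */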
		case RTE_FLOW_ITEM_TYPE_RAW:
		{
			const struct rte_flow_item_raw *spec;
			const struct rte_flow_item_raw *mask;

			spec = pattern[loop].spec;
			mask = pattern[loop].mask;
			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
			extract->type = DPKG_EXTRACT_FROM_DATA;
			extract->extract.from_data.offset = spec->offset;
			extract->extract.from_data.size = spec->length;
			kg_cfg.num_extracts++;

			rte_memcpy(&key_va[key_size],
				spec->pattern, spec->length);
			if (mask && mask->pattern) {
				rte_memcpy(&mask_va[key_size],
					mask->pattern, spec->length);
			} else {
				memset(&mask_va[key_size], 0xff, spec->length);
			}

			key_size += spec->length;
		}
		break;

		default:
			DPAA2_PMD_ERR("Unsupported pattern[%d] type: %d",
				loop, pattern[loop].type);
			ret = -ENOTSUP;
			goto creation_error;
		}
		loop++;
	}

	ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_va);
	if (ret) {
		DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret);
		goto creation_error;
	}

	/* The key profile is programmed into the DPDMUX only once; every
	 * subsequent flow must use an identical profile.
	 */
	if (!s_i) {
		ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux,
			CMD_PRI_LOW, dpdmux_dev->token, key_cfg_iova);
		if (ret) {
			DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)",
				ret);
			goto creation_error;
		}
		rte_memcpy(&s_kg_cfg, &kg_cfg,
			sizeof(struct dpkg_profile_cfg));
	} else {
		if (memcmp(&s_kg_cfg, &kg_cfg,
			sizeof(struct dpkg_profile_cfg))) {
			DPAA2_PMD_ERR("%s: Only a single key profile is supported.",
				__func__);
			ret = -ENOTSUP;
			goto creation_error;
		}
	}

	/* Only the VF action (destination interface) is supported */
	if (actions[0].type != RTE_FLOW_ACTION_TYPE_VF) {
		DPAA2_PMD_ERR("Unsupported action type: %d", actions[0].type);
		ret = -ENOTSUP;
		goto creation_error;
	}
	vf_conf = actions[0].conf;
	/* Destination id 0 is reserved; valid destinations are 1..num_ifs */
	if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) {
		DPAA2_PMD_ERR("Invalid destination id(%d)", vf_conf->id);
		ret = -EINVAL;
		goto creation_error;
	}
	dpdmux_action.dest_if = vf_conf->id;

	rule.key_iova = key_iova;
	rule.mask_iova = mask_iova;
	rule.key_size = key_size;
	rule.entry_index = s_i;
	s_i++;

	/* As the key extract parameters are now set, configure the rule. */
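	/*
	 * The MC reads key and mask through key_iova/mask_iova while
	 * servicing the command, so both buffers only need to stay mapped
	 * until the call returns; they are freed below on all paths.
	 */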
	ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux,
		CMD_PRI_LOW, dpdmux_dev->token,
		&rule, &dpdmux_action);
	if (ret) {
		DPAA2_PMD_ERR("Add classification entry failed: err(%d)", ret);
		goto creation_error;
	}

creation_error:
	rte_free(key_cfg_va);
	rte_free(key_va);

	return ret;
}

int
rte_pmd_dpaa2_mux_flow_l2(uint32_t dpdmux_id,
	uint8_t mac_addr[6], uint16_t vlan_id, int dest_if)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	struct dpdmux_l2_rule rule;
	int ret, i;

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		return -ENODEV;
	}

	for (i = 0; i < 6; i++)
		rule.mac_addr[i] = mac_addr[i];
	rule.vlan_id = vlan_id;

	ret = dpdmux_if_add_l2_rule(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
		dpdmux_dev->token, dest_if, &rule);
	if (ret) {
		DPAA2_PMD_ERR("dpdmux_if_add_l2_rule failed: err(%d)", ret);
		return ret;
	}

	return 0;
}

int
rte_pmd_dpaa2_mux_rx_frame_len(uint32_t dpdmux_id, uint16_t max_rx_frame_len)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	int ret;

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		return -ENODEV;
	}

	ret = dpdmux_set_max_frame_length(&dpdmux_dev->dpdmux,
		CMD_PRI_LOW, dpdmux_dev->token, max_rx_frame_len);
	if (ret) {
		DPAA2_PMD_ERR("DPDMUX: Unable to set max frame length: err(%d)",
			ret);
		return ret;
	}

	DPAA2_PMD_INFO("dpdmux max rx frame length set to %u",
		max_rx_frame_len);

	return ret;
}

/* Names and ids of the counters dumped by
 * rte_pmd_dpaa2_mux_dump_counter().
 */
static const struct {
	enum dpdmux_counter_type type;
	const char *name;
} dpdmux_counters[] = {
	{DPDMUX_CNT_ING_FRAME, "DPDMUX_CNT_ING_FRAME"},
	{DPDMUX_CNT_ING_BYTE, "DPDMUX_CNT_ING_BYTE"},
	{DPDMUX_CNT_ING_FLTR_FRAME, "DPDMUX_CNT_ING_FLTR_FRAME"},
	{DPDMUX_CNT_ING_FRAME_DISCARD, "DPDMUX_CNT_ING_FRAME_DISCARD"},
	{DPDMUX_CNT_ING_MCAST_FRAME, "DPDMUX_CNT_ING_MCAST_FRAME"},
	{DPDMUX_CNT_ING_MCAST_BYTE, "DPDMUX_CNT_ING_MCAST_BYTE"},
	{DPDMUX_CNT_ING_BCAST_FRAME, "DPDMUX_CNT_ING_BCAST_FRAME"},
	{DPDMUX_CNT_ING_BCAST_BYTES, "DPDMUX_CNT_ING_BCAST_BYTES"},
	{DPDMUX_CNT_EGR_FRAME, "DPDMUX_CNT_EGR_FRAME"},
	{DPDMUX_CNT_EGR_BYTE, "DPDMUX_CNT_EGR_BYTE"},
	{DPDMUX_CNT_EGR_FRAME_DISCARD, "DPDMUX_CNT_EGR_FRAME_DISCARD"},
};

/* Dump the dpaa2_mux per-interface counters to the given file */
void
rte_pmd_dpaa2_mux_dump_counter(FILE *f, uint32_t dpdmux_id, int num_if)
{
	struct dpaa2_dpdmux_dev *dpdmux;
	uint64_t counter;
	unsigned int i;
	int ret;
	int if_id;

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		return;
	}

	for (if_id = 0; if_id < num_if; if_id++) {
		fprintf(f, "dpdmux.%d\n", if_id);

		for (i = 0; i < RTE_DIM(dpdmux_counters); i++) {
			ret = dpdmux_if_get_counter(&dpdmux->dpdmux,
				CMD_PRI_LOW, dpdmux->token, if_id,
				dpdmux_counters[i].type, &counter);
			if (!ret)
				fprintf(f, "%s %" PRIu64 "\n",
					dpdmux_counters[i].name, counter);
		}
	}
}

static int
dpaa2_create_dpdmux_device(int vdev_fd __rte_unused,
	struct vfio_device_info *obj_info __rte_unused,
	struct rte_dpaa2_device *obj)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	struct dpdmux_attr attr;
	int ret, dpdmux_id = obj->object_id;
	uint16_t maj_ver;
	uint16_t min_ver;
	uint8_t skip_reset_flags;

	PMD_INIT_FUNC_TRACE();

	/* Allocate DPAA2 dpdmux handle */
	dpdmux_dev = rte_zmalloc(NULL,
		sizeof(struct dpaa2_dpdmux_dev), RTE_CACHE_LINE_SIZE);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device");
		return -ENOMEM;
	}

	/* Open the dpdmux object */
	dpdmux_dev->dpdmux.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
	ret = dpdmux_open(&dpdmux_dev->dpdmux, CMD_PRI_LOW, dpdmux_id,
		&dpdmux_dev->token);
	if (ret) {
		DPAA2_PMD_ERR("Unable to open dpdmux object: err(%d)", ret);
		goto init_err;
	}

	ret = dpdmux_get_attributes(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
		dpdmux_dev->token, &attr);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get dpdmux attr: err(%d)", ret);
		goto init_err;
	}

	if (attr.method != DPDMUX_METHOD_C_VLAN_MAC) {
		ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
			dpdmux_dev->token, attr.default_if);
		if (ret) {
			DPAA2_PMD_ERR("Setting default interface failed in %s",
				__func__);
			goto init_err;
		}
		skip_reset_flags = DPDMUX_SKIP_DEFAULT_INTERFACE
			| DPDMUX_SKIP_UNICAST_RULES
			| DPDMUX_SKIP_MULTICAST_RULES;
	} else {
		skip_reset_flags = DPDMUX_SKIP_DEFAULT_INTERFACE;
	}

	ret = dpdmux_get_api_version(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
		&maj_ver, &min_ver);
	if (ret) {
		DPAA2_PMD_ERR("Getting API version failed in %s",
			__func__);
		goto init_err;
	}

	/* The dpdmux_set/get_resetable() API is available starting with
	 * DPDMUX API version 6.6.
	 */
	if (maj_ver > 6 || (maj_ver == 6 && min_ver >= 6)) {
		ret = dpdmux_set_resetable(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
			dpdmux_dev->token, skip_reset_flags);
		if (ret) {
			DPAA2_PMD_ERR("Setting resetable failed in %s",
				__func__);
			goto init_err;
		}
	}

	/* Error behavior configuration requires DPDMUX API version 6.9
	 * or newer.
	 */
	if (maj_ver > 6 || (maj_ver == 6 && min_ver >= 9)) {
		struct dpdmux_error_cfg mux_err_cfg;

		memset(&mux_err_cfg, 0, sizeof(mux_err_cfg));
		/* Note: the discard flag (DPDMUX_ERROR_DISC) only has effect
		 * when ERROR_ACTION is set to
		 * DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE.
		 */
		mux_err_cfg.errors = DPDMUX_ALL_ERRORS;
		mux_err_cfg.error_action = DPDMUX_ERROR_ACTION_CONTINUE;

		ret = dpdmux_if_set_errors_behavior(&dpdmux_dev->dpdmux,
			CMD_PRI_LOW,
			dpdmux_dev->token, DPAA2_DPDMUX_DPMAC_IDX,
			&mux_err_cfg);
		if (ret) {
			DPAA2_PMD_ERR("%s: dpdmux_if_set_errors_behavior failed: err(%d)",
				__func__, ret);
			goto init_err;
		}
	}

	dpdmux_dev->dpdmux_id = dpdmux_id;
	dpdmux_dev->num_ifs = attr.num_ifs;

	TAILQ_INSERT_TAIL(&dpdmux_dev_list, dpdmux_dev, next);

	return 0;

init_err:
	rte_free(dpdmux_dev);

	return -1;
}

static void
dpaa2_close_dpdmux_device(int object_id)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;

	dpdmux_dev = get_dpdmux_from_id((uint32_t)object_id);

	if (dpdmux_dev) {
		dpdmux_close(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
			dpdmux_dev->token);
		TAILQ_REMOVE(&dpdmux_dev_list, dpdmux_dev, next);
		rte_free(dpdmux_dev);
	}
}

static struct rte_dpaa2_object rte_dpaa2_dpdmux_obj = {
	.dev_type = DPAA2_MUX,
	.create = dpaa2_create_dpdmux_device,
	.close = dpaa2_close_dpdmux_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpdmux, rte_dpaa2_dpdmux_obj);
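/*
 * Illustrative usage sketch (not part of this driver; the names and the
 * dpdmux object id below are example-local assumptions). It steers UDP
 * frames with destination port 4789 to dpdmux interface 1; the object id
 * (0 here) depends on the DPRC layout.
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(4789),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vf vf = { .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	int ret = rte_pmd_dpaa2_mux_flow_create(0, pattern, actions);
 */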