/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include "cpfl_representor.h"
#include "cpfl_rxtx.h"
#include "cpfl_flow.h"
#include "cpfl_rules.h"

static int
cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
			   struct cpfl_repr_id *repr_id,
			   struct rte_eth_dev *dev)
{
	int ret;

	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) < 0)
		return -ENOENT;

	ret = rte_hash_add_key_data(adapter->repr_allowlist_hash, repr_id, dev);

	return ret;
}

static int
cpfl_repr_allowlist_add(struct cpfl_adapter_ext *adapter,
			struct cpfl_repr_id *repr_id)
{
	int ret;

	rte_spinlock_lock(&adapter->repr_lock);
	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) >= 0) {
		ret = -EEXIST;
		goto err;
	}

	ret = rte_hash_add_key(adapter->repr_allowlist_hash, repr_id);
	if (ret < 0)
		goto err;

	rte_spinlock_unlock(&adapter->repr_lock);
	return 0;
err:
	rte_spinlock_unlock(&adapter->repr_lock);
	return ret;
}

static int
cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter,
			      struct rte_eth_devargs *eth_da)
{
	struct cpfl_repr_id repr_id;
	int ret, c, p, v;

	for (c = 0; c < eth_da->nb_mh_controllers; c++) {
		for (p = 0; p < eth_da->nb_ports; p++) {
			repr_id.type = eth_da->type;
			if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
				repr_id.host_id = eth_da->mh_controllers[c];
				repr_id.pf_id = eth_da->ports[p];
				repr_id.vf_id = 0;
				ret = cpfl_repr_allowlist_add(adapter, &repr_id);
				if (ret == -EEXIST)
					continue;
				if (ret) {
					PMD_DRV_LOG(ERR, "Failed to add PF repr to allowlist, "
							 "host_id = %d, pf_id = %d.",
						    repr_id.host_id, repr_id.pf_id);
					return ret;
				}
			} else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) {
				for (v = 0; v < eth_da->nb_representor_ports; v++) {
					repr_id.host_id = eth_da->mh_controllers[c];
					repr_id.pf_id = eth_da->ports[p];
					repr_id.vf_id = eth_da->representor_ports[v];
					ret = cpfl_repr_allowlist_add(adapter, &repr_id);
					if (ret == -EEXIST)
						continue;
					if (ret) {
						PMD_DRV_LOG(ERR, "Failed to add VF repr to allowlist, "
								 "host_id = %d, pf_id = %d, vf_id = %d.",
							    repr_id.host_id,
							    repr_id.pf_id,
							    repr_id.vf_id);
						return ret;
					}
				}
			}
		}
	}

	return 0;
}

int
cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter, struct cpfl_devargs *devargs)
{
	int ret, i, j;

	/* check and refine repr args */
	for (i = 0; i < devargs->repr_args_num; i++) {
		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];

		/* set default host_id to host */
		if (eth_da->nb_mh_controllers == 0) {
			eth_da->nb_mh_controllers = 1;
			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
		} else {
			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
						     eth_da->mh_controllers[j]);
					return -EINVAL;
				}
			}
		}

		/* set default pf to APF */
		if (eth_da->nb_ports == 0) {
			eth_da->nb_ports = 1;
			eth_da->ports[0] = CPFL_PF_TYPE_APF;
		} else {
			for (j = 0; j < eth_da->nb_ports; j++) {
				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
					PMD_INIT_LOG(ERR, "Invalid PF ID %d",
						     eth_da->ports[j]);
					return -EINVAL;
				}
			}
		}

		ret = cpfl_repr_devargs_process_one(adapter, eth_da);
		if (ret != 0)
			return ret;
	}

	return 0;
}
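
/*
 * Illustrative devargs (standard rte_eth_devargs representor syntax; the
 * exact spelling depends on the deployment):
 *
 *   representor=c0pf0          - PF representor for host 0, APF
 *   representor=c0pf0vf[0-3]   - VF representors 0..3 behind host 0, APF
 *
 * cpfl_repr_devargs_process() above fills in the defaults (host and APF)
 * when the controller or PF part is omitted.
 */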

static int
cpfl_repr_allowlist_del(struct cpfl_adapter_ext *adapter,
			struct cpfl_repr_id *repr_id)
{
	int ret;

	rte_spinlock_lock(&adapter->repr_lock);

	ret = rte_hash_del_key(adapter->repr_allowlist_hash, repr_id);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to delete repr from allowlist, "
				 "host_id = %d, type = %d, pf_id = %d, vf_id = %d.",
			    repr_id->host_id, repr_id->type,
			    repr_id->pf_id, repr_id->vf_id);
		goto err;
	}

	rte_spinlock_unlock(&adapter->repr_lock);
	return 0;
err:
	rte_spinlock_unlock(&adapter->repr_lock);
	return ret;
}

static int
cpfl_repr_uninit(struct rte_eth_dev *eth_dev)
{
	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
	struct cpfl_adapter_ext *adapter = repr->itf.adapter;

	eth_dev->data->mac_addrs = NULL;

	cpfl_repr_allowlist_del(adapter, &repr->repr_id);

	return 0;
}

static int
cpfl_repr_dev_configure(struct rte_eth_dev *dev)
{
	/* now only 1 RX queue is supported */
	if (dev->data->nb_rx_queues > 1)
		return -EINVAL;

	return 0;
}

static int
cpfl_repr_dev_close(struct rte_eth_dev *dev)
{
	return cpfl_repr_uninit(dev);
}

static int
cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev,
		       struct rte_eth_dev_info *dev_info)
{
	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);

	dev_info->device = ethdev->device;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = 1;
	dev_info->max_tx_queues = 1;
	dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE;

	dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL;

	dev_info->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_RSS_HASH |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP;

	dev_info->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = CPFL_MAX_RING_DESC,
		.nb_min = CPFL_MIN_RING_DESC,
		.nb_align = CPFL_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = CPFL_MAX_RING_DESC,
		.nb_min = CPFL_MIN_RING_DESC,
		.nb_align = CPFL_ALIGN_RING_DESC,
	};

	dev_info->switch_info.name = ethdev->device->name;
	dev_info->switch_info.domain_id = 0; /* the same domain */
	dev_info->switch_info.port_id = repr->vport_info->vport.info.vsi_id;

	return 0;
}
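
/*
 * The representor data path is not wired up in this file: the queue-setup
 * callbacks below are deliberate no-ops, and start/stop only flip the
 * per-queue ethdev states.
 */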

static int
cpfl_repr_dev_start(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
cpfl_repr_dev_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	dev->data->dev_started = 0;
	return 0;
}

static int
cpfl_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
			 __rte_unused uint16_t queue_id,
			 __rte_unused uint16_t nb_desc,
			 __rte_unused unsigned int socket_id,
			 __rte_unused const struct rte_eth_rxconf *conf,
			 __rte_unused struct rte_mempool *pool)
{
	/* Dummy */
	return 0;
}

static int
cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
			 __rte_unused uint16_t queue_id,
			 __rte_unused uint16_t nb_desc,
			 __rte_unused unsigned int socket_id,
			 __rte_unused const struct rte_eth_txconf *conf)
{
	/* Dummy */
	return 0;
}

static int
cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
{
	if ((host_id != CPFL_HOST_ID_HOST &&
	     host_id != CPFL_HOST_ID_ACC) ||
	    (pf_id != CPFL_PF_TYPE_APF &&
	     pf_id != CPFL_PF_TYPE_CPF))
		return -EINVAL;

	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
	};

	return func_id_map[host_id][pf_id];
}
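
/*
 * Representor link state mirrors the state of the represented function,
 * not a physical port: with wait_to_complete set, cpfl_repr_link_update()
 * below refreshes repr->func_up via a CPCHNL vport-info query; otherwise
 * the cached value is reported.
 */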

static int
cpfl_repr_link_update(struct rte_eth_dev *ethdev,
		      int wait_to_complete)
{
	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
	struct rte_eth_link *dev_link = &ethdev->data->dev_link;
	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
	struct cpchnl2_get_vport_info_response response;
	struct cpfl_vport_id vi;
	int ret;

	if (!rte_eth_dev_is_repr(ethdev)) {
		PMD_INIT_LOG(ERR, "This ethdev is not a representor.");
		return -EINVAL;
	}

	if (wait_to_complete) {
		if (repr->repr_id.type == RTE_ETH_REPRESENTOR_PF) {
			/* PF */
			vi.func_type = CPCHNL2_FTYPE_LAN_PF;
			vi.pf_id = cpfl_func_id_get(repr->repr_id.host_id, repr->repr_id.pf_id);
			vi.vf_id = 0;
		} else {
			/* VF */
			vi.func_type = CPCHNL2_FTYPE_LAN_VF;
			vi.pf_id = CPFL_HOST0_APF;
			vi.vf_id = repr->repr_id.vf_id;
		}
		ret = cpfl_cc_vport_info_get(adapter, &repr->vport_info->vport.vport,
					     &vi, &response);
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "Failed to get vport info.");
			return ret;
		}

		if (response.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
			repr->func_up = true;
		else
			repr->func_up = false;
	}

	dev_link->link_status = repr->func_up ?
				RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;

	return 0;
}

static int
cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
			   const struct rte_flow_ops **ops)
{
	if (!dev)
		return -EINVAL;

#ifdef RTE_HAS_JANSSON
	*ops = &cpfl_flow_ops;
#else
	*ops = NULL;
	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the jansson library.");
#endif
	return 0;
}

static const struct eth_dev_ops cpfl_repr_dev_ops = {
	.dev_start = cpfl_repr_dev_start,
	.dev_stop = cpfl_repr_dev_stop,
	.dev_configure = cpfl_repr_dev_configure,
	.dev_close = cpfl_repr_dev_close,
	.dev_infos_get = cpfl_repr_dev_info_get,

	.rx_queue_setup = cpfl_repr_rx_queue_setup,
	.tx_queue_setup = cpfl_repr_tx_queue_setup,

	.link_update = cpfl_repr_link_update,
	.flow_ops_get = cpfl_dev_repr_flow_ops_get,
};

static int
cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
{
	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
	struct cpfl_repr_param *param = init_param;
	struct cpfl_adapter_ext *adapter = param->adapter;
	int ret;

	repr->repr_id = param->repr_id;
	repr->vport_info = param->vport_info;
	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
	repr->itf.adapter = adapter;
	repr->itf.data = eth_dev->data;
	if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
		repr->func_up = true;

	TAILQ_INIT(&repr->itf.flow_list);
	memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
	memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
	ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
				       sizeof(union cpfl_rule_cfg_pkt_record),
				       CPFL_FLOW_BATCH_SIZE);
	if (ret < 0)
		return ret;

	eth_dev->dev_ops = &cpfl_repr_dev_ops;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;

	eth_dev->data->representor_id =
		CPFL_REPRESENTOR_ID(repr->repr_id.type,
				    repr->repr_id.host_id,
				    repr->repr_id.pf_id,
				    repr->repr_id.vf_id);

	eth_dev->data->mac_addrs = &repr->mac_addr;

	rte_eth_random_addr(repr->mac_addr.addr_bytes);

	return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
}

static bool
cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
			   struct cpchnl2_vport_info *info)
{
	int func_id;

	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
	    info->func_type == CPCHNL2_FTYPE_LAN_PF) {
		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
		if (func_id < 0 || func_id != info->pf_id)
			return false;
		else
			return true;
	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
		   info->func_type == CPCHNL2_FTYPE_LAN_VF) {
		if (repr_id->vf_id == info->vf_id)
			return true;
	}

	return false;
}

static int
cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
			   const struct cpfl_repr_id *repr_id,
			   struct cpchnl2_get_vport_list_response *response)
{
	struct cpfl_vport_id vi;
	int ret;

	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
		/* PF */
		vi.func_type = CPCHNL2_FTYPE_LAN_PF;
		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
		vi.vf_id = 0;
	} else {
		/* VF */
		vi.func_type = CPCHNL2_FTYPE_LAN_VF;
		vi.pf_id = CPFL_HOST0_APF;
		vi.vf_id = repr_id->vf_id;
	}

	ret = cpfl_cc_vport_list_get(adapter, &vi, response);

	return ret;
}
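
/*
 * As in cpfl_repr_vport_list_query() above, VF representors are addressed
 * relative to the host0 APF (vi.pf_id = CPFL_HOST0_APF).
 */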

static int
cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
			   const struct cpfl_repr_id *repr_id,
			   struct cpchnl2_vport_id *vport_id,
			   struct cpchnl2_get_vport_info_response *response)
{
	struct cpfl_vport_id vi;
	int ret;

	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
		/* PF */
		vi.func_type = CPCHNL2_FTYPE_LAN_PF;
		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
		vi.vf_id = 0;
	} else {
		/* VF */
		vi.func_type = CPCHNL2_FTYPE_LAN_VF;
		vi.pf_id = CPFL_HOST0_APF;
		vi.vf_id = repr_id->vf_id;
	}

	ret = cpfl_cc_vport_info_get(adapter, vport_id, &vi, response);

	return ret;
}

static int
cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
			   const struct cpfl_repr_id *repr_id, uint32_t vport_id,
			   struct cpchnl2_get_vport_info_response *response)
{
	struct cpfl_vport_id vi;
	int ret;

	vi.vport_id = vport_id;
	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
		/* PF */
		vi.func_type = CPCHNL2_FTYPE_LAN_PF;
		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
	} else {
		/* VF */
		vi.func_type = CPCHNL2_FTYPE_LAN_VF;
		vi.pf_id = CPFL_HOST0_APF;
		vi.vf_id = repr_id->vf_id;
	}

	ret = cpfl_vport_info_create(adapter, &vi, (struct cpchnl2_event_vport_created *)response);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to update vport map hash for representor.");
		return ret;
	}

	return 0;
}
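
/*
 * cpfl_repr_create() walks the devargs-built allowlist and, for every entry
 * not yet probed (dev == NULL), queries the vport list and per-vport info
 * over CPCHNL, refreshes the vport map hash, then instantiates the ethdev
 * via rte_eth_dev_create()/cpfl_repr_init() once a matching vport is found.
 */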

int
cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
{
	struct rte_eth_dev *dev;
	uint32_t iter = 0;
	const struct cpfl_repr_id *repr_id;
	const struct cpfl_vport_id *vp_id;
	struct cpchnl2_get_vport_list_response *vlist_resp;
	struct cpchnl2_get_vport_info_response vinfo_resp;
	int ret = 0;

	vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (vlist_resp == NULL)
		return -ENOMEM;

	rte_spinlock_lock(&adapter->repr_lock);

	while (rte_hash_iterate(adapter->repr_allowlist_hash,
				(const void **)&repr_id, (void **)&dev, &iter) >= 0) {
		struct cpfl_vport_info *vi;
		char name[RTE_ETH_NAME_MAX_LEN];
		uint32_t iter_iter = 0;
		int i;

		/* skip representors that have already been created */
		if (dev != NULL)
			continue;

		if (repr_id->type == RTE_ETH_REPRESENTOR_VF)
			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d",
				 pci_dev->name,
				 repr_id->host_id,
				 repr_id->pf_id,
				 repr_id->vf_id);
		else
			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d",
				 pci_dev->name,
				 repr_id->host_id,
				 repr_id->pf_id);

		/* get vport list for the port representor */
		ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's vport list",
				     repr_id->host_id, repr_id->pf_id, repr_id->vf_id);
			goto err;
		}

		if (vlist_resp->nof_vports == 0) {
			PMD_INIT_LOG(WARNING, "No matched vport for representor %s", name);
			continue;
		}

		/* get all vport info for the port representor */
		for (i = 0; i < vlist_resp->nof_vports; i++) {
			ret = cpfl_repr_vport_info_query(adapter, repr_id,
							 &vlist_resp->vports[i], &vinfo_resp);
			if (ret != 0) {
				PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d vport[%d]'s info",
					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
					     vlist_resp->vports[i].vport_id);
				goto err;
			}

			ret = cpfl_repr_vport_map_update(adapter, repr_id,
							 vlist_resp->vports[i].vport_id, &vinfo_resp);
			if (ret != 0) {
				PMD_INIT_LOG(ERR, "Failed to update host%d pf%d vf%d vport[%d]'s info to vport_map_hash",
					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
					     vlist_resp->vports[i].vport_id);
				goto err;
			}
		}

		/* find the matched vport */
		rte_spinlock_lock(&adapter->vport_map_lock);

		while (rte_hash_iterate(adapter->vport_map_hash,
					(const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) {
			struct cpfl_repr_param param;

			if (!cpfl_match_repr_with_vport(repr_id, &vi->vport.info))
				continue;

			param.adapter = adapter;
			param.repr_id = *repr_id;
			param.vport_info = vi;

			ret = rte_eth_dev_create(&pci_dev->device,
						 name,
						 sizeof(struct cpfl_repr),
						 NULL, NULL, cpfl_repr_init,
						 &param);
			if (ret != 0) {
				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
				rte_spinlock_unlock(&adapter->vport_map_lock);
				goto err;
			}
			break;
		}

		rte_spinlock_unlock(&adapter->vport_map_lock);
	}

err:
	rte_spinlock_unlock(&adapter->repr_lock);
	rte_free(vlist_resp);
	return ret;
}