/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2019 Cisco Systems, Inc.  All rights reserved.
 */

#include <stdint.h>
#include <stdio.h>

#include <bus_pci_driver.h>
#include <rte_common.h>
#include <dev_driver.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_flow_driver.h>
#include <rte_kvargs.h>
#include <rte_pci.h>
#include <rte_string_fns.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_enet.h"
#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"

static uint16_t enic_vf_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	return enic_recv_pkts(rx_queue, rx_pkts, nb_pkts);
}

static uint16_t enic_vf_xmit_pkts(void *tx_queue,
				  struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	return enic_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
}
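
/*
 * A representor does not own standalone hardware queues. It borrows one WQ,
 * one sop/data RQ pair, and the matching CQs from the PF's pool; the
 * indexes (pf_wq_idx, pf_rq_sop_idx, and so on) are computed in
 * enic_vf_representor_init() below, and the setup/release handlers that
 * follow operate on those borrowed queues.
 */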

static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	struct enic_vf_representor *vf;
	struct vnic_wq *wq;
	struct enic *pf;
	int err;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only one queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	wq = &pf->wq[vf->pf_wq_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[0] = (void *)wq;
	/* Pass vf, not pf, because of the CQ index calculation.
	 * See enic_alloc_wq.
	 */
	err = enic_alloc_wq(&vf->enic, queue_idx, socket_id, nb_desc);
	if (err) {
		ENICPMD_LOG(ERR, "error in allocating wq");
		return err;
	}
	return 0;
}

static void enic_vf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *txq = dev->data->tx_queues[qid];

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_wq(txq);
}

static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	struct enic_vf_representor *vf;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only one queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&pf->rq[vf->pf_rq_sop_idx];
	ret = enic_alloc_rq(&vf->enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		ENICPMD_LOG(ERR, "error in allocating rq");
		return ret;
	}
	return 0;
}

static void enic_vf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *rxq = dev->data->rx_queues[qid];

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_rq(rxq);
}

static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	return 0;
}
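
/*
 * Sketch of the implicit representor path set up below (implemented by
 * enic_fm_add_rep2vf_flow() and enic_fm_add_vf2rep_flow()):
 *
 *   app TX -> representor WQ --(rep2vf flow)--> VF
 *   VF TX --(vf2rep flow, unless a user flow matches)--> representor RQ -> app
 */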

static int
setup_rep_vf_fwd(struct enic_vf_representor *vf)
{
	int ret;

	ENICPMD_FUNC_TRACE();
	/* Representor -> VF rule
	 * Egress packets from this representor are on the representor's WQ.
	 * So, loop back that WQ to the VF.
	 */
	ret = enic_fm_add_rep2vf_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create representor->VF flow");
		return ret;
	}
	/* VF -> representor rule
	 * Packets from the VF loop back to the representor, unless they match
	 * user-added flows.
	 */
	ret = enic_fm_add_vf2rep_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create VF->representor flow");
		return ret;
	}
	return 0;
}
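
/*
 * dev_start brings up the borrowed PF queues by hand. The steps mirror what
 * enic_init_vnic_resources() and enic_enable() do for the PF's own queues:
 * install the forwarding flows, init the WQ and its CQ, init the sop/data
 * RQs and their CQ, post mbufs, then enable.
 */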

static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *data_rq;
	int index, cq_idx;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/* Get representor flowman for flow API and representor path */
	ret = enic_fm_init(&vf->enic);
	if (ret)
		return ret;
	/* Set up implicit flow rules to forward between representor and VF */
	ret = setup_rep_vf_fwd(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot set up representor-VF flows");
		return ret;
	}
	/* Remove all packet filters so no ingress packets go to the VF.
	 * When the PF enables switchdev, it ensures packet filters
	 * are removed, so this is not strictly needed.
	 */
	ENICPMD_LOG(DEBUG, "Clear packet filters");
	ret = vnic_dev_packet_filter(vf->enic.vdev, 0, 0, 0, 0, 0);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot clear packet filters");
		return ret;
	}

	/* Start WQ: see enic_init_vnic_resources */
	index = vf->pf_wq_idx;
	cq_idx = vf->pf_wq_cq_idx;
	vnic_wq_init(&pf->wq[index], cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     0 /* cq_entry_enable */,
		     1 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     (uint64_t)pf->wq[index].cqmsg_rz->iova);
	/* enic_start_wq */
	vnic_wq_enable(&pf->wq[index]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;

	/* Start RQ: see enic_init_vnic_resources */
	index = vf->pf_rq_sop_idx;
	cq_idx = enic_cq_rq(vf->pf, index);
	vnic_rq_init(&pf->rq[index], cq_idx, 1, 0);
	data_rq = &pf->rq[vf->pf_rq_data_idx];
	if (data_rq->in_use)
		vnic_rq_init(data_rq, cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     0 /* cq_message_addr */);
	/* enic_enable */
	ret = enic_alloc_rx_queue_mbufs(pf, &pf->rq[index]);
	if (ret) {
		ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs");
		return ret;
	}
	ret = enic_alloc_rx_queue_mbufs(pf, data_rq);
	if (ret) {
		/* Release the allocated mbufs for the sop rq */
		enic_rxmbuf_queue_release(pf, &pf->rq[index]);
		ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs");
		return ret;
	}
	enic_start_rq(pf, vf->pf_rq_sop_idx);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}
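
/*
 * Note the asymmetry in the two vnic_cq_init() calls above: the WQ's CQ
 * runs in message mode (cq_message_enable=1, completions written to the
 * cqmsg_rz memzone), while the RQ's CQ runs in entry mode
 * (cq_entry_enable=1). This matches how enic_init_vnic_resources() sets up
 * the PF's own queues. dev_stop below undoes all of it in reverse.
 */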

static int enic_vf_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *rq;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	/* Undo dev_start. Disable/clean WQ */
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	vnic_wq_disable(&pf->wq[vf->pf_wq_idx]);
	vnic_wq_clean(&pf->wq[vf->pf_wq_idx], enic_free_wq_buf);
	vnic_cq_clean(&pf->cq[vf->pf_wq_cq_idx]);
	/* Disable/clean RQ */
	rq = &pf->rq[vf->pf_rq_sop_idx];
	vnic_rq_disable(rq);
	vnic_rq_clean(rq, enic_free_rq_buf);
	rq = &pf->rq[vf->pf_rq_data_idx];
	if (rq->in_use) {
		vnic_rq_disable(rq);
		vnic_rq_clean(rq, enic_free_rq_buf);
	}
	vnic_cq_clean(&pf->cq[enic_cq_rq(vf->pf, vf->pf_rq_sop_idx)]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	/* Clean up representor flowman */
	enic_fm_destroy(&vf->enic);

	return 0;
}

/*
 * "close" is a no-op for now and solely exists so that rte_eth_dev_close()
 * can finish its own cleanup without errors.
 */
static int enic_vf_dev_close(struct rte_eth_dev *eth_dev __rte_unused)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	return 0;
}
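
/*
 * Flow attribute fixup for the rte_flow API. Example: an application adds
 * an "ingress" flow on the representor port to match traffic arriving from
 * the VF. From the firmware's point of view that traffic is egress on the
 * VF vNIC, so the direction must be flipped before the rule is handed to
 * flowman.
 */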

static int
adjust_flow_attr(const struct rte_flow_attr *attrs,
		 struct rte_flow_attr *vf_attrs,
		 struct rte_flow_error *error)
{
	if (!attrs) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "no attribute specified");
	}
	/*
	 * Swap ingress and egress, as the firmware's view of direction
	 * is the opposite of the representor's.
	 */
	*vf_attrs = *attrs;
	if (attrs->ingress && !attrs->egress) {
		vf_attrs->ingress = 0;
		vf_attrs->egress = 1;
		return 0;
	}
	return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
			"representor only supports ingress");
}

static int
enic_vf_flow_validate(struct rte_eth_dev *dev,
		      const struct rte_flow_attr *attrs,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;
	int ret;

	ret = adjust_flow_attr(attrs, &vf_attrs, error);
	if (ret)
		return ret;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.validate(dev, attrs, pattern, actions, error);
}

static struct rte_flow *
enic_vf_flow_create(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attrs,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;

	if (adjust_flow_attr(attrs, &vf_attrs, error))
		return NULL;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.create(dev, attrs, pattern, actions, error);
}

static int
enic_vf_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	return enic_fm_flow_ops.destroy(dev, flow, error);
}

static int
enic_vf_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *actions,
		   void *data,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.query(dev, flow, actions, data, error);
}

static int
enic_vf_flow_flush(struct rte_eth_dev *dev,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.flush(dev, error);
}
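
/*
 * Representor flow ops wrap the PF flowman ops (enic_fm_flow_ops):
 * validate and create adjust the attributes first; destroy, flush, and
 * query pass straight through.
 */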

static const struct rte_flow_ops enic_vf_flow_ops = {
	.validate = enic_vf_flow_validate,
	.create = enic_vf_flow_create,
	.destroy = enic_vf_flow_destroy,
	.flush = enic_vf_flow_flush,
	.query = enic_vf_flow_query,
};

static int
enic_vf_flow_ops_get(struct rte_eth_dev *eth_dev,
		     const struct rte_flow_ops **ops)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	if (vf->enic.flow_filter_mode != FILTER_FLOWMAN) {
		ENICPMD_LOG(WARNING,
			    "VF representors require flowman support for rte_flow API");
		return -EINVAL;
	}

	*ops = &enic_vf_flow_ops;
	return 0;
}

static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
	int wait_to_complete __rte_unused)
{
	struct enic_vf_representor *vf;
	struct rte_eth_link link;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/*
	 * Link status and speed are the same as the PF's. Update the PF
	 * status and then copy it to the VF.
	 */
	enic_link_update(pf->rte_dev);
	rte_eth_linkstatus_get(pf->rte_dev, &link);
	rte_eth_linkstatus_set(eth_dev, &link);
	return 0;
}
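
/*
 * The stats and packet filter handlers below issue devcmds on
 * vf->enic.vdev, the proxy vnic_dev registered by vnic_vf_rep_register()
 * in enic_vf_representor_init(). Firmware executes those devcmds on behalf
 * of the VF (see the "proxy devcmd" note above enic_vf_representor_dev_ops).
 */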

static int enic_vf_stats_get(struct rte_eth_dev *eth_dev,
			     struct rte_eth_stats *stats)
{
	struct enic_vf_representor *vf;
	struct vnic_stats *vs;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Get VF stats via PF */
	err = vnic_dev_stats_dump(vf->enic.vdev, &vs);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting stats");
		return err;
	}
	stats->ipackets = vs->rx.rx_frames_ok;
	stats->opackets = vs->tx.tx_frames_ok;
	stats->ibytes = vs->rx.rx_bytes_ok;
	stats->obytes = vs->tx.tx_bytes_ok;
	stats->ierrors = vs->rx.rx_errors + vs->rx.rx_drop;
	stats->oerrors = vs->tx.tx_errors;
	stats->imissed = vs->rx.rx_no_bufs;
	return 0;
}

static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Ask PF to clear VF stats */
	err = vnic_dev_stats_clear(vf->enic.vdev);
	if (err)
		ENICPMD_LOG(ERR, "error in clearing stats");
	return err;
}
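
/*
 * Reported capabilities are intentionally minimal: one queue pair and no
 * offloads (see the comment on rx/tx_offload_capa below). switch_info ties
 * the representor to the PF's switch domain so applications can map
 * representors to their VFs.
 */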

static int enic_vf_dev_infos_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic_vf_representor *vf;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	device_info->max_rx_queues = eth_dev->data->nb_rx_queues;
	device_info->max_tx_queues = eth_dev->data->nb_tx_queues;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* Max packet size is same as PF */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(pf->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	/* No offload capa, RSS, etc. until Tx/Rx handlers are added */
	device_info->rx_offload_capa = 0;
	device_info->tx_offload_capa = 0;
	device_info->switch_info.name = pf->rte_dev->device->name;
	device_info->switch_info.domain_id = vf->switch_domain_id;
	device_info->switch_info.port_id = vf->vf_id;
	return 0;
}

static void set_vf_packet_filter(struct enic_vf_representor *vf)
{
	/* switchdev: packet filters are ignored */
	if (vf->enic.switchdev_mode)
		return;
	/* Ask PF to apply filters on VF */
	vnic_dev_packet_filter(vf->enic.vdev, 1 /* unicast */, 1 /* mcast */,
			       1 /* bcast */, vf->promisc, vf->allmulti);
}
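
/*
 * The four handlers below only record the requested promisc/allmulti state
 * and re-apply the VF packet filter. In switchdev mode this is effectively
 * a no-op, since set_vf_packet_filter() ignores packet filters there.
 */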

static int enic_vf_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 0;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 0;
	set_vf_packet_filter(vf);
	return 0;
}

/*
 * A minimal set of handlers.
 * The representor can get/set a small set of VF settings via "proxy" devcmd.
 * With proxy devcmd, the PF driver basically tells the VIC firmware to
 * "perform this devcmd on that VF".
 */
static const struct eth_dev_ops enic_vf_representor_dev_ops = {
	.allmulticast_enable = enic_vf_allmulticast_enable,
	.allmulticast_disable = enic_vf_allmulticast_disable,
	.dev_configure = enic_vf_dev_configure,
	.dev_infos_get = enic_vf_dev_infos_get,
	.dev_start = enic_vf_dev_start,
	.dev_stop = enic_vf_dev_stop,
	.dev_close = enic_vf_dev_close,
	.flow_ops_get = enic_vf_flow_ops_get,
	.link_update = enic_vf_link_update,
	.promiscuous_enable = enic_vf_promiscuous_enable,
	.promiscuous_disable = enic_vf_promiscuous_disable,
	.stats_get = enic_vf_stats_get,
	.stats_reset = enic_vf_stats_reset,
	.rx_queue_setup = enic_vf_dev_rx_queue_setup,
	.rx_queue_release = enic_vf_dev_rx_queue_release,
	.tx_queue_setup = enic_vf_dev_tx_queue_setup,
	.tx_queue_release = enic_vf_dev_tx_queue_release,
};
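
/*
 * MTU example for get_vf_config() below: if the VF vNIC is provisioned
 * with MTU 1500 but the fabric interconnect port is set to 9000, the
 * representor reports min(ENIC_MAX_MTU, 9000), matching what the kernel
 * enic driver on the VF would adjust itself to.
 */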

static int get_vf_config(struct enic_vf_representor *vf)
{
	struct vnic_enet_config *c;
	struct enic *pf;
	int switch_mtu;
	int err;

	c = &vf->config;
	pf = vf->pf;
	/* VF MAC */
	err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MAC address");
		return err;
	}
	rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs);

	/* VF MTU per its vNIC setting */
	err = vnic_dev_spec(vf->enic.vdev,
			    offsetof(struct vnic_enet_config, mtu),
			    sizeof(c->mtu), &c->mtu);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MTU");
		return err;
	}
	/*
	 * Blade switch (fabric interconnect) port's MTU. Assume the kernel
	 * enic driver runs on the VF. That driver automatically adjusts its
	 * MTU according to the switch MTU.
	 */
	switch_mtu = vnic_dev_mtu(pf->vdev);
	vf->eth_dev->data->mtu = c->mtu;
	if (switch_mtu > c->mtu)
		vf->eth_dev->data->mtu = RTE_MIN(ENIC_MAX_MTU, switch_mtu);
	return 0;
}
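
/*
 * Per-representor init. Note this only reserves queue indexes and registers
 * the proxy vdev; the PF accounts for the reserved queues via the
 * vf_required_wq/rq/cq counters, and the actual WQ/RQ allocation happens
 * later in the queue setup handlers.
 */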

int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
{
	struct enic_vf_representor *vf, *params;
	struct rte_pci_device *pdev;
	struct enic *pf, *vf_enic;
	struct rte_pci_addr *addr;
	int ret;

	ENICPMD_FUNC_TRACE();
	params = init_params;
	vf = eth_dev->data->dev_private;
	vf->switch_domain_id = params->switch_domain_id;
	vf->vf_id = params->vf_id;
	vf->eth_dev = eth_dev;
	vf->pf = params->pf;
	vf->allmulti = 1;
	vf->promisc = 0;
	pf = vf->pf;
	vf->enic.switchdev_mode = pf->switchdev_mode;
	/* Only switchdev is supported now */
	RTE_ASSERT(vf->enic.switchdev_mode);
	/* Allocate WQ, RQ, CQ for the representor */
	vf->pf_wq_idx = vf_wq_idx(vf);
	vf->pf_wq_cq_idx = vf_wq_cq_idx(vf);
	vf->pf_rq_sop_idx = vf_rq_sop_idx(vf);
	vf->pf_rq_data_idx = vf_rq_data_idx(vf);
	/* Remove these assertions once queue allocation has an easy-to-use
	 * allocator API instead of the index number calculations used
	 * throughout the driver.
	 */
	RTE_ASSERT(enic_cq_rq(pf, vf->pf_rq_sop_idx) == vf->pf_rq_sop_idx);
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(vf->pf_rq_sop_idx) ==
		   vf->pf_rq_sop_idx);
	/* RX handlers use enic_cq_rq(sop) to get the CQ, so do not save it */
	pf->vf_required_wq++;
	pf->vf_required_rq += 2; /* sop and data */
	pf->vf_required_cq += 2; /* 1 for rq sop and 1 for wq */
	ENICPMD_LOG(DEBUG, "vf_id %u wq %u rq_sop %u rq_data %u wq_cq %u rq_cq %u",
		    vf->vf_id, vf->pf_wq_idx, vf->pf_rq_sop_idx,
		    vf->pf_rq_data_idx, vf->pf_wq_cq_idx,
		    enic_cq_rq(pf, vf->pf_rq_sop_idx));
	if (enic_cq_rq(pf, vf->pf_rq_sop_idx) >= pf->conf_cq_count) {
		ENICPMD_LOG(ERR, "Insufficient CQs. Please ensure number of CQs (%u)"
			    " >= number of RQs (%u) in CIMC or UCSM",
			    pf->conf_cq_count, pf->conf_rq_count);
		return -EINVAL;
	}

	/* Check for non-existent VFs */
	pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);
	if (vf->vf_id >= pdev->max_vfs) {
		ENICPMD_LOG(ERR, "VF ID is invalid. vf_id %u max_vfs %u",
			    vf->vf_id, pdev->max_vfs);
		return -ENODEV;
	}

	eth_dev->device->driver = pf->rte_dev->device->driver;
	eth_dev->dev_ops = &enic_vf_representor_dev_ops;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	eth_dev->data->representor_id = vf->vf_id;
	eth_dev->data->backer_port_id = pf->port_id;
	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
		sizeof(struct rte_ether_addr) *
		ENIC_UNICAST_PERFECT_FILTERS, 0);
	if (eth_dev->data->mac_addrs == NULL)
		return -ENOMEM;
	/* Use 1 RX queue and 1 TX queue for the representor path */
	eth_dev->data->nb_rx_queues = 1;
	eth_dev->data->nb_tx_queues = 1;
	eth_dev->rx_pkt_burst = &enic_vf_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_vf_xmit_pkts;
	/* Initial link state copied from PF */
	eth_dev->data->dev_link = pf->rte_dev->data->dev_link;
	/* Representor vdev to perform devcmd */
	vf->enic.vdev = vnic_vf_rep_register(&vf->enic, pf->vdev, vf->vf_id);
	if (vf->enic.vdev == NULL)
		return -ENOMEM;
	ret = vnic_dev_alloc_stats_mem(vf->enic.vdev);
	if (ret)
		return ret;
	/* Get/copy VF vNIC MAC, MTU, etc. into eth_dev */
	ret = get_vf_config(vf);
	if (ret)
		return ret;

	/*
	 * Calculate the VF BDF. The firmware ensures that the PF BDF is
	 * always bus:dev.0, and VF BDFs are dev.1, dev.2, and so on.
	 */
	vf->bdf = pdev->addr;
	vf->bdf.function += vf->vf_id + 1;
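
	/*
	 * For example, if the PF is 0000:62:00.0, the representor for VF 0
	 * computes 0000:62:00.1, VF 1 computes 0000:62:00.2, and so on.
	 * Here the address is only used to build bdf_name below.
	 */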

	/* Copy a few fields used by enic_fm_flow */
	vf_enic = &vf->enic;
	vf_enic->switch_domain_id = vf->switch_domain_id;
	vf_enic->flow_filter_mode = pf->flow_filter_mode;
	vf_enic->rte_dev = eth_dev;
	vf_enic->dev_data = eth_dev->data;
	LIST_INIT(&vf_enic->flows);
	LIST_INIT(&vf_enic->memzone_list);
	rte_spinlock_init(&vf_enic->memzone_list_lock);
	addr = &vf->bdf;
	snprintf(vf_enic->bdf_name, PCI_PRI_STR_SIZE, PCI_PRI_FMT,
		 addr->domain, addr->bus, addr->devid, addr->function);
	return 0;
}

int enic_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	vnic_dev_unregister(vf->enic.vdev);
	return 0;
}