/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>
#include <rte_errno.h>
#include <rte_ether.h>

#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_cfg.h"
#include "base/hinic_pmd_mgmt.h"
#include "base/hinic_pmd_cmdq.h"
#include "base/hinic_pmd_niccfg.h"
#include "base/hinic_pmd_nicio.h"
#include "base/hinic_pmd_mbox.h"
#include "hinic_pmd_ethdev.h"
#include "hinic_pmd_tx.h"
#include "hinic_pmd_rx.h"

/* Vendor ID used by Huawei devices */
#define HINIC_HUAWEI_VENDOR_ID		0x19E5

/* Hinic devices */
#define HINIC_DEV_ID_PRD		0x1822
#define HINIC_DEV_ID_VF			0x375E
#define HINIC_DEV_ID_VF_HV		0x379E

/* Mezz card for Blade Server */
#define HINIC_DEV_ID_MEZZ_25GE		0x0210
#define HINIC_DEV_ID_MEZZ_100GE		0x0205

/* 2*25G and 2*100G card */
#define HINIC_DEV_ID_1822_DUAL_25GE	0x0206
#define HINIC_DEV_ID_1822_100GE		0x0200

#define HINIC_SERVICE_MODE_NIC		2

#define HINIC_INTR_CB_UNREG_MAX_RETRIES	10

#define DEFAULT_BASE_COS		4
#define NR_MAX_COS			8

#define HINIC_MIN_RX_BUF_SIZE		1024
#define HINIC_MAX_UC_MAC_ADDRS		128
#define HINIC_MAX_MC_MAC_ADDRS		2048

#define HINIC_DEFAULT_BURST_SIZE	32
#define HINIC_DEFAULT_NB_QUEUES		1
#define HINIC_DEFAULT_RING_SIZE		1024
#define HINIC_MAX_LRO_SIZE		65536

/*
 * vlan_id is a 12-bit number.
 * The VFTA array is actually a 4096-bit array, i.e. 128 32-bit elements.
 * 2^5 = 32. The value of the lower 5 bits selects the bit within a 32-bit
 * element, and the value of the upper 7 bits selects the VFTA array index.
 */
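/*
 * Worked example (illustrative only): for vlan_id 1000,
 * HINIC_VFTA_IDX(1000) = 1000 >> 5 = 31 and
 * HINIC_VFTA_BIT(1000) = 1 << (1000 & 0x1F) = 1 << 8,
 * i.e. bit 8 of vfta[31] tracks that VLAN.
 */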
#define HINIC_VFTA_BIT(vlan_id)	(1 << ((vlan_id) & 0x1F))
#define HINIC_VFTA_IDX(vlan_id)	((vlan_id) >> 5)

#define HINIC_VLAN_FILTER_EN	(1U << 0)

/* LRO WQE number limit for one packet */
#define HINIC_LRO_WQE_NUM_DEFAULT	8

struct hinic_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	u32 offset;
};

#define HINIC_FUNC_STAT(_stat_item) {	\
	.name = #_stat_item, \
	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
}

#define HINIC_PORT_STAT(_stat_item) { \
	.name = #_stat_item, \
	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
}

static const struct hinic_xstats_name_off hinic_vport_stats_strings[] = {
	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(tx_discard_vport),
	HINIC_FUNC_STAT(rx_discard_vport),
	HINIC_FUNC_STAT(tx_err_vport),
	HINIC_FUNC_STAT(rx_err_vport),
};

#define HINIC_VPORT_XSTATS_NUM (sizeof(hinic_vport_stats_strings) / \
		sizeof(hinic_vport_stats_strings[0]))

static const struct hinic_xstats_name_off hinic_phyport_stats_strings[] = {
	HINIC_PORT_STAT(mac_rx_total_pkt_num),
	HINIC_PORT_STAT(mac_rx_total_oct_num),
	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_bad_oct_num),
	HINIC_PORT_STAT(mac_rx_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_good_oct_num),
	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_oct_num),
	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_bad_oct_num),
	HINIC_PORT_STAT(mac_tx_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_good_oct_num),
	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
	HINIC_PORT_STAT(mac_rx_mac_pause_num),
	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_rx_mac_control_pkt_num),
	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_trans_jabber_pkt_num),
	HINIC_PORT_STAT(mac_tx_mac_pause_num),
	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_tx_mac_control_pkt_num),
	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
};

#define HINIC_PHYPORT_XSTATS_NUM (sizeof(hinic_phyport_stats_strings) / \
		sizeof(hinic_phyport_stats_strings[0]))

static const struct hinic_xstats_name_off hinic_rxq_stats_strings[] = {
	{"rx_nombuf", offsetof(struct hinic_rxq_stats, rx_nombuf)},
	{"burst_pkt", offsetof(struct hinic_rxq_stats, burst_pkts)},
};

#define HINIC_RXQ_XSTATS_NUM (sizeof(hinic_rxq_stats_strings) / \
		sizeof(hinic_rxq_stats_strings[0]))

static const struct hinic_xstats_name_off hinic_txq_stats_strings[] = {
	{"tx_busy", offsetof(struct hinic_txq_stats, tx_busy)},
	{"offload_errors", offsetof(struct hinic_txq_stats, off_errs)},
	{"copy_pkts", offsetof(struct hinic_txq_stats, cpy_pkts)},
	{"rl_drop", offsetof(struct hinic_txq_stats, rl_drop)},
	{"burst_pkts", offsetof(struct hinic_txq_stats, burst_pkts)},
	{"sge_len0", offsetof(struct hinic_txq_stats, sge_len0)},
	{"mbuf_null", offsetof(struct hinic_txq_stats, mbuf_null)},
};

#define HINIC_TXQ_XSTATS_NUM (sizeof(hinic_txq_stats_strings) / \
		sizeof(hinic_txq_stats_strings[0]))

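/*
 * Note: only the PF has access to the physical-port MAC counters, so a
 * VF exposes vport and per-queue statistics only; this is why the xstats
 * count below differs between PF and VF.
 */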
static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev)
{
	if (HINIC_IS_VF(nic_dev->hwdev)) {
		return (HINIC_VPORT_XSTATS_NUM +
			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
	} else {
		return (HINIC_VPORT_XSTATS_NUM +
			HINIC_PHYPORT_XSTATS_NUM +
			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
	}
}

static const struct rte_eth_desc_lim hinic_rx_desc_lim = {
	.nb_max = HINIC_MAX_QUEUE_DEPTH,
	.nb_min = HINIC_MIN_QUEUE_DEPTH,
	.nb_align = HINIC_RXD_ALIGN,
};

static const struct rte_eth_desc_lim hinic_tx_desc_lim = {
	.nb_max = HINIC_MAX_QUEUE_DEPTH,
	.nb_min = HINIC_MIN_QUEUE_DEPTH,
	.nb_align = HINIC_TXD_ALIGN,
};

static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);

/**
 * Interrupt handler triggered by the NIC for handling a specific event.
 *
 * @param param
 *   The address of the parameter (struct rte_eth_dev *) registered before.
 */
static void hinic_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	if (!rte_bit_relaxed_get32(HINIC_DEV_INTR_EN, &nic_dev->dev_status)) {
		PMD_DRV_LOG(WARNING, "Device's interrupt is disabled, ignore interrupt event, dev_name: %s, port_id: %d",
			    nic_dev->proc_dev_name, dev->data->port_id);
		return;
	}

	/* aeq0 msg handler */
	hinic_dev_handle_aeq_event(nic_dev->hwdev, param);
}

/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues, the MTU size,
 * and configure RSS.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int hinic_dev_configure(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev;
	struct hinic_nic_io *nic_io;
	int err;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	nic_io = nic_dev->hwdev->nic_io;

	nic_dev->num_sq = dev->data->nb_tx_queues;
	nic_dev->num_rq = dev->data->nb_rx_queues;

	nic_io->num_sqs = dev->data->nb_tx_queues;
	nic_io->num_rqs = dev->data->nb_rx_queues;

	/* queue pair is max_num(sq, rq) */
	nic_dev->num_qps = (nic_dev->num_sq > nic_dev->num_rq) ?
			nic_dev->num_sq : nic_dev->num_rq;
	nic_io->num_qps = nic_dev->num_qps;

	if (nic_dev->num_qps > nic_io->max_qps) {
		PMD_DRV_LOG(ERR,
			"Queue number out of range, get queue_num:%d, max_queue_num:%d",
			nic_dev->num_qps, nic_io->max_qps);
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* mtu size is 256~9600 */
	if (HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) <
			HINIC_MIN_FRAME_SIZE ||
	    HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) >
			HINIC_MAX_JUMBO_FRAME_SIZE) {
		PMD_DRV_LOG(ERR,
			"Packet length out of range, get packet length:%d, "
			"expect between %d and %d",
			HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu),
			HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}

	nic_dev->mtu_size = dev->data->dev_conf.rxmode.mtu;

	/* rss template */
	err = hinic_config_mq_mode(dev, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Config multi-queue failed");
		return err;
	}

	/* init vlan offload */
	err = hinic_vlan_offload_set(dev,
			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
	if (err) {
		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
		(void)hinic_config_mq_mode(dev, FALSE);
		return err;
	}

	/* clear fdir filter flag in function table */
	hinic_free_fdir_filter(nic_dev);

	return HINIC_OK;
}

/**
 * DPDK callback to create the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue_idx
 *   RX queue index.
 * @param nb_desc
 *   Number of descriptors for the receive queue.
 * @param socket_id
 *   NUMA socket on which memory must be allocated.
 * @param rx_conf
 *   Thresholds parameters (unused).
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int hinic_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			 uint16_t nb_desc, unsigned int socket_id,
			 __rte_unused const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	int rc;
	struct hinic_nic_dev *nic_dev;
	struct hinic_hwdev *hwdev;
	struct hinic_rxq *rxq;
	u16 rq_depth, rx_free_thresh;
	u32 buf_size;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	hwdev = nic_dev->hwdev;

	/* queue depth must be power of 2, otherwise will be aligned up */
	rq_depth = (nb_desc & (nb_desc - 1)) ?
		((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
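	/* e.g. nb_desc = 1000 is not a power of two and is rounded up to 1024 */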

	/*
	 * Validate number of receive descriptors.
	 * It must not exceed hardware maximum and minimum.
	 */
	if (rq_depth > HINIC_MAX_QUEUE_DEPTH ||
	    rq_depth < HINIC_MIN_QUEUE_DEPTH) {
		PMD_DRV_LOG(ERR, "RX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
			    HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
			    (int)nb_desc, (int)rq_depth,
			    (int)dev->data->port_id, (int)queue_idx);
		return -EINVAL;
	}

	/*
	 * The RX descriptor ring will be cleaned after rxq->rx_free_thresh
	 * descriptors are used or if the number of descriptors required
	 * to transmit a packet is greater than the number of free RX
	 * descriptors.
	 * The following constraints must be satisfied:
	 *  - rx_free_thresh must be greater than 0.
	 *  - rx_free_thresh must be less than the size of the ring minus 1.
	 * When set to zero, the default value is used.
	 */
	rx_free_thresh = (u16)((rx_conf->rx_free_thresh) ?
			rx_conf->rx_free_thresh : HINIC_DEFAULT_RX_FREE_THRESH);
	if (rx_free_thresh >= (rq_depth - 1)) {
		PMD_DRV_LOG(ERR, "rx_free_thresh must be less than the number of RX descriptors minus 1. (rx_free_thresh=%u port=%d queue=%d)",
			    (unsigned int)rx_free_thresh,
			    (int)dev->data->port_id,
			    (int)queue_idx);
		return -EINVAL;
	}

	rxq = rte_zmalloc_socket("hinic_rx_queue", sizeof(struct hinic_rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "Allocate rxq[%d] failed, dev_name: %s",
			    queue_idx, dev->data->name);
		return -ENOMEM;
	}
	nic_dev->rxqs[queue_idx] = rxq;

	/* alloc rx rq hw wqe page */
	rc = hinic_create_rq(hwdev, queue_idx, rq_depth, socket_id);
	if (rc) {
		PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d",
			    queue_idx, dev->data->name, rq_depth);
		goto create_rq_fail;
	}

	/* mbuf pool must be assigned before setup rx resources */
	rxq->mb_pool = mp;

	rc = hinic_convert_rx_buf_size(rte_pktmbuf_data_room_size(rxq->mb_pool) -
				       RTE_PKTMBUF_HEADROOM, &buf_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Adjust buf size failed, dev_name: %s",
			    dev->data->name);
		goto adjust_bufsize_fail;
	}

	/* rx queue info, rearm control */
	rxq->wq = &hwdev->nic_io->rq_wq[queue_idx];
	rxq->pi_virt_addr = hwdev->nic_io->qps[queue_idx].rq.pi_virt_addr;
	rxq->nic_dev = nic_dev;
	rxq->q_id = queue_idx;
	rxq->q_depth = rq_depth;
	rxq->buf_len = (u16)buf_size;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->socket_id = socket_id;

	/* the last point can't do mbuf rearm in bulk */
	rxq->rxinfo_align_end = rxq->q_depth - rxq->rx_free_thresh;

	/* device port identifier */
	rxq->port_id = dev->data->port_id;

	/* alloc rx_cqe and prepare rq_wqe */
	rc = hinic_setup_rx_resources(rxq);
	if (rc) {
		PMD_DRV_LOG(ERR, "Setup rxq[%d] rx_resources failed, dev_name: %s",
			    queue_idx, dev->data->name);
		goto setup_rx_res_err;
	}

	/* record nic_dev rxq in rte_eth rx_queues */
	dev->data->rx_queues[queue_idx] = rxq;

	return 0;

setup_rx_res_err:
adjust_bufsize_fail:
	hinic_destroy_rq(hwdev, queue_idx);

create_rq_fail:
	rte_free(rxq);

	return rc;
}

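/*
 * After a reset the work queue is empty: cons_idx == prod_idx == 0 and
 * delta == q_depth free entries; the index mask relies on q_depth being
 * a power of two (guaranteed by the round-up in queue setup).
 */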
static void hinic_reset_rx_queue(struct rte_eth_dev *dev)
{
	struct hinic_rxq *rxq;
	struct hinic_nic_dev *nic_dev;
	int q_id = 0;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
		rxq = dev->data->rx_queues[q_id];

		rxq->wq->cons_idx = 0;
		rxq->wq->prod_idx = 0;
		rxq->wq->delta = rxq->q_depth;
		rxq->wq->mask = rxq->q_depth - 1;

		/* alloc mbuf to rq */
		hinic_rx_alloc_pkts(rxq);
	}
}

/**
 * DPDK callback to configure the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue_idx
 *   Transmit queue index.
 * @param nb_desc
 *   Number of descriptors for the transmit queue.
 * @param socket_id
 *   NUMA socket on which memory must be allocated.
 * @param tx_conf
 *   Tx queue configuration parameters.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int hinic_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			 uint16_t nb_desc, unsigned int socket_id,
			 __rte_unused const struct rte_eth_txconf *tx_conf)
{
	int rc;
	struct hinic_nic_dev *nic_dev;
	struct hinic_hwdev *hwdev;
	struct hinic_txq *txq;
	u16 sq_depth, tx_free_thresh;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	hwdev = nic_dev->hwdev;

	/* queue depth must be power of 2, otherwise will be aligned up */
	sq_depth = (nb_desc & (nb_desc - 1)) ?
			((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;

	/*
	 * Validate number of transmit descriptors.
	 * It must not exceed hardware maximum and minimum.
	 */
	if (sq_depth > HINIC_MAX_QUEUE_DEPTH ||
	    sq_depth < HINIC_MIN_QUEUE_DEPTH) {
		PMD_DRV_LOG(ERR, "TX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
			    HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
			    (int)nb_desc, (int)sq_depth,
			    (int)dev->data->port_id, (int)queue_idx);
		return -EINVAL;
	}

	/*
	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
	 * descriptors are used or if the number of descriptors required
	 * to transmit a packet is greater than the number of free TX
	 * descriptors.
	 * The following constraints must be satisfied:
	 *  - tx_free_thresh must be greater than 0.
	 *  - tx_free_thresh must be less than the size of the ring minus 1.
	 * When set to zero, the default value is used.
	 */
	tx_free_thresh = (u16)((tx_conf->tx_free_thresh) ?
			tx_conf->tx_free_thresh : HINIC_DEFAULT_TX_FREE_THRESH);
	if (tx_free_thresh >= (sq_depth - 1)) {
		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX descriptors minus 1. (tx_free_thresh=%u port=%d queue=%d)",
			    (unsigned int)tx_free_thresh, (int)dev->data->port_id,
			    (int)queue_idx);
		return -EINVAL;
	}

	txq = rte_zmalloc_socket("hinic_tx_queue", sizeof(struct hinic_txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!txq) {
		PMD_DRV_LOG(ERR, "Allocate txq[%d] failed, dev_name: %s",
			    queue_idx, dev->data->name);
		return -ENOMEM;
	}
	nic_dev->txqs[queue_idx] = txq;

	/* alloc tx sq hw wqe page */
	rc = hinic_create_sq(hwdev, queue_idx, sq_depth, socket_id);
	if (rc) {
		PMD_DRV_LOG(ERR, "Create txq[%d] failed, dev_name: %s, sq_depth: %d",
			    queue_idx, dev->data->name, sq_depth);
		goto create_sq_fail;
	}

	txq->q_id = queue_idx;
	txq->q_depth = sq_depth;
	txq->port_id = dev->data->port_id;
	txq->tx_free_thresh = tx_free_thresh;
	txq->nic_dev = nic_dev;
	txq->wq = &hwdev->nic_io->sq_wq[queue_idx];
	txq->sq = &hwdev->nic_io->qps[queue_idx].sq;
	txq->cons_idx_addr = hwdev->nic_io->qps[queue_idx].sq.cons_idx_addr;
	txq->sq_head_addr = HINIC_GET_WQ_HEAD(txq);
	txq->sq_bot_sge_addr = HINIC_GET_WQ_TAIL(txq) -
					sizeof(struct hinic_sq_bufdesc);
	txq->cos = nic_dev->default_cos;
	txq->socket_id = socket_id;

	/* alloc software txinfo */
	rc = hinic_setup_tx_resources(txq);
	if (rc) {
		PMD_DRV_LOG(ERR, "Setup txq[%d] tx_resources failed, dev_name: %s",
			    queue_idx, dev->data->name);
		goto setup_tx_res_fail;
	}

	/* record nic_dev txq in rte_eth tx_queues */
	dev->data->tx_queues[queue_idx] = txq;

	return HINIC_OK;

setup_tx_res_fail:
	hinic_destroy_sq(hwdev, queue_idx);

create_sq_fail:
	rte_free(txq);

	return rc;
}

static void hinic_reset_tx_queue(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev;
	struct hinic_txq *txq;
	struct hinic_nic_io *nic_io;
	struct hinic_hwdev *hwdev;
	volatile u32 *ci_addr;
	int q_id = 0;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	hwdev = nic_dev->hwdev;
	nic_io = hwdev->nic_io;

	for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
		txq = dev->data->tx_queues[q_id];

		txq->wq->cons_idx = 0;
		txq->wq->prod_idx = 0;
		txq->wq->delta = txq->q_depth;
		txq->wq->mask = txq->q_depth - 1;

		/* clear hardware ci */
		ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base,
							 q_id);
		*ci_addr = 0;
	}
}

/**
 * Get link speed capabilities from the NIC.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param speed_capa
 *   Pointer to the link speed capability bitmap.
 */
static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	u32 supported_link, advertised_link;
	int err;

#define HINIC_LINK_MODE_SUPPORT_1G	(1U << HINIC_GE_BASE_KX)

#define HINIC_LINK_MODE_SUPPORT_10G	(1U << HINIC_10GE_BASE_KR)

#define HINIC_LINK_MODE_SUPPORT_25G	((1U << HINIC_25GE_BASE_KR_S) | \
					 (1U << HINIC_25GE_BASE_CR_S) | \
					 (1U << HINIC_25GE_BASE_KR) | \
					 (1U << HINIC_25GE_BASE_CR))

#define HINIC_LINK_MODE_SUPPORT_40G	((1U << HINIC_40GE_BASE_KR4) | \
					 (1U << HINIC_40GE_BASE_CR4))

#define HINIC_LINK_MODE_SUPPORT_100G	((1U << HINIC_100GE_BASE_KR4) | \
					 (1U << HINIC_100GE_BASE_CR4))

	err = hinic_get_link_mode(nic_dev->hwdev,
				  &supported_link, &advertised_link);
	if (err || supported_link == HINIC_SUPPORTED_UNKNOWN ||
	    advertised_link == HINIC_SUPPORTED_UNKNOWN) {
		PMD_DRV_LOG(WARNING, "Get speed capability info failed, device: %s, port_id: %u",
			    nic_dev->proc_dev_name, dev->data->port_id);
	} else {
		*speed_capa = 0;
		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
			*speed_capa |= RTE_ETH_LINK_SPEED_1G;
		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
			*speed_capa |= RTE_ETH_LINK_SPEED_10G;
		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
			*speed_capa |= RTE_ETH_LINK_SPEED_25G;
		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
			*speed_capa |= RTE_ETH_LINK_SPEED_40G;
		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
			*speed_capa |= RTE_ETH_LINK_SPEED_100G;
	}
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param info
 *   Pointer to info structure output buffer.
 */
static int
hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	info->max_rx_queues = nic_dev->nic_cap.max_rqs;
	info->max_tx_queues = nic_dev->nic_cap.max_sqs;
	info->min_rx_bufsize = HINIC_MIN_RX_BUF_SIZE;
	info->max_rx_pktlen = HINIC_MAX_JUMBO_FRAME_SIZE;
	info->max_mac_addrs = HINIC_MAX_UC_MAC_ADDRS;
	info->min_mtu = HINIC_MIN_MTU_SIZE;
	info->max_mtu = HINIC_MAX_MTU_SIZE;
	info->max_lro_pkt_size = HINIC_MAX_LRO_SIZE;

	hinic_get_speed_capa(dev, &info->speed_capa);
	info->rx_queue_offload_capa = 0;
	info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
				RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
				RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
				RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
				RTE_ETH_RX_OFFLOAD_SCATTER |
				RTE_ETH_RX_OFFLOAD_TCP_LRO |
				RTE_ETH_RX_OFFLOAD_RSS_HASH;

	info->tx_queue_offload_capa = 0;
	info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
				RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
				RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				RTE_ETH_TX_OFFLOAD_TCP_TSO |
				RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	info->hash_key_size = HINIC_RSS_KEY_SIZE;
	info->reta_size = HINIC_RSS_INDIR_SIZE;
	info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL;
	info->rx_desc_lim = hinic_rx_desc_lim;
	info->tx_desc_lim = hinic_tx_desc_lim;

	/* Driver-preferred Rx/Tx parameters */
	info->default_rxportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
	info->default_txportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
	info->default_rxportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
	info->default_txportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
	info->default_rxportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
	info->default_txportconf.ring_size = HINIC_DEFAULT_RING_SIZE;

	return 0;
}

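/*
 * Per the ethdev fw_version_get convention, a positive return value is
 * the buffer size (including the terminating '\0') that the caller must
 * provide when fw_size is too small to hold the version string.
 */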
static int hinic_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
				size_t fw_size)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	char fw_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
	int err;

	err = hinic_get_mgmt_version(nic_dev->hwdev, fw_ver);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get fw version");
		return -EINVAL;
	}

	if (fw_size < strlen(fw_ver) + 1)
		return (strlen(fw_ver) + 1);

	snprintf(fw_version, fw_size, "%s", fw_ver);

	return 0;
}

static int hinic_config_rx_mode(struct hinic_nic_dev *nic_dev, u32 rx_mode_ctrl)
{
	int err;

	err = hinic_set_rx_mode(nic_dev->hwdev, rx_mode_ctrl);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set rx mode");
		return -EINVAL;
	}
	nic_dev->rx_mode_status = rx_mode_ctrl;

	return 0;
}

static int hinic_rxtx_configure(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int err;

	/* rx configure, if rss enable, need to init default configuration */
	err = hinic_rx_configure(dev);
	if (err) {
		PMD_DRV_LOG(ERR, "Configure rss failed");
		return err;
	}

	/* rx mode init */
	err = hinic_config_rx_mode(nic_dev, HINIC_DEFAULT_RX_MODE);
	if (err) {
		PMD_DRV_LOG(ERR, "Configure rx_mode:0x%x failed",
			HINIC_DEFAULT_RX_MODE);
		goto set_rx_mode_fail;
	}

	return HINIC_OK;

set_rx_mode_fail:
	hinic_rx_remove_configure(dev);

	return err;
}

static void hinic_remove_rxtx_configure(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	(void)hinic_config_rx_mode(nic_dev, 0);
	hinic_rx_remove_configure(dev);
}

static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
					  struct rte_eth_link *link)
{
	int rc;
	u8 port_link_status = 0;
	struct nic_port_info port_link_info;
	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
				RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
				RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
				RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};

	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
	if (rc)
		return rc;

	if (!port_link_status) {
		link->link_status = RTE_ETH_LINK_DOWN;
		link->link_speed = 0;
		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
		link->link_autoneg = RTE_ETH_LINK_FIXED;
		return HINIC_OK;
	}

	memset(&port_link_info, 0, sizeof(port_link_info));
	rc = hinic_get_port_info(nic_hwdev, &port_link_info);
	if (rc)
		return rc;

	link->link_speed = port_speed[port_link_info.speed % LINK_SPEED_MAX];
	link->link_duplex = port_link_info.duplex;
	link->link_autoneg = port_link_info.autoneg_state;
	link->link_status = port_link_status;

	return HINIC_OK;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion.
 *
 * @return
 *   0 link status changed, -1 link status not changed
 */
static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 10  /* 10ms */
#define MAX_REPEAT_TIME 100  /* 1s (100 * 10ms) in total */
	int rc = HINIC_OK;
	struct rte_eth_link link;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	unsigned int rep_cnt = MAX_REPEAT_TIME;

	memset(&link, 0, sizeof(link));
	do {
		/* Get link status information from hardware */
		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
		if (rc != HINIC_OK) {
			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Get link status failed");
			goto out;
		}

		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (rep_cnt--);

out:
	rc = rte_eth_linkstatus_set(dev, &link);
	return rc;
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int hinic_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int ret;

	/* link status follows the phy port status; bringing it up opens the PMA */
	ret = hinic_set_port_enable(nic_dev->hwdev, true);
	if (ret)
		PMD_DRV_LOG(ERR, "Set mac link up failed, dev_name: %s, port_id: %d",
			    nic_dev->proc_dev_name, dev->data->port_id);

	return ret;
}

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int hinic_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int ret;

	/* link status follows the phy port status; bringing it down closes the PMA */
	ret = hinic_set_port_enable(nic_dev->hwdev, false);
	if (ret)
		PMD_DRV_LOG(ERR, "Set mac link down failed, dev_name: %s, port_id: %d",
			    nic_dev->proc_dev_name, dev->data->port_id);

	return ret;
}

/**
 * DPDK callback to start the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int hinic_dev_start(struct rte_eth_dev *dev)
{
	int rc;
	char *name;
	struct hinic_nic_dev *nic_dev;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	name = dev->data->name;

	/* reset rx and tx queue */
	hinic_reset_rx_queue(dev);
	hinic_reset_tx_queue(dev);

	/* get func rx buf size */
	hinic_get_func_rx_buf_size(nic_dev);

	/* init txq and rxq context */
	rc = hinic_init_qp_ctxts(nic_dev->hwdev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Initialize qp context failed, dev_name: %s",
			    name);
		goto init_qp_fail;
	}

	/* rss template */
	rc = hinic_config_mq_mode(dev, TRUE);
	if (rc) {
		PMD_DRV_LOG(ERR, "Configure mq mode failed, dev_name: %s",
			    name);
		goto cfg_mq_mode_fail;
	}

	/* set default mtu */
	rc = hinic_set_port_mtu(nic_dev->hwdev, nic_dev->mtu_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Set mtu_size[%d] failed, dev_name: %s",
			    nic_dev->mtu_size, name);
		goto set_mtu_fail;
	}

	/* configure rss rx_mode and other rx or tx default feature */
	rc = hinic_rxtx_configure(dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Configure tx and rx failed, dev_name: %s",
			    name);
		goto cfg_rxtx_fail;
	}

	/* reactivate pf status so that uP reports async events */
	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);

	/* open virtual port and ready to start packet receiving */
	rc = hinic_set_vport_enable(nic_dev->hwdev, true);
	if (rc) {
		PMD_DRV_LOG(ERR, "Enable vport failed, dev_name:%s", name);
		goto en_vport_fail;
	}

	/* open physical port and start packet receiving */
	rc = hinic_set_port_enable(nic_dev->hwdev, true);
	if (rc) {
		PMD_DRV_LOG(ERR, "Enable physical port failed, dev_name: %s",
			    name);
		goto en_port_fail;
	}

	/* update eth_dev link status */
	if (dev->data->dev_conf.intr_conf.lsc != 0)
		(void)hinic_link_update(dev, 0);

	rte_bit_relaxed_set32(HINIC_DEV_START, &nic_dev->dev_status);

	return 0;

en_port_fail:
	(void)hinic_set_vport_enable(nic_dev->hwdev, false);

en_vport_fail:
	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_INIT);

	/* Flush tx && rx chip resources in case the vport enable partially took effect */
	(void)hinic_flush_qp_res(nic_dev->hwdev);
	rte_delay_ms(100);

	hinic_remove_rxtx_configure(dev);

cfg_rxtx_fail:
set_mtu_fail:
cfg_mq_mode_fail:
	hinic_free_qp_ctxts(nic_dev->hwdev);

init_qp_fail:
	hinic_free_all_rx_mbuf(dev);
	hinic_free_all_tx_mbuf(dev);

	return rc;
}

/**
 * DPDK callback to release the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Receive queue index.
 */
static void hinic_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct hinic_rxq *rxq = dev->data->rx_queues[qid];
	struct hinic_nic_dev *nic_dev;

	if (!rxq) {
		PMD_DRV_LOG(WARNING, "Rxq is null when release");
		return;
	}
	nic_dev = rxq->nic_dev;

	/* free rxq_pkt mbuf */
	hinic_free_all_rx_mbufs(rxq);

	/* free rxq_cqe, rxq_info */
	hinic_free_rx_resources(rxq);

	/* free root rq wq */
	hinic_destroy_rq(nic_dev->hwdev, rxq->q_id);

	nic_dev->rxqs[rxq->q_id] = NULL;

	/* free rxq */
	rte_free(rxq);
}

/**
 * DPDK callback to release the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Transmit queue index.
 */
static void hinic_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct hinic_txq *txq = dev->data->tx_queues[qid];
	struct hinic_nic_dev *nic_dev;

	if (!txq) {
		PMD_DRV_LOG(WARNING, "Txq is null when release");
		return;
	}
	nic_dev = txq->nic_dev;

	/* free txq_pkt mbuf */
	hinic_free_all_tx_mbufs(txq);

	/* free txq_info */
	hinic_free_tx_resources(txq);

	/* free root sq wq */
	hinic_destroy_sq(nic_dev->hwdev, txq->q_id);
	nic_dev->txqs[txq->q_id] = NULL;

	/* free txq */
	rte_free(txq);
}

static void hinic_free_all_rq(struct hinic_nic_dev *nic_dev)
{
	u16 q_id;

	for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
		hinic_destroy_rq(nic_dev->hwdev, q_id);
}

static void hinic_free_all_sq(struct hinic_nic_dev *nic_dev)
{
	u16 q_id;

	for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
		hinic_destroy_sq(nic_dev->hwdev, q_id);
}

/**
 * DPDK callback to stop the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static int hinic_dev_stop(struct rte_eth_dev *dev)
{
	int rc;
	char *name;
	uint16_t port_id;
	struct hinic_nic_dev *nic_dev;
	struct rte_eth_link link;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	name = dev->data->name;
	port_id = dev->data->port_id;

	dev->data->dev_started = 0;

	if (!rte_bit_relaxed_test_and_clear32(HINIC_DEV_START,
					      &nic_dev->dev_status)) {
		PMD_DRV_LOG(INFO, "Device %s already stopped", name);
		return 0;
	}

	/* just stop phy port and vport */
	rc = hinic_set_port_enable(nic_dev->hwdev, false);
	if (rc)
		PMD_DRV_LOG(WARNING, "Disable phy port failed, error: %d, dev_name: %s, port_id: %d",
			    rc, name, port_id);

	rc = hinic_set_vport_enable(nic_dev->hwdev, false);
	if (rc)
		PMD_DRV_LOG(WARNING, "Disable vport failed, error: %d, dev_name: %s, port_id: %d",
			    rc, name, port_id);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	(void)rte_eth_linkstatus_set(dev, &link);

	/* flush pending io request */
	rc = hinic_rx_tx_flush(nic_dev->hwdev);
	if (rc)
		PMD_DRV_LOG(WARNING, "Flush pending io failed, error: %d, dev_name: %s, port_id: %d",
			    rc, name, port_id);

	/* clean rss table and rx_mode */
	hinic_remove_rxtx_configure(dev);

	/* clean root context */
	hinic_free_qp_ctxts(nic_dev->hwdev);

	hinic_destroy_fdir_filter(dev);

	/* free mbuf */
	hinic_free_all_rx_mbuf(dev);
	hinic_free_all_tx_mbuf(dev);

	return 0;
}

static void hinic_disable_interrupt(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	int ret, retries = 0;

	rte_bit_relaxed_clear32(HINIC_DEV_INTR_EN, &nic_dev->dev_status);

	/* disable msix interrupt in hardware */
	hinic_set_msix_state(nic_dev->hwdev, 0, HINIC_MSIX_DISABLE);

	/* disable rte interrupt */
	ret = rte_intr_disable(pci_dev->intr_handle);
	if (ret)
		PMD_DRV_LOG(ERR, "Disable intr failed: %d", ret);

	do {
		ret = rte_intr_callback_unregister(pci_dev->intr_handle,
					hinic_dev_interrupt_handler, dev);
		if (ret >= 0) {
			break;
		} else if (ret == -EAGAIN) {
			rte_delay_ms(100);
			retries++;
		} else {
			PMD_DRV_LOG(ERR, "intr callback unregister failed: %d",
				    ret);
			break;
		}
	} while (retries < HINIC_INTR_CB_UNREG_MAX_RETRIES);

	if (retries == HINIC_INTR_CB_UNREG_MAX_RETRIES)
		PMD_DRV_LOG(ERR, "Unregister intr callback failed after %d retries",
			    retries);

	rte_bit_relaxed_clear32(HINIC_DEV_INIT, &nic_dev->dev_status);
}

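/*
 * Promiscuous and allmulticast updates both go through the cached
 * rx_mode_status under rx_mode_mutex, so toggling one mode does not
 * clobber the bits already programmed for the other.
 */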
static int hinic_set_dev_promiscuous(struct hinic_nic_dev *nic_dev, bool enable)
{
	u32 rx_mode_ctrl;
	int err;

	err = hinic_mutex_lock(&nic_dev->rx_mode_mutex);
	if (err)
		return err;

	rx_mode_ctrl = nic_dev->rx_mode_status;

	if (enable)
		rx_mode_ctrl |= HINIC_RX_MODE_PROMISC;
	else
		rx_mode_ctrl &= (~HINIC_RX_MODE_PROMISC);

	err = hinic_config_rx_mode(nic_dev, rx_mode_ctrl);

	(void)hinic_mutex_unlock(&nic_dev->rx_mode_mutex);

	return err;
}

/**
 * DPDK callback to get device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param stats
 *   Stats structure output buffer.
 *
 * @return
 *   0 on success and stats is filled,
 *   negative error value otherwise.
 */
static int
hinic_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	int i, err, q_num;
	u64 rx_discards_pmd = 0;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	struct hinic_vport_stats vport_stats;
	struct hinic_rxq *rxq = NULL;
	struct hinic_rxq_stats rxq_stats;
	struct hinic_txq *txq = NULL;
	struct hinic_txq_stats txq_stats;

	err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
	if (err) {
		PMD_DRV_LOG(ERR, "Get vport stats from fw failed, nic_dev: %s",
			nic_dev->proc_dev_name);
		return err;
	}

	dev->data->rx_mbuf_alloc_failed = 0;

	/* rx queue stats */
	q_num = (nic_dev->num_rq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
			nic_dev->num_rq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
	for (i = 0; i < q_num; i++) {
		rxq = nic_dev->rxqs[i];
		hinic_rxq_get_stats(rxq, &rxq_stats);
		stats->q_ipackets[i] = rxq_stats.packets;
		stats->q_ibytes[i] = rxq_stats.bytes;
		stats->q_errors[i] = rxq_stats.rx_discards;

		stats->ierrors += rxq_stats.errors;
		rx_discards_pmd += rxq_stats.rx_discards;
		dev->data->rx_mbuf_alloc_failed += rxq_stats.rx_nombuf;
	}

	/* tx queue stats */
	q_num = (nic_dev->num_sq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
			nic_dev->num_sq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
	for (i = 0; i < q_num; i++) {
		txq = nic_dev->txqs[i];
		hinic_txq_get_stats(txq, &txq_stats);
		stats->q_opackets[i] = txq_stats.packets;
		stats->q_obytes[i] = txq_stats.bytes;
		stats->oerrors += (txq_stats.tx_busy + txq_stats.off_errs);
	}

	/* vport stats */
	stats->oerrors += vport_stats.tx_discard_vport;

	stats->imissed = vport_stats.rx_discard_vport + rx_discards_pmd;

	stats->ipackets = (vport_stats.rx_unicast_pkts_vport +
			   vport_stats.rx_multicast_pkts_vport +
			   vport_stats.rx_broadcast_pkts_vport -
			   rx_discards_pmd);

	stats->opackets = (vport_stats.tx_unicast_pkts_vport +
			   vport_stats.tx_multicast_pkts_vport +
			   vport_stats.tx_broadcast_pkts_vport);

	stats->ibytes = (vport_stats.rx_unicast_bytes_vport +
			 vport_stats.rx_multicast_bytes_vport +
			 vport_stats.rx_broadcast_bytes_vport);

	stats->obytes = (vport_stats.tx_unicast_bytes_vport +
			 vport_stats.tx_multicast_bytes_vport +
			 vport_stats.tx_broadcast_bytes_vport);
	return 0;
}

/**
 * DPDK callback to clear device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static int hinic_dev_stats_reset(struct rte_eth_dev *dev)
{
	int qid;
	struct hinic_rxq *rxq = NULL;
	struct hinic_txq *txq = NULL;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int ret;

	ret = hinic_clear_vport_stats(nic_dev->hwdev);
	if (ret != 0)
		return ret;

	for (qid = 0; qid < nic_dev->num_rq; qid++) {
		rxq = nic_dev->rxqs[qid];
		hinic_rxq_stats_reset(rxq);
	}

	for (qid = 0; qid < nic_dev->num_sq; qid++) {
		txq = nic_dev->txqs[qid];
		hinic_txq_stats_reset(txq);
	}

	return 0;
}

/**
 * DPDK callback to clear device extended statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static int hinic_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int ret;

	ret = hinic_dev_stats_reset(dev);
	if (ret != 0)
		return ret;

	if (hinic_func_type(nic_dev->hwdev) != TYPE_VF) {
		ret = hinic_clear_phy_port_stats(nic_dev->hwdev);
		if (ret != 0)
			return ret;
	}

	return 0;
}

static void hinic_gen_random_mac_addr(struct rte_ether_addr *mac_addr)
{
	uint64_t random_value;

	/* Set Organizationally Unique Identifier (OUI) prefix */
	mac_addr->addr_bytes[0] = 0x00;
	mac_addr->addr_bytes[1] = 0x09;
	mac_addr->addr_bytes[2] = 0xC0;
	/* Force indication of locally assigned MAC address. */
	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
	/* Generate the last 3 bytes of the MAC address with a random number. */
	random_value = rte_rand();
	memcpy(&mac_addr->addr_bytes[3], &random_value, 3);
}

/**
 * Init mac_vlan table in NIC.
 *
 * @param eth_dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int hinic_init_mac_addr(struct rte_eth_dev *eth_dev)
{
	struct hinic_nic_dev *nic_dev =
				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
	uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
	u16 func_id = 0;
	int rc = 0;

	rc = hinic_get_default_mac(nic_dev->hwdev, addr_bytes);
	if (rc)
		return rc;

	rte_ether_addr_copy((struct rte_ether_addr *)addr_bytes,
			    &eth_dev->data->mac_addrs[0]);
	if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[0]))
		hinic_gen_random_mac_addr(&eth_dev->data->mac_addrs[0]);

	func_id = hinic_global_func_id(nic_dev->hwdev);
	rc = hinic_set_mac(nic_dev->hwdev,
			   eth_dev->data->mac_addrs[0].addr_bytes,
			   0, func_id);
	if (rc && rc != HINIC_PF_SET_VF_ALREADY)
		return rc;

	rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
			    &nic_dev->default_addr);

	return 0;
}

static void hinic_delete_mc_addr_list(struct hinic_nic_dev *nic_dev)
{
	u16 func_id;
	u32 i;

	func_id = hinic_global_func_id(nic_dev->hwdev);

	for (i = 0; i < HINIC_MAX_MC_MAC_ADDRS; i++) {
		if (rte_is_zero_ether_addr(&nic_dev->mc_list[i]))
			break;

		hinic_del_mac(nic_dev->hwdev, nic_dev->mc_list[i].addr_bytes,
			      0, func_id);
		memset(&nic_dev->mc_list[i], 0, sizeof(struct rte_ether_addr));
	}
}

/**
 * Deinit mac_vlan table in NIC.
 *
 * @param eth_dev
 *   Pointer to Ethernet device structure.
 */
static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev)
{
	struct hinic_nic_dev *nic_dev =
				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
	u16 func_id = 0;
	int rc;
	int i;

	func_id = hinic_global_func_id(nic_dev->hwdev);

	for (i = 0; i < HINIC_MAX_UC_MAC_ADDRS; i++) {
		if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[i]))
			continue;

		rc = hinic_del_mac(nic_dev->hwdev,
				   eth_dev->data->mac_addrs[i].addr_bytes,
				   0, func_id);
		if (rc && rc != HINIC_PF_SET_VF_ALREADY)
			PMD_DRV_LOG(ERR, "Delete mac table failed, dev_name: %s",
				    eth_dev->data->name);

		memset(&eth_dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	/* delete multicast mac addrs */
	hinic_delete_mc_addr_list(nic_dev);

	rte_free(nic_dev->mc_list);
}

static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int ret;

	PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d",
			dev->data->port_id, mtu, HINIC_MTU_TO_PKTLEN(mtu));

	ret = hinic_set_port_mtu(nic_dev->hwdev, mtu);
	if (ret) {
		PMD_DRV_LOG(ERR, "Set port mtu failed, ret: %d", ret);
		return ret;
	}

	nic_dev->mtu_size = mtu;

	return ret;
}

static void hinic_store_vlan_filter(struct hinic_nic_dev *nic_dev,
				    u16 vlan_id, bool on)
{
	u32 vid_idx, vid_bit;

	vid_idx = HINIC_VFTA_IDX(vlan_id);
	vid_bit = HINIC_VFTA_BIT(vlan_id);

	if (on)
		nic_dev->vfta[vid_idx] |= vid_bit;
	else
		nic_dev->vfta[vid_idx] &= ~vid_bit;
}

static bool hinic_find_vlan_filter(struct hinic_nic_dev *nic_dev,
				   uint16_t vlan_id)
{
	u32 vid_idx, vid_bit;

	vid_idx = HINIC_VFTA_IDX(vlan_id);
	vid_bit = HINIC_VFTA_BIT(vlan_id);

	return (nic_dev->vfta[vid_idx] & vid_bit) ? TRUE : FALSE;
}

/**
 * DPDK callback to set vlan filter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param vlan_id
 *   VLAN id used to filter VLAN packets.
 * @param enable
 *   Enable or disable the VLAN filter for this VLAN id.
 */
static int hinic_vlan_filter_set(struct rte_eth_dev *dev,
				 uint16_t vlan_id, int enable)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int err = 0;
	u16 func_id;

	if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	if (vlan_id == 0)
		return 0;

	func_id = hinic_global_func_id(nic_dev->hwdev);

	if (enable) {
		/* If vlanid is already set, just return */
		if (hinic_find_vlan_filter(nic_dev, vlan_id)) {
			PMD_DRV_LOG(INFO, "Vlan %u has been added, device: %s",
				    vlan_id, nic_dev->proc_dev_name);
			return 0;
		}

		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
					    func_id, TRUE);
	} else {
		/* If vlanid can't be found, just return */
		if (!hinic_find_vlan_filter(nic_dev, vlan_id)) {
			PMD_DRV_LOG(INFO, "Vlan %u is not in the vlan filter list, device: %s",
				    vlan_id, nic_dev->proc_dev_name);
			return 0;
		}

		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
					    func_id, FALSE);
	}

	if (err) {
		PMD_DRV_LOG(ERR, "%s vlan failed, func_id: %d, vlan_id: %d, err: %d",
			    enable ? "Add" : "Remove", func_id, vlan_id, err);
		return err;
	}

	hinic_store_vlan_filter(nic_dev, vlan_id, enable);

	PMD_DRV_LOG(INFO, "%s vlan %u succeed, device: %s",
		    enable ? "Add" : "Remove", vlan_id, nic_dev->proc_dev_name);
	return 0;
}

/**
 * DPDK callback to enable or disable vlan offload.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mask
 *   Definitions used for VLAN setting.
 */
static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	bool on;
	int err;

	/* Enable or disable VLAN filter */
	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) ?
			TRUE : FALSE;
		err = hinic_config_vlan_filter(nic_dev->hwdev, on);
		if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
			PMD_DRV_LOG(WARNING,
				"Current matching version does not support vlan filter configuration, device: %s, port_id: %d",
				nic_dev->proc_dev_name, dev->data->port_id);
		} else if (err) {
			PMD_DRV_LOG(ERR, "Failed to %s vlan filter, device: %s, port_id: %d, err: %d",
				    on ? "enable" : "disable",
				    nic_dev->proc_dev_name,
				    dev->data->port_id, err);
			return err;
		}

		PMD_DRV_LOG(INFO, "%s vlan filter succeed, device: %s, port_id: %d",
			    on ? "Enable" : "Disable",
			    nic_dev->proc_dev_name, dev->data->port_id);
	}

	/* Enable or disable VLAN stripping */
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		on = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) ?
			TRUE : FALSE;
		err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to %s vlan strip, device: %s, port_id: %d, err: %d",
				    on ? "enable" : "disable",
				    nic_dev->proc_dev_name,
				    dev->data->port_id, err);
			return err;
		}

		PMD_DRV_LOG(INFO, "%s vlan strip succeed, device: %s, port_id: %d",
			    on ? "Enable" : "Disable",
			    nic_dev->proc_dev_name, dev->data->port_id);
	}

	return 0;
}

static void hinic_remove_all_vlanid(struct rte_eth_dev *eth_dev)
{
	struct hinic_nic_dev *nic_dev =
		HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
	u16 func_id;
	int i;

	func_id = hinic_global_func_id(nic_dev->hwdev);
	for (i = 0; i <= RTE_ETHER_MAX_VLAN_ID; i++) {
		/* If can't find it, continue */
		if (!hinic_find_vlan_filter(nic_dev, i))
			continue;

		(void)hinic_add_remove_vlan(nic_dev->hwdev, i, func_id, FALSE);
		hinic_store_vlan_filter(nic_dev, i, false);
	}
}

static int hinic_set_dev_allmulticast(struct hinic_nic_dev *nic_dev,
				      bool enable)
{
	u32 rx_mode_ctrl;
	int err;

	err = hinic_mutex_lock(&nic_dev->rx_mode_mutex);
	if (err)
		return err;

	rx_mode_ctrl = nic_dev->rx_mode_status;

	if (enable)
		rx_mode_ctrl |= HINIC_RX_MODE_MC_ALL;
	else
		rx_mode_ctrl &= (~HINIC_RX_MODE_MC_ALL);

	err = hinic_config_rx_mode(nic_dev, rx_mode_ctrl);

	(void)hinic_mutex_unlock(&nic_dev->rx_mode_mutex);

	return err;
}

/**
 * DPDK callback to enable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
static int hinic_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	int ret = HINIC_OK;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	ret = hinic_set_dev_allmulticast(nic_dev, true);
	if (ret) {
		PMD_DRV_LOG(ERR, "Enable allmulticast failed, error: %d", ret);
		return ret;
	}

	PMD_DRV_LOG(INFO, "Enable allmulticast succeed, nic_dev: %s, port_id: %d",
		    nic_dev->proc_dev_name, dev->data->port_id);
	return 0;
}

/**
 * DPDK callback to disable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
static int hinic_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret = HINIC_OK;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	ret = hinic_set_dev_allmulticast(nic_dev, false);
	if (ret) {
		PMD_DRV_LOG(ERR, "Disable allmulticast failed, error: %d", ret);
		return ret;
	}

	PMD_DRV_LOG(INFO, "Disable allmulticast succeed, nic_dev: %s, port_id: %d",
		    nic_dev->proc_dev_name, dev->data->port_id);
	return 0;
}

/**
 * DPDK callback to enable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
static int hinic_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	int rc = HINIC_OK;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	PMD_DRV_LOG(INFO, "Enable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
		    nic_dev->proc_dev_name, dev->data->port_id,
		    dev->data->promiscuous);

	rc = hinic_set_dev_promiscuous(nic_dev, true);
	if (rc)
		PMD_DRV_LOG(ERR, "Enable promiscuous failed");

	return rc;
}

/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
static int hinic_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	int rc = HINIC_OK;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	PMD_DRV_LOG(INFO, "Disable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
		    nic_dev->proc_dev_name, dev->data->port_id,
		    dev->data->promiscuous);

	rc = hinic_set_dev_promiscuous(nic_dev, false);
	if (rc)
		PMD_DRV_LOG(ERR, "Disable promiscuous failed");

	return rc;
}

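/*
 * Illustrative application-side usage (not part of this driver): the two
 * callbacks below are reached through the ethdev flow control API, e.g.
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 *	fc_conf.mode = RTE_ETH_FC_FULL;
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */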
static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	struct nic_pause_config nic_pause;
	int err;

	memset(&nic_pause, 0, sizeof(nic_pause));

	err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause);
	if (err)
		return err;

	if (nic_dev->pause_set || !nic_pause.auto_neg) {
		nic_pause.rx_pause = nic_dev->nic_pause.rx_pause;
		nic_pause.tx_pause = nic_dev->nic_pause.tx_pause;
	}

	fc_conf->autoneg = nic_pause.auto_neg;

	if (nic_pause.tx_pause && nic_pause.rx_pause)
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (nic_pause.tx_pause)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
	else if (nic_pause.rx_pause)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else
		fc_conf->mode = RTE_ETH_FC_NONE;

	return 0;
}

static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	struct nic_pause_config nic_pause;
	int err;

	nic_pause.auto_neg = fc_conf->autoneg;

	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
		nic_pause.tx_pause = true;
	else
		nic_pause.tx_pause = false;

	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
		nic_pause.rx_pause = true;
	else
		nic_pause.rx_pause = false;

	err = hinic_set_pause_config(nic_dev->hwdev, nic_pause);
	if (err)
		return err;

	nic_dev->pause_set = true;
	nic_dev->nic_pause.auto_neg = nic_pause.auto_neg;
	nic_dev->nic_pause.rx_pause = nic_pause.rx_pause;
	nic_dev->nic_pause.tx_pause = nic_pause.tx_pause;

	PMD_DRV_LOG(INFO, "Set pause options, tx: %s, rx: %s, auto: %s",
		    nic_pause.tx_pause ? "on" : "off",
		    nic_pause.rx_pause ? "on" : "off",
		    nic_pause.auto_neg ? "on" : "off");

	return 0;
}

1923 */ 1924 static int hinic_rss_hash_update(struct rte_eth_dev *dev, 1925 struct rte_eth_rss_conf *rss_conf) 1926 { 1927 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1928 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 1929 u8 hashkey[HINIC_RSS_KEY_SIZE] = {0}; 1930 u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; 1931 u64 rss_hf = rss_conf->rss_hf; 1932 struct nic_rss_type rss_type = {0}; 1933 int err = 0; 1934 1935 if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) { 1936 PMD_DRV_LOG(WARNING, "RSS is not enabled"); 1937 return HINIC_OK; 1938 } 1939 1940 if (rss_conf->rss_key_len > HINIC_RSS_KEY_SIZE) { 1941 PMD_DRV_LOG(ERR, "Invalid rss key, rss_key_len: %d", 1942 rss_conf->rss_key_len); 1943 return HINIC_ERROR; 1944 } 1945 1946 if (rss_conf->rss_key) { 1947 memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len); 1948 err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, 1949 hashkey); 1950 if (err) { 1951 PMD_DRV_LOG(ERR, "Set rss template table failed"); 1952 goto disable_rss; 1953 } 1954 } 1955 1956 rss_type.ipv4 = (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4)) ? 1 : 0; 1957 rss_type.tcp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0; 1958 rss_type.ipv6 = (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6)) ? 1 : 0; 1959 rss_type.ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_EX) ? 1 : 0; 1960 rss_type.tcp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0; 1961 rss_type.tcp_ipv6_ext = (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) ? 1 : 0; 1962 rss_type.udp_ipv4 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0; 1963 rss_type.udp_ipv6 = (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0; 1964 1965 err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type); 1966 if (err) { 1967 PMD_DRV_LOG(ERR, "Set rss type table failed"); 1968 goto disable_rss; 1969 } 1970 1971 return 0; 1972 1973 disable_rss: 1974 memset(prio_tc, 0, sizeof(prio_tc)); 1975 (void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc); 1976 return err; 1977 } 1978 1979 /** 1980 * DPDK callback to get the RSS hash configuration. 1981 * 1982 * @param dev 1983 * Pointer to Ethernet device structure. 1984 * @param rss_conf 1985 * RSS configuration data. 1986 * 1987 * @return 1988 * 0 on success, negative error value otherwise. 1989 */ 1990 static int hinic_rss_conf_get(struct rte_eth_dev *dev, 1991 struct rte_eth_rss_conf *rss_conf) 1992 { 1993 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1994 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 1995 u8 hashkey[HINIC_RSS_KEY_SIZE] = {0}; 1996 struct nic_rss_type rss_type = {0}; 1997 int err; 1998 1999 if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) { 2000 PMD_DRV_LOG(WARNING, "RSS is not enabled"); 2001 return HINIC_ERROR; 2002 } 2003 2004 err = hinic_rss_get_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey); 2005 if (err) 2006 return err; 2007 2008 if (rss_conf->rss_key && 2009 rss_conf->rss_key_len >= HINIC_RSS_KEY_SIZE) { 2010 memcpy(rss_conf->rss_key, hashkey, sizeof(hashkey)); 2011 rss_conf->rss_key_len = sizeof(hashkey); 2012 } 2013 2014 err = hinic_get_rss_type(nic_dev->hwdev, tmpl_idx, &rss_type); 2015 if (err) 2016 return err; 2017 2018 rss_conf->rss_hf = 0; 2019 rss_conf->rss_hf |= rss_type.ipv4 ? 2020 (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4) : 0; 2021 rss_conf->rss_hf |= rss_type.tcp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_TCP : 0; 2022 rss_conf->rss_hf |= rss_type.ipv6 ? 2023 (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6) : 0; 2024 rss_conf->rss_hf |= rss_type.ipv6_ext ? RTE_ETH_RSS_IPV6_EX : 0; 2025 rss_conf->rss_hf |= rss_type.tcp_ipv6 ? 
RTE_ETH_RSS_NONFRAG_IPV6_TCP : 0; 2026 rss_conf->rss_hf |= rss_type.tcp_ipv6_ext ? RTE_ETH_RSS_IPV6_TCP_EX : 0; 2027 rss_conf->rss_hf |= rss_type.udp_ipv4 ? RTE_ETH_RSS_NONFRAG_IPV4_UDP : 0; 2028 rss_conf->rss_hf |= rss_type.udp_ipv6 ? RTE_ETH_RSS_NONFRAG_IPV6_UDP : 0; 2029 2030 return HINIC_OK; 2031 } 2032 2033 /** 2034 * DPDK callback to update the RSS redirection table. 2035 * 2036 * @param dev 2037 * Pointer to Ethernet device structure. 2038 * @param reta_conf 2039 * Pointer to RSS reta configuration data. 2040 * @param reta_size 2041 * Size of the RETA table. 2042 * 2043 * @return 2044 * 0 on success, negative error value otherwise. 2045 */ 2046 static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev, 2047 struct rte_eth_rss_reta_entry64 *reta_conf, 2048 uint16_t reta_size) 2049 { 2050 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2051 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 2052 u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; 2053 u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0}; 2054 int err = 0; 2055 u16 i = 0; 2056 u16 idx, shift; 2057 2058 if (!(nic_dev->flags & RTE_ETH_MQ_RX_RSS_FLAG)) 2059 return HINIC_OK; 2060 2061 if (reta_size != NIC_RSS_INDIR_SIZE) { 2062 PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size); 2063 return HINIC_ERROR; 2064 } 2065 2066 err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); 2067 if (err) 2068 return err; 2069 2070 /* update rss indir_tbl */ 2071 for (i = 0; i < reta_size; i++) { 2072 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2073 shift = i % RTE_ETH_RETA_GROUP_SIZE; 2074 2075 if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) { 2076 PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d " 2077 "exceeds the maximum rxq num: %d", i, 2078 reta_conf[idx].reta[shift], nic_dev->num_rq); 2079 return -EINVAL; 2080 } 2081 2082 if (reta_conf[idx].mask & (1ULL << shift)) 2083 indirtbl[i] = reta_conf[idx].reta[shift]; 2084 } 2085 2086 err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); 2087 if (err) 2088 goto disable_rss; 2089 2090 nic_dev->rss_indir_flag = true; 2091 2092 return 0; 2093 2094 disable_rss: 2095 memset(prio_tc, 0, sizeof(prio_tc)); 2096 (void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc); 2097 2098 return HINIC_ERROR; 2099 } 2100 2101 /** 2102 * DPDK callback to get the RSS indirection table. 2103 * 2104 * @param dev 2105 * Pointer to Ethernet device structure. 2106 * @param reta_conf 2107 * Pointer to RSS reta configuration data. 2108 * @param reta_size 2109 * Size of the RETA table. 2110 * 2111 * @return 2112 * 0 on success, negative error value otherwise. 
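 *
 * Usage sketch (application side, not part of this driver; port_id is an
 * assumed, already-configured port): reta_size must match the device
 * indirection table size (NIC_RSS_INDIR_SIZE for this PMD, reported by
 * rte_eth_dev_info_get() as reta_size). Each rte_eth_rss_reta_entry64 covers
 * RTE_ETH_RETA_GROUP_SIZE entries selected by its mask, e.g. to read back
 * the whole table:
 *
 *     struct rte_eth_rss_reta_entry64 reta[NIC_RSS_INDIR_SIZE / RTE_ETH_RETA_GROUP_SIZE];
 *     unsigned int g;
 *
 *     memset(reta, 0, sizeof(reta));
 *     for (g = 0; g < RTE_DIM(reta); g++)
 *         reta[g].mask = UINT64_MAX;
 *     int err = rte_eth_dev_rss_reta_query(port_id, reta, NIC_RSS_INDIR_SIZE);
 *
 * rte_eth_dev_rss_reta_update() takes the same arguments and reaches the
 * update callback above.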
2113 */ 2114 static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev, 2115 struct rte_eth_rss_reta_entry64 *reta_conf, 2116 uint16_t reta_size) 2117 { 2118 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2119 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 2120 int err = 0; 2121 u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0}; 2122 u16 idx, shift; 2123 u16 i = 0; 2124 2125 if (reta_size != NIC_RSS_INDIR_SIZE) { 2126 PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size); 2127 return HINIC_ERROR; 2128 } 2129 2130 err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); 2131 if (err) { 2132 PMD_DRV_LOG(ERR, "Get rss indirect table failed, error: %d", 2133 err); 2134 return err; 2135 } 2136 2137 for (i = 0; i < reta_size; i++) { 2138 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2139 shift = i % RTE_ETH_RETA_GROUP_SIZE; 2140 if (reta_conf[idx].mask & (1ULL << shift)) 2141 reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i]; 2142 } 2143 2144 return HINIC_OK; 2145 } 2146 2147 /** 2148 * DPDK callback to get extended device statistics. 2149 * 2150 * @param dev 2151 * Pointer to Ethernet device. 2152 * @param xstats 2153 * Pointer to rte extended stats table. 2154 * @param n 2155 * The size of the stats table. 2156 * 2157 * @return 2158 * Number of extended stats on success and stats is filled, 2159 * negative error value otherwise. 2160 */ 2161 static int hinic_dev_xstats_get(struct rte_eth_dev *dev, 2162 struct rte_eth_xstat *xstats, 2163 unsigned int n) 2164 { 2165 u16 qid = 0; 2166 u32 i; 2167 int err, count; 2168 struct hinic_nic_dev *nic_dev; 2169 struct hinic_phy_port_stats port_stats; 2170 struct hinic_vport_stats vport_stats; 2171 struct hinic_rxq *rxq = NULL; 2172 struct hinic_rxq_stats rxq_stats; 2173 struct hinic_txq *txq = NULL; 2174 struct hinic_txq_stats txq_stats; 2175 2176 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2177 count = hinic_xstats_calc_num(nic_dev); 2178 if ((int)n < count) 2179 return count; 2180 2181 count = 0; 2182 2183 /* Get stats from hinic_rxq_stats */ 2184 for (qid = 0; qid < nic_dev->num_rq; qid++) { 2185 rxq = nic_dev->rxqs[qid]; 2186 hinic_rxq_get_stats(rxq, &rxq_stats); 2187 2188 for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) { 2189 xstats[count].value = 2190 *(uint64_t *)(((char *)&rxq_stats) + 2191 hinic_rxq_stats_strings[i].offset); 2192 xstats[count].id = count; 2193 count++; 2194 } 2195 } 2196 2197 /* Get stats from hinic_txq_stats */ 2198 for (qid = 0; qid < nic_dev->num_sq; qid++) { 2199 txq = nic_dev->txqs[qid]; 2200 hinic_txq_get_stats(txq, &txq_stats); 2201 2202 for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) { 2203 xstats[count].value = 2204 *(uint64_t *)(((char *)&txq_stats) + 2205 hinic_txq_stats_strings[i].offset); 2206 xstats[count].id = count; 2207 count++; 2208 } 2209 } 2210 2211 /* Get stats from hinic_vport_stats */ 2212 err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats); 2213 if (err) 2214 return err; 2215 2216 for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) { 2217 xstats[count].value = 2218 *(uint64_t *)(((char *)&vport_stats) + 2219 hinic_vport_stats_strings[i].offset); 2220 xstats[count].id = count; 2221 count++; 2222 } 2223 2224 if (HINIC_IS_VF(nic_dev->hwdev)) 2225 return count; 2226 2227 /* Get stats from hinic_phy_port_stats */ 2228 err = hinic_get_phy_port_stats(nic_dev->hwdev, &port_stats); 2229 if (err) 2230 return err; 2231 2232 for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) { 2233 xstats[count].value = *(uint64_t *)(((char *)&port_stats) + 2234 hinic_phyport_stats_strings[i].offset); 2235 xstats[count].id = 
count; 2236 count++; 2237 } 2238 2239 return count; 2240 } 2241 2242 static void hinic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2243 struct rte_eth_rxq_info *qinfo) 2244 { 2245 struct hinic_rxq *rxq = dev->data->rx_queues[queue_id]; 2246 2247 qinfo->mp = rxq->mb_pool; 2248 qinfo->nb_desc = rxq->q_depth; 2249 } 2250 2251 static void hinic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2252 struct rte_eth_txq_info *qinfo) 2253 { 2254 struct hinic_txq *txq = dev->data->tx_queues[queue_id]; 2255 2256 qinfo->nb_desc = txq->q_depth; 2257 } 2258 2259 /** 2260 * DPDK callback to retrieve names of extended device statistics 2261 * 2262 * @param dev 2263 * Pointer to Ethernet device structure. 2264 * @param xstats_names 2265 * Buffer to insert names into. 2266 * 2267 * @return 2268 * Number of xstats names. 2269 */ 2270 static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev, 2271 struct rte_eth_xstat_name *xstats_names, 2272 __rte_unused unsigned int limit) 2273 { 2274 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2275 int count = 0; 2276 u16 i = 0, q_num; 2277 2278 if (xstats_names == NULL) 2279 return hinic_xstats_calc_num(nic_dev); 2280 2281 /* get pmd rxq stats */ 2282 for (q_num = 0; q_num < nic_dev->num_rq; q_num++) { 2283 for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) { 2284 snprintf(xstats_names[count].name, 2285 sizeof(xstats_names[count].name), 2286 "rxq%d_%s_pmd", 2287 q_num, hinic_rxq_stats_strings[i].name); 2288 count++; 2289 } 2290 } 2291 2292 /* get pmd txq stats */ 2293 for (q_num = 0; q_num < nic_dev->num_sq; q_num++) { 2294 for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) { 2295 snprintf(xstats_names[count].name, 2296 sizeof(xstats_names[count].name), 2297 "txq%d_%s_pmd", 2298 q_num, hinic_txq_stats_strings[i].name); 2299 count++; 2300 } 2301 } 2302 2303 /* get vport stats */ 2304 for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) { 2305 snprintf(xstats_names[count].name, 2306 sizeof(xstats_names[count].name), 2307 "%s", hinic_vport_stats_strings[i].name); 2308 count++; 2309 } 2310 2311 if (HINIC_IS_VF(nic_dev->hwdev)) 2312 return count; 2313 2314 /* get phy port stats */ 2315 for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) { 2316 snprintf(xstats_names[count].name, 2317 sizeof(xstats_names[count].name), 2318 "%s", hinic_phyport_stats_strings[i].name); 2319 count++; 2320 } 2321 2322 return count; 2323 } 2324 2325 /** 2326 * DPDK callback to set mac address 2327 * 2328 * @param dev 2329 * Pointer to Ethernet device structure. 2330 * @param addr 2331 * Pointer to mac address 2332 * @return 2333 * 0 on success, negative error value otherwise. 2334 */ 2335 static int hinic_set_mac_addr(struct rte_eth_dev *dev, 2336 struct rte_ether_addr *addr) 2337 { 2338 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2339 u16 func_id; 2340 int err; 2341 2342 func_id = hinic_global_func_id(nic_dev->hwdev); 2343 err = hinic_update_mac(nic_dev->hwdev, nic_dev->default_addr.addr_bytes, 2344 addr->addr_bytes, 0, func_id); 2345 if (err) 2346 return err; 2347 2348 rte_ether_addr_copy(addr, &nic_dev->default_addr); 2349 2350 PMD_DRV_LOG(INFO, "Set new mac address " RTE_ETHER_ADDR_PRT_FMT, 2351 RTE_ETHER_ADDR_BYTES(addr)); 2352 2353 return 0; 2354 } 2355 2356 /** 2357 * DPDK callback to remove a MAC address. 2358 * 2359 * @param dev 2360 * Pointer to Ethernet device structure. 2361 * @param index 2362 * MAC address index, should less than 128. 
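 *
 * Usage sketch (application side, not part of this driver; port_id and the
 * address are illustrative): unicast entries are managed through the generic
 * ethdev API, subject to the HINIC_MAX_UC_MAC_ADDRS limit, e.g.
 *
 *     struct rte_ether_addr addr = {
 *         .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *     };
 *     int err = rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 *     err = rte_eth_dev_mac_addr_remove(port_id, &addr);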
2363 */ 2364 static void hinic_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 2365 { 2366 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2367 u16 func_id; 2368 int ret; 2369 2370 if (index >= HINIC_MAX_UC_MAC_ADDRS) { 2371 PMD_DRV_LOG(INFO, "Remove mac index(%u) is out of range", 2372 index); 2373 return; 2374 } 2375 2376 func_id = hinic_global_func_id(nic_dev->hwdev); 2377 ret = hinic_del_mac(nic_dev->hwdev, 2378 dev->data->mac_addrs[index].addr_bytes, 0, func_id); 2379 if (ret) 2380 return; 2381 2382 memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr)); 2383 } 2384 2385 /** 2386 * DPDK callback to add a MAC address. 2387 * 2388 * @param dev 2389 * Pointer to Ethernet device structure. 2390 * @param mac_addr 2391 * Pointer to MAC address. 2392 * @param index 2393 * MAC address index; should be less than 128. 2394 * @param vmdq 2395 * VMDq pool index (not used). 2396 * 2397 * @return 2398 * 0 on success, negative error value otherwise. 2399 */ 2400 static int hinic_mac_addr_add(struct rte_eth_dev *dev, 2401 struct rte_ether_addr *mac_addr, uint32_t index, 2402 __rte_unused uint32_t vmdq) 2403 { 2404 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2405 unsigned int i; 2406 u16 func_id; 2407 int ret; 2408 2409 if (index >= HINIC_MAX_UC_MAC_ADDRS) { 2410 PMD_DRV_LOG(INFO, "Add mac index(%u) is out of range", index); 2411 return -EINVAL; 2412 } 2413 2414 /* First, make sure this address isn't already configured. */ 2415 for (i = 0; (i != HINIC_MAX_UC_MAC_ADDRS); ++i) { 2416 /* Skip this index, it's going to be reconfigured. */ 2417 if (i == index) 2418 continue; 2419 2420 if (memcmp(&dev->data->mac_addrs[i], 2421 mac_addr, sizeof(*mac_addr))) 2422 continue; 2423 2424 PMD_DRV_LOG(INFO, "MAC address already configured"); 2425 return -EADDRINUSE; 2426 } 2427 2428 func_id = hinic_global_func_id(nic_dev->hwdev); 2429 ret = hinic_set_mac(nic_dev->hwdev, mac_addr->addr_bytes, 0, func_id); 2430 if (ret) 2431 return ret; 2432 2433 dev->data->mac_addrs[index] = *mac_addr; 2434 return 0; 2435 } 2436 2437 /** 2438 * DPDK callback to set the multicast MAC address list. 2439 * 2440 * @param dev 2441 * Pointer to Ethernet device structure. 2442 * @param mc_addr_set 2443 * Pointer to the array of multicast MAC addresses. 2444 * @param nb_mc_addr 2445 * Number of multicast MAC addresses. 2446 * @return 2447 * 0 on success, negative error value otherwise. 2448 */ 2449 static int hinic_set_mc_addr_list(struct rte_eth_dev *dev, 2450 struct rte_ether_addr *mc_addr_set, 2451 uint32_t nb_mc_addr) 2452 { 2453 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2454 u16 func_id; 2455 int ret; 2456 u32 i; 2457 2458 func_id = hinic_global_func_id(nic_dev->hwdev); 2459 2460 /* delete old multicast addresses first */ 2461 hinic_delete_mc_addr_list(nic_dev); 2462 2463 if (nb_mc_addr > HINIC_MAX_MC_MAC_ADDRS) 2464 goto allmulti; 2465 2466 for (i = 0; i < nb_mc_addr; i++) { 2467 ret = hinic_set_mac(nic_dev->hwdev, mc_addr_set[i].addr_bytes, 2468 0, func_id); 2469 /* if adding a multicast address fails, fall back to allmulticast */ 2470 if (ret) { 2471 hinic_delete_mc_addr_list(nic_dev); 2472 goto allmulti; 2473 } 2474 2475 rte_ether_addr_copy(&mc_addr_set[i], &nic_dev->mc_list[i]); 2476 } 2477 2478 return 0; 2479 2480 allmulti: 2481 hinic_dev_allmulticast_enable(dev); 2482 2483 return 0; 2484 } 2485 2486 /** 2487 * DPDK callback to get flow operations. 2488 * 2489 * @param dev 2490 * Pointer to Ethernet device structure. 2491 * @param ops 2492 * Pointer to operation-specific structure.
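 *
 * Note: this hook is how the generic rte_flow layer (rte_flow_validate(),
 * rte_flow_create(), rte_flow_destroy(), ...) locates hinic_flow_ops; flow
 * rules themselves are created through those rte_flow calls, not through
 * this callback directly.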
2493 * 2494 * @return 2495 * 0 on success, negative error value otherwise. 2496 */ 2497 static int hinic_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused, 2498 const struct rte_flow_ops **ops) 2499 { 2500 *ops = &hinic_flow_ops; 2501 return 0; 2502 } 2503 2504 static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev) 2505 { 2506 struct nic_pause_config pause_config = {0}; 2507 int err; 2508 2509 pause_config.auto_neg = 0; 2510 pause_config.rx_pause = HINIC_DEFAUT_PAUSE_CONFIG; 2511 pause_config.tx_pause = HINIC_DEFAUT_PAUSE_CONFIG; 2512 2513 err = hinic_set_pause_config(nic_dev->hwdev, pause_config); 2514 if (err) 2515 return err; 2516 2517 nic_dev->pause_set = true; 2518 nic_dev->nic_pause.auto_neg = pause_config.auto_neg; 2519 nic_dev->nic_pause.rx_pause = pause_config.rx_pause; 2520 nic_dev->nic_pause.tx_pause = pause_config.tx_pause; 2521 2522 return 0; 2523 } 2524 2525 static int hinic_set_default_dcb_feature(struct hinic_nic_dev *nic_dev) 2526 { 2527 u8 up_tc[HINIC_DCB_UP_MAX] = {0}; 2528 u8 up_pgid[HINIC_DCB_UP_MAX] = {0}; 2529 u8 up_bw[HINIC_DCB_UP_MAX] = {0}; 2530 u8 pg_bw[HINIC_DCB_UP_MAX] = {0}; 2531 u8 up_strict[HINIC_DCB_UP_MAX] = {0}; 2532 int i = 0; 2533 2534 pg_bw[0] = 100; 2535 for (i = 0; i < HINIC_DCB_UP_MAX; i++) 2536 up_bw[i] = 100; 2537 2538 return hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw, 2539 up_pgid, up_bw, up_strict); 2540 } 2541 2542 static int hinic_pf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id) 2543 { 2544 u8 default_cos = 0; 2545 u8 valid_cos_bitmap; 2546 u8 i; 2547 2548 valid_cos_bitmap = hwdev->cfg_mgmt->svc_cap.valid_cos_bitmap; 2549 if (!valid_cos_bitmap) { 2550 PMD_DRV_LOG(ERR, "PF has no cos to support"); 2551 return -EFAULT; 2552 } 2553 2554 for (i = 0; i < NR_MAX_COS; i++) { 2555 if (valid_cos_bitmap & BIT(i)) 2556 default_cos = i; /* Use the highest valid cos id as the default cos */ 2557 } 2558 2559 *cos_id = default_cos; 2560 2561 return 0; 2562 } 2563 2564 static int hinic_init_default_cos(struct hinic_nic_dev *nic_dev) 2565 { 2566 u8 cos_id = 0; 2567 int err; 2568 2569 if (!HINIC_IS_VF(nic_dev->hwdev)) { 2570 err = hinic_pf_get_default_cos(nic_dev->hwdev, &cos_id); 2571 if (err) { 2572 PMD_DRV_LOG(ERR, "Get PF default cos failed, err: %d", 2573 err); 2574 return HINIC_ERROR; 2575 } 2576 } else { 2577 err = hinic_vf_get_default_cos(nic_dev->hwdev, &cos_id); 2578 if (err) { 2579 PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d", 2580 err); 2581 return HINIC_ERROR; 2582 } 2583 } 2584 2585 nic_dev->default_cos = cos_id; 2586 2587 PMD_DRV_LOG(INFO, "Default cos %d", nic_dev->default_cos); 2588 2589 return 0; 2590 } 2591 2592 static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev) 2593 { 2594 int err; 2595 2596 err = hinic_init_default_cos(nic_dev); 2597 if (err) 2598 return err; 2599 2600 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) 2601 return 0; 2602 2603 /* Restore DCB configuration to its default state */ 2604 err = hinic_set_default_dcb_feature(nic_dev); 2605 if (err) 2606 return err; 2607 2608 /* Enable global pause by default; PFC is then disabled for all user priorities (UP).
*/ 2609 err = hinic_set_default_pause_feature(nic_dev); 2610 if (err) 2611 return err; 2612 2613 err = hinic_reset_port_link_cfg(nic_dev->hwdev); 2614 if (err) 2615 return err; 2616 2617 err = hinic_set_link_status_follow(nic_dev->hwdev, 2618 HINIC_LINK_FOLLOW_PORT); 2619 if (err == HINIC_MGMT_CMD_UNSUPPORTED) 2620 PMD_DRV_LOG(WARNING, "Don't support to set link status follow phy port status"); 2621 else if (err) 2622 return err; 2623 2624 return hinic_set_anti_attack(nic_dev->hwdev, true); 2625 } 2626 2627 static int32_t hinic_card_workmode_check(struct hinic_nic_dev *nic_dev) 2628 { 2629 struct hinic_board_info info = { 0 }; 2630 int rc; 2631 2632 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) 2633 return 0; 2634 2635 rc = hinic_get_board_info(nic_dev->hwdev, &info); 2636 if (rc) 2637 return rc; 2638 2639 return (info.service_mode == HINIC_SERVICE_MODE_NIC ? HINIC_OK : 2640 HINIC_ERROR); 2641 } 2642 2643 static int hinic_copy_mempool_init(struct hinic_nic_dev *nic_dev) 2644 { 2645 nic_dev->cpy_mpool = rte_mempool_lookup(nic_dev->proc_dev_name); 2646 if (nic_dev->cpy_mpool == NULL) { 2647 nic_dev->cpy_mpool = 2648 rte_pktmbuf_pool_create(nic_dev->proc_dev_name, 2649 HINIC_COPY_MEMPOOL_DEPTH, 2650 0, 0, 2651 HINIC_COPY_MBUF_SIZE, 2652 rte_socket_id()); 2653 if (!nic_dev->cpy_mpool) { 2654 PMD_DRV_LOG(ERR, "Create copy mempool failed, errno: %d, dev_name: %s", 2655 rte_errno, nic_dev->proc_dev_name); 2656 return -ENOMEM; 2657 } 2658 } 2659 2660 return 0; 2661 } 2662 2663 static void hinic_copy_mempool_uninit(struct hinic_nic_dev *nic_dev) 2664 { 2665 if (nic_dev->cpy_mpool != NULL) 2666 rte_mempool_free(nic_dev->cpy_mpool); 2667 } 2668 2669 static int hinic_init_sw_rxtxqs(struct hinic_nic_dev *nic_dev) 2670 { 2671 u32 txq_size; 2672 u32 rxq_size; 2673 2674 /* allocate software txq array */ 2675 txq_size = nic_dev->nic_cap.max_sqs * sizeof(*nic_dev->txqs); 2676 nic_dev->txqs = kzalloc_aligned(txq_size, GFP_KERNEL); 2677 if (!nic_dev->txqs) { 2678 PMD_DRV_LOG(ERR, "Allocate txqs failed"); 2679 return -ENOMEM; 2680 } 2681 2682 /* allocate software rxq array */ 2683 rxq_size = nic_dev->nic_cap.max_rqs * sizeof(*nic_dev->rxqs); 2684 nic_dev->rxqs = kzalloc_aligned(rxq_size, GFP_KERNEL); 2685 if (!nic_dev->rxqs) { 2686 /* free txqs */ 2687 kfree(nic_dev->txqs); 2688 nic_dev->txqs = NULL; 2689 2690 PMD_DRV_LOG(ERR, "Allocate rxqs failed"); 2691 return -ENOMEM; 2692 } 2693 2694 return HINIC_OK; 2695 } 2696 2697 static void hinic_deinit_sw_rxtxqs(struct hinic_nic_dev *nic_dev) 2698 { 2699 kfree(nic_dev->txqs); 2700 nic_dev->txqs = NULL; 2701 2702 kfree(nic_dev->rxqs); 2703 nic_dev->rxqs = NULL; 2704 } 2705 2706 static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev) 2707 { 2708 struct hinic_nic_dev *nic_dev = 2709 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 2710 int rc; 2711 2712 nic_dev->hwdev = rte_zmalloc("hinic_hwdev", sizeof(*nic_dev->hwdev), 2713 RTE_CACHE_LINE_SIZE); 2714 if (!nic_dev->hwdev) { 2715 PMD_DRV_LOG(ERR, "Allocate hinic hwdev memory failed, dev_name: %s", 2716 eth_dev->data->name); 2717 return -ENOMEM; 2718 } 2719 nic_dev->hwdev->pcidev_hdl = RTE_ETH_DEV_TO_PCI(eth_dev); 2720 2721 /* init osdep*/ 2722 rc = hinic_osdep_init(nic_dev->hwdev); 2723 if (rc) { 2724 PMD_DRV_LOG(ERR, "Initialize os_dep failed, dev_name: %s", 2725 eth_dev->data->name); 2726 goto init_osdep_fail; 2727 } 2728 2729 /* init_hwif */ 2730 rc = hinic_hwif_res_init(nic_dev->hwdev); 2731 if (rc) { 2732 PMD_DRV_LOG(ERR, "Initialize hwif failed, dev_name: %s", 2733 eth_dev->data->name); 2734 goto init_hwif_fail; 
2735 } 2736 2737 /* init_cfg_mgmt */ 2738 rc = init_cfg_mgmt(nic_dev->hwdev); 2739 if (rc) { 2740 PMD_DRV_LOG(ERR, "Initialize cfg_mgmt failed, dev_name: %s", 2741 eth_dev->data->name); 2742 goto init_cfgmgnt_fail; 2743 } 2744 2745 /* init_aeqs */ 2746 rc = hinic_comm_aeqs_init(nic_dev->hwdev); 2747 if (rc) { 2748 PMD_DRV_LOG(ERR, "Initialize aeqs failed, dev_name: %s", 2749 eth_dev->data->name); 2750 goto init_aeqs_fail; 2751 } 2752 2753 /* init_pf_to_mgnt */ 2754 rc = hinic_comm_pf_to_mgmt_init(nic_dev->hwdev); 2755 if (rc) { 2756 PMD_DRV_LOG(ERR, "Initialize pf_to_mgmt failed, dev_name: %s", 2757 eth_dev->data->name); 2758 goto init_pf_to_mgmt_fail; 2759 } 2760 2761 /* init mailbox */ 2762 rc = hinic_comm_func_to_func_init(nic_dev->hwdev); 2763 if (rc) { 2764 PMD_DRV_LOG(ERR, "Initialize func_to_func failed, dev_name: %s", 2765 eth_dev->data->name); 2766 goto init_func_to_func_fail; 2767 } 2768 2769 rc = hinic_card_workmode_check(nic_dev); 2770 if (rc) { 2771 PMD_DRV_LOG(ERR, "Check card workmode failed, dev_name: %s", 2772 eth_dev->data->name); 2773 goto workmode_check_fail; 2774 } 2775 2776 /* do l2nic reset to make chip clear */ 2777 rc = hinic_l2nic_reset(nic_dev->hwdev); 2778 if (rc) { 2779 PMD_DRV_LOG(ERR, "Do l2nic reset failed, dev_name: %s", 2780 eth_dev->data->name); 2781 goto l2nic_reset_fail; 2782 } 2783 2784 /* init dma and aeq msix attribute table */ 2785 (void)hinic_init_attr_table(nic_dev->hwdev); 2786 2787 /* init_cmdqs */ 2788 rc = hinic_comm_cmdqs_init(nic_dev->hwdev); 2789 if (rc) { 2790 PMD_DRV_LOG(ERR, "Initialize cmdq failed, dev_name: %s", 2791 eth_dev->data->name); 2792 goto init_cmdq_fail; 2793 } 2794 2795 /* set hardware state active */ 2796 rc = hinic_activate_hwdev_state(nic_dev->hwdev); 2797 if (rc) { 2798 PMD_DRV_LOG(ERR, "Initialize resources state failed, dev_name: %s", 2799 eth_dev->data->name); 2800 goto init_resources_state_fail; 2801 } 2802 2803 /* init_capability */ 2804 rc = hinic_init_capability(nic_dev->hwdev); 2805 if (rc) { 2806 PMD_DRV_LOG(ERR, "Initialize capability failed, dev_name: %s", 2807 eth_dev->data->name); 2808 goto init_cap_fail; 2809 } 2810 2811 /* get nic capability */ 2812 if (!hinic_support_nic(nic_dev->hwdev, &nic_dev->nic_cap)) { 2813 PMD_DRV_LOG(ERR, "Hw doesn't support nic, dev_name: %s", 2814 eth_dev->data->name); 2815 rc = -EINVAL; 2816 goto nic_check_fail; 2817 } 2818 2819 /* init root cla and function table */ 2820 rc = hinic_init_nicio(nic_dev->hwdev); 2821 if (rc) { 2822 PMD_DRV_LOG(ERR, "Initialize nic_io failed, dev_name: %s", 2823 eth_dev->data->name); 2824 goto init_nicio_fail; 2825 } 2826 2827 /* init_software_txrxq */ 2828 rc = hinic_init_sw_rxtxqs(nic_dev); 2829 if (rc) { 2830 PMD_DRV_LOG(ERR, "Initialize sw_rxtxqs failed, dev_name: %s", 2831 eth_dev->data->name); 2832 goto init_sw_rxtxqs_fail; 2833 } 2834 2835 rc = hinic_copy_mempool_init(nic_dev); 2836 if (rc) { 2837 PMD_DRV_LOG(ERR, "Create copy mempool failed, dev_name: %s", 2838 eth_dev->data->name); 2839 goto init_mpool_fail; 2840 } 2841 2842 /* set hardware feature to default status */ 2843 rc = hinic_set_default_hw_feature(nic_dev); 2844 if (rc) { 2845 PMD_DRV_LOG(ERR, "Initialize hardware default features failed, dev_name: %s", 2846 eth_dev->data->name); 2847 goto set_default_hw_feature_fail; 2848 } 2849 2850 return 0; 2851 2852 set_default_hw_feature_fail: 2853 hinic_copy_mempool_uninit(nic_dev); 2854 2855 init_mpool_fail: 2856 hinic_deinit_sw_rxtxqs(nic_dev); 2857 2858 init_sw_rxtxqs_fail: 2859 hinic_deinit_nicio(nic_dev->hwdev); 2860 2861 
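	/*
	 * The error labels below unwind the initialization steps above in
	 * reverse order: each label releases only what was successfully set
	 * up before the failing step, ending with the hwdev memory itself.
	 */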
nic_check_fail: 2862 init_nicio_fail: 2863 init_cap_fail: 2864 hinic_deactivate_hwdev_state(nic_dev->hwdev); 2865 2866 init_resources_state_fail: 2867 hinic_comm_cmdqs_free(nic_dev->hwdev); 2868 2869 init_cmdq_fail: 2870 l2nic_reset_fail: 2871 workmode_check_fail: 2872 hinic_comm_func_to_func_free(nic_dev->hwdev); 2873 2874 init_func_to_func_fail: 2875 hinic_comm_pf_to_mgmt_free(nic_dev->hwdev); 2876 2877 init_pf_to_mgmt_fail: 2878 hinic_comm_aeqs_free(nic_dev->hwdev); 2879 2880 init_aeqs_fail: 2881 free_cfg_mgmt(nic_dev->hwdev); 2882 2883 init_cfgmgnt_fail: 2884 hinic_hwif_res_free(nic_dev->hwdev); 2885 2886 init_hwif_fail: 2887 hinic_osdep_deinit(nic_dev->hwdev); 2888 2889 init_osdep_fail: 2890 rte_free(nic_dev->hwdev); 2891 nic_dev->hwdev = NULL; 2892 2893 return rc; 2894 } 2895 2896 static void hinic_nic_dev_destroy(struct rte_eth_dev *eth_dev) 2897 { 2898 struct hinic_nic_dev *nic_dev = 2899 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 2900 2901 (void)hinic_set_link_status_follow(nic_dev->hwdev, 2902 HINIC_LINK_FOLLOW_DEFAULT); 2903 hinic_copy_mempool_uninit(nic_dev); 2904 hinic_deinit_sw_rxtxqs(nic_dev); 2905 hinic_deinit_nicio(nic_dev->hwdev); 2906 hinic_deactivate_hwdev_state(nic_dev->hwdev); 2907 hinic_comm_cmdqs_free(nic_dev->hwdev); 2908 hinic_comm_func_to_func_free(nic_dev->hwdev); 2909 hinic_comm_pf_to_mgmt_free(nic_dev->hwdev); 2910 hinic_comm_aeqs_free(nic_dev->hwdev); 2911 free_cfg_mgmt(nic_dev->hwdev); 2912 hinic_hwif_res_free(nic_dev->hwdev); 2913 hinic_osdep_deinit(nic_dev->hwdev); 2914 rte_free(nic_dev->hwdev); 2915 nic_dev->hwdev = NULL; 2916 } 2917 2918 /** 2919 * DPDK callback to close the device. 2920 * 2921 * @param dev 2922 * Pointer to Ethernet device structure. 2923 */ 2924 static int hinic_dev_close(struct rte_eth_dev *dev) 2925 { 2926 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2927 int ret; 2928 2929 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2930 return 0; 2931 2932 if (rte_bit_relaxed_test_and_set32(HINIC_DEV_CLOSE, 2933 &nic_dev->dev_status)) { 2934 PMD_DRV_LOG(WARNING, "Device %s already closed", 2935 dev->data->name); 2936 return 0; 2937 } 2938 2939 /* stop device first */ 2940 ret = hinic_dev_stop(dev); 2941 2942 /* rx_cqe, rx_info */ 2943 hinic_free_all_rx_resources(dev); 2944 2945 /* tx_info */ 2946 hinic_free_all_tx_resources(dev); 2947 2948 /* free wq, pi_dma_addr */ 2949 hinic_free_all_rq(nic_dev); 2950 2951 /* free wq, db_addr */ 2952 hinic_free_all_sq(nic_dev); 2953 2954 /* deinit mac vlan tbl */ 2955 hinic_deinit_mac_addr(dev); 2956 hinic_remove_all_vlanid(dev); 2957 2958 /* disable hardware and uio interrupt */ 2959 hinic_disable_interrupt(dev); 2960 2961 /* destroy rx mode mutex */ 2962 hinic_mutex_destroy(&nic_dev->rx_mode_mutex); 2963 2964 /* deinit nic hardware device */ 2965 hinic_nic_dev_destroy(dev); 2966 2967 return ret; 2968 } 2969 2970 static const struct eth_dev_ops hinic_pmd_ops = { 2971 .dev_configure = hinic_dev_configure, 2972 .dev_infos_get = hinic_dev_infos_get, 2973 .fw_version_get = hinic_fw_version_get, 2974 .rx_queue_setup = hinic_rx_queue_setup, 2975 .tx_queue_setup = hinic_tx_queue_setup, 2976 .dev_start = hinic_dev_start, 2977 .dev_set_link_up = hinic_dev_set_link_up, 2978 .dev_set_link_down = hinic_dev_set_link_down, 2979 .link_update = hinic_link_update, 2980 .rx_queue_release = hinic_rx_queue_release, 2981 .tx_queue_release = hinic_tx_queue_release, 2982 .dev_stop = hinic_dev_stop, 2983 .dev_close = hinic_dev_close, 2984 .mtu_set = hinic_dev_set_mtu, 2985 .vlan_filter_set = 
hinic_vlan_filter_set, 2986 .vlan_offload_set = hinic_vlan_offload_set, 2987 .allmulticast_enable = hinic_dev_allmulticast_enable, 2988 .allmulticast_disable = hinic_dev_allmulticast_disable, 2989 .promiscuous_enable = hinic_dev_promiscuous_enable, 2990 .promiscuous_disable = hinic_dev_promiscuous_disable, 2991 .flow_ctrl_get = hinic_flow_ctrl_get, 2992 .flow_ctrl_set = hinic_flow_ctrl_set, 2993 .rss_hash_update = hinic_rss_hash_update, 2994 .rss_hash_conf_get = hinic_rss_conf_get, 2995 .reta_update = hinic_rss_indirtbl_update, 2996 .reta_query = hinic_rss_indirtbl_query, 2997 .stats_get = hinic_dev_stats_get, 2998 .stats_reset = hinic_dev_stats_reset, 2999 .xstats_get = hinic_dev_xstats_get, 3000 .xstats_reset = hinic_dev_xstats_reset, 3001 .xstats_get_names = hinic_dev_xstats_get_names, 3002 .rxq_info_get = hinic_rxq_info_get, 3003 .txq_info_get = hinic_txq_info_get, 3004 .mac_addr_set = hinic_set_mac_addr, 3005 .mac_addr_remove = hinic_mac_addr_remove, 3006 .mac_addr_add = hinic_mac_addr_add, 3007 .set_mc_addr_list = hinic_set_mc_addr_list, 3008 .flow_ops_get = hinic_dev_flow_ops_get, 3009 }; 3010 3011 static const struct eth_dev_ops hinic_pmd_vf_ops = { 3012 .dev_configure = hinic_dev_configure, 3013 .dev_infos_get = hinic_dev_infos_get, 3014 .fw_version_get = hinic_fw_version_get, 3015 .rx_queue_setup = hinic_rx_queue_setup, 3016 .tx_queue_setup = hinic_tx_queue_setup, 3017 .dev_start = hinic_dev_start, 3018 .link_update = hinic_link_update, 3019 .rx_queue_release = hinic_rx_queue_release, 3020 .tx_queue_release = hinic_tx_queue_release, 3021 .dev_stop = hinic_dev_stop, 3022 .dev_close = hinic_dev_close, 3023 .mtu_set = hinic_dev_set_mtu, 3024 .vlan_filter_set = hinic_vlan_filter_set, 3025 .vlan_offload_set = hinic_vlan_offload_set, 3026 .allmulticast_enable = hinic_dev_allmulticast_enable, 3027 .allmulticast_disable = hinic_dev_allmulticast_disable, 3028 .rss_hash_update = hinic_rss_hash_update, 3029 .rss_hash_conf_get = hinic_rss_conf_get, 3030 .reta_update = hinic_rss_indirtbl_update, 3031 .reta_query = hinic_rss_indirtbl_query, 3032 .stats_get = hinic_dev_stats_get, 3033 .stats_reset = hinic_dev_stats_reset, 3034 .xstats_get = hinic_dev_xstats_get, 3035 .xstats_reset = hinic_dev_xstats_reset, 3036 .xstats_get_names = hinic_dev_xstats_get_names, 3037 .rxq_info_get = hinic_rxq_info_get, 3038 .txq_info_get = hinic_txq_info_get, 3039 .mac_addr_set = hinic_set_mac_addr, 3040 .mac_addr_remove = hinic_mac_addr_remove, 3041 .mac_addr_add = hinic_mac_addr_add, 3042 .set_mc_addr_list = hinic_set_mc_addr_list, 3043 .flow_ops_get = hinic_dev_flow_ops_get, 3044 }; 3045 3046 static const struct eth_dev_ops hinic_dev_sec_ops = { 3047 .dev_infos_get = hinic_dev_infos_get, 3048 }; 3049 3050 static int hinic_func_init(struct rte_eth_dev *eth_dev) 3051 { 3052 struct rte_pci_device *pci_dev; 3053 struct rte_ether_addr *eth_addr; 3054 struct hinic_nic_dev *nic_dev; 3055 struct hinic_filter_info *filter_info; 3056 struct hinic_tcam_info *tcam_info; 3057 u32 mac_size; 3058 int rc; 3059 3060 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3061 3062 /* EAL is SECONDARY and eth_dev is already created */ 3063 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3064 eth_dev->dev_ops = &hinic_dev_sec_ops; 3065 PMD_DRV_LOG(INFO, "Initialize %s in secondary process", 3066 eth_dev->data->name); 3067 3068 return 0; 3069 } 3070 3071 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 3072 3073 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 3074 memset(nic_dev, 0, sizeof(*nic_dev)); 3075 3076 
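	/*
	 * Build a per-device name from the PCI address; besides logging, it
	 * is reused as the lookup/creation name of the copy mempool in
	 * hinic_copy_mempool_init().
	 */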
snprintf(nic_dev->proc_dev_name, 3077 sizeof(nic_dev->proc_dev_name), 3078 "hinic-%.4x:%.2x:%.2x.%x", 3079 pci_dev->addr.domain, pci_dev->addr.bus, 3080 pci_dev->addr.devid, pci_dev->addr.function); 3081 3082 /* alloc mac_addrs */ 3083 mac_size = HINIC_MAX_UC_MAC_ADDRS * sizeof(struct rte_ether_addr); 3084 eth_addr = rte_zmalloc("hinic_mac", mac_size, 0); 3085 if (!eth_addr) { 3086 PMD_DRV_LOG(ERR, "Allocate ethernet addresses' memory failed, dev_name: %s", 3087 eth_dev->data->name); 3088 rc = -ENOMEM; 3089 goto eth_addr_fail; 3090 } 3091 eth_dev->data->mac_addrs = eth_addr; 3092 3093 mac_size = HINIC_MAX_MC_MAC_ADDRS * sizeof(struct rte_ether_addr); 3094 nic_dev->mc_list = rte_zmalloc("hinic_mc", mac_size, 0); 3095 if (!nic_dev->mc_list) { 3096 PMD_DRV_LOG(ERR, "Allocate mcast address' memory failed, dev_name: %s", 3097 eth_dev->data->name); 3098 rc = -ENOMEM; 3099 goto mc_addr_fail; 3100 } 3101 3102 /* create hardware nic_device */ 3103 rc = hinic_nic_dev_create(eth_dev); 3104 if (rc) { 3105 PMD_DRV_LOG(ERR, "Create nic device failed, dev_name: %s", 3106 eth_dev->data->name); 3107 goto create_nic_dev_fail; 3108 } 3109 3110 if (HINIC_IS_VF(nic_dev->hwdev)) 3111 eth_dev->dev_ops = &hinic_pmd_vf_ops; 3112 else 3113 eth_dev->dev_ops = &hinic_pmd_ops; 3114 3115 rc = hinic_init_mac_addr(eth_dev); 3116 if (rc) { 3117 PMD_DRV_LOG(ERR, "Initialize mac table failed, dev_name: %s", 3118 eth_dev->data->name); 3119 goto init_mac_fail; 3120 } 3121 3122 /* register callback func to eal lib */ 3123 rc = rte_intr_callback_register(pci_dev->intr_handle, 3124 hinic_dev_interrupt_handler, 3125 (void *)eth_dev); 3126 if (rc) { 3127 PMD_DRV_LOG(ERR, "Register rte interrupt callback failed, dev_name: %s", 3128 eth_dev->data->name); 3129 goto reg_intr_cb_fail; 3130 } 3131 3132 /* enable uio/vfio intr/eventfd mapping */ 3133 rc = rte_intr_enable(pci_dev->intr_handle); 3134 if (rc) { 3135 PMD_DRV_LOG(ERR, "Enable rte interrupt failed, dev_name: %s", 3136 eth_dev->data->name); 3137 goto enable_intr_fail; 3138 } 3139 rte_bit_relaxed_set32(HINIC_DEV_INTR_EN, &nic_dev->dev_status); 3140 3141 hinic_mutex_init(&nic_dev->rx_mode_mutex, NULL); 3142 3143 /* initialize filter info */ 3144 filter_info = &nic_dev->filter; 3145 tcam_info = &nic_dev->tcam; 3146 memset(filter_info, 0, sizeof(struct hinic_filter_info)); 3147 memset(tcam_info, 0, sizeof(struct hinic_tcam_info)); 3148 /* initialize 5tuple filter list */ 3149 TAILQ_INIT(&filter_info->fivetuple_list); 3150 TAILQ_INIT(&tcam_info->tcam_list); 3151 TAILQ_INIT(&nic_dev->filter_ntuple_list); 3152 TAILQ_INIT(&nic_dev->filter_ethertype_list); 3153 TAILQ_INIT(&nic_dev->filter_fdir_rule_list); 3154 TAILQ_INIT(&nic_dev->hinic_flow_list); 3155 3156 rte_bit_relaxed_set32(HINIC_DEV_INIT, &nic_dev->dev_status); 3157 PMD_DRV_LOG(INFO, "Initialize %s in primary successfully", 3158 eth_dev->data->name); 3159 3160 return 0; 3161 3162 enable_intr_fail: 3163 (void)rte_intr_callback_unregister(pci_dev->intr_handle, 3164 hinic_dev_interrupt_handler, 3165 (void *)eth_dev); 3166 3167 reg_intr_cb_fail: 3168 hinic_deinit_mac_addr(eth_dev); 3169 3170 init_mac_fail: 3171 eth_dev->dev_ops = NULL; 3172 hinic_nic_dev_destroy(eth_dev); 3173 3174 create_nic_dev_fail: 3175 rte_free(nic_dev->mc_list); 3176 nic_dev->mc_list = NULL; 3177 3178 mc_addr_fail: 3179 rte_free(eth_addr); 3180 eth_dev->data->mac_addrs = NULL; 3181 3182 eth_addr_fail: 3183 PMD_DRV_LOG(ERR, "Initialize %s in primary failed", 3184 eth_dev->data->name); 3185 return rc; 3186 } 3187 3188 static int hinic_dev_init(struct rte_eth_dev 
*eth_dev) 3189 { 3190 struct rte_pci_device *pci_dev; 3191 3192 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3193 3194 PMD_DRV_LOG(INFO, "Initializing pf hinic-%.4x:%.2x:%.2x.%x in %s process", 3195 pci_dev->addr.domain, pci_dev->addr.bus, 3196 pci_dev->addr.devid, pci_dev->addr.function, 3197 (rte_eal_process_type() == RTE_PROC_PRIMARY) ? 3198 "primary" : "secondary"); 3199 3200 /* rte_eth_dev rx_burst and tx_burst */ 3201 eth_dev->rx_pkt_burst = hinic_recv_pkts; 3202 eth_dev->tx_pkt_burst = hinic_xmit_pkts; 3203 3204 return hinic_func_init(eth_dev); 3205 } 3206 3207 static int hinic_dev_uninit(struct rte_eth_dev *dev) 3208 { 3209 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3210 return 0; 3211 3212 hinic_dev_close(dev); 3213 3214 return HINIC_OK; 3215 } 3216 3217 static struct rte_pci_id pci_id_hinic_map[] = { 3218 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_PRD) }, 3219 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_25GE) }, 3220 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_100GE) }, 3221 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF) }, 3222 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF_HV) }, 3223 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_DUAL_25GE) }, 3224 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_100GE) }, 3225 {.vendor_id = 0}, 3226 }; 3227 3228 static int hinic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3229 struct rte_pci_device *pci_dev) 3230 { 3231 return rte_eth_dev_pci_generic_probe(pci_dev, 3232 sizeof(struct hinic_nic_dev), hinic_dev_init); 3233 } 3234 3235 static int hinic_pci_remove(struct rte_pci_device *pci_dev) 3236 { 3237 return rte_eth_dev_pci_generic_remove(pci_dev, hinic_dev_uninit); 3238 } 3239 3240 static struct rte_pci_driver rte_hinic_pmd = { 3241 .id_table = pci_id_hinic_map, 3242 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 3243 .probe = hinic_pci_probe, 3244 .remove = hinic_pci_remove, 3245 }; 3246 3247 RTE_PMD_REGISTER_PCI(net_hinic, rte_hinic_pmd); 3248 RTE_PMD_REGISTER_PCI_TABLE(net_hinic, pci_id_hinic_map); 3249 RTE_LOG_REGISTER_DEFAULT(hinic_logtype, INFO); 3250
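
/*
 * Usage sketch (not part of the driver; the PCI address and core list are
 * illustrative): once a device matching pci_id_hinic_map is bound to a
 * DPDK-capable kernel driver, the PMD is probed automatically, e.g.
 *
 *     dpdk-devbind.py --bind=vfio-pci 0000:81:00.0
 *     dpdk-testpmd -l 0-3 -n 4 -a 0000:81:00.0 -- -i
 */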