1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2017 Huawei Technologies Co., Ltd 3 */ 4 5 #include <rte_pci.h> 6 #include <rte_bus_pci.h> 7 #include <rte_ethdev_pci.h> 8 #include <rte_mbuf.h> 9 #include <rte_malloc.h> 10 #include <rte_memcpy.h> 11 #include <rte_mempool.h> 12 #include <rte_errno.h> 13 #include <rte_ether.h> 14 15 #include "base/hinic_compat.h" 16 #include "base/hinic_pmd_hwdev.h" 17 #include "base/hinic_pmd_hwif.h" 18 #include "base/hinic_pmd_wq.h" 19 #include "base/hinic_pmd_cfg.h" 20 #include "base/hinic_pmd_mgmt.h" 21 #include "base/hinic_pmd_cmdq.h" 22 #include "base/hinic_pmd_niccfg.h" 23 #include "base/hinic_pmd_nicio.h" 24 #include "base/hinic_pmd_mbox.h" 25 #include "hinic_pmd_ethdev.h" 26 #include "hinic_pmd_tx.h" 27 #include "hinic_pmd_rx.h" 28 29 /* Vendor ID used by Huawei devices */ 30 #define HINIC_HUAWEI_VENDOR_ID 0x19E5 31 32 /* Hinic devices */ 33 #define HINIC_DEV_ID_PRD 0x1822 34 #define HINIC_DEV_ID_VF 0x375E 35 #define HINIC_DEV_ID_VF_HV 0x379E 36 37 /* Mezz card for Blade Server */ 38 #define HINIC_DEV_ID_MEZZ_25GE 0x0210 39 #define HINIC_DEV_ID_MEZZ_100GE 0x0205 40 41 /* 2*25G and 2*100G card */ 42 #define HINIC_DEV_ID_1822_DUAL_25GE 0x0206 43 #define HINIC_DEV_ID_1822_100GE 0x0200 44 45 #define HINIC_SERVICE_MODE_NIC 2 46 47 #define HINIC_INTR_CB_UNREG_MAX_RETRIES 10 48 49 #define DEFAULT_BASE_COS 4 50 #define NR_MAX_COS 8 51 52 #define HINIC_MIN_RX_BUF_SIZE 1024 53 #define HINIC_MAX_UC_MAC_ADDRS 128 54 #define HINIC_MAX_MC_MAC_ADDRS 2048 55 56 #define HINIC_DEFAULT_BURST_SIZE 32 57 #define HINIC_DEFAULT_NB_QUEUES 1 58 #define HINIC_DEFAULT_RING_SIZE 1024 59 #define HINIC_MAX_LRO_SIZE 65536 60 61 /* 62 * vlan_id is a 12 bit number. 63 * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. 64 * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element. 65 * The higher 7 bit val specifies VFTA array index. 
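 * For example, vlan_id 1000 (0x3E8) gives HINIC_VFTA_IDX(1000) = 1000 >> 5 = 31
 * and HINIC_VFTA_BIT(1000) = 1 << (1000 & 0x1F) = 1 << 8, i.e. bit 8 of
 * vfta[31] tracks that VLAN (worked example added for illustration).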
66 */ 67 #define HINIC_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F)) 68 #define HINIC_VFTA_IDX(vlan_id) ((vlan_id) >> 5) 69 70 #define HINIC_VLAN_FILTER_EN (1U << 0) 71 72 #define HINIC_MTU_TO_PKTLEN(mtu) \ 73 ((mtu) + ETH_HLEN + ETH_CRC_LEN) 74 75 #define HINIC_PKTLEN_TO_MTU(pktlen) \ 76 ((pktlen) - (ETH_HLEN + ETH_CRC_LEN)) 77 78 /* lro numer limit for one packet */ 79 #define HINIC_LRO_WQE_NUM_DEFAULT 8 80 81 /* Driver-specific log messages type */ 82 int hinic_logtype; 83 84 struct hinic_xstats_name_off { 85 char name[RTE_ETH_XSTATS_NAME_SIZE]; 86 u32 offset; 87 }; 88 89 #define HINIC_FUNC_STAT(_stat_item) { \ 90 .name = #_stat_item, \ 91 .offset = offsetof(struct hinic_vport_stats, _stat_item) \ 92 } 93 94 #define HINIC_PORT_STAT(_stat_item) { \ 95 .name = #_stat_item, \ 96 .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \ 97 } 98 99 static const struct hinic_xstats_name_off hinic_vport_stats_strings[] = { 100 HINIC_FUNC_STAT(tx_unicast_pkts_vport), 101 HINIC_FUNC_STAT(tx_unicast_bytes_vport), 102 HINIC_FUNC_STAT(tx_multicast_pkts_vport), 103 HINIC_FUNC_STAT(tx_multicast_bytes_vport), 104 HINIC_FUNC_STAT(tx_broadcast_pkts_vport), 105 HINIC_FUNC_STAT(tx_broadcast_bytes_vport), 106 107 HINIC_FUNC_STAT(rx_unicast_pkts_vport), 108 HINIC_FUNC_STAT(rx_unicast_bytes_vport), 109 HINIC_FUNC_STAT(rx_multicast_pkts_vport), 110 HINIC_FUNC_STAT(rx_multicast_bytes_vport), 111 HINIC_FUNC_STAT(rx_broadcast_pkts_vport), 112 HINIC_FUNC_STAT(rx_broadcast_bytes_vport), 113 114 HINIC_FUNC_STAT(tx_discard_vport), 115 HINIC_FUNC_STAT(rx_discard_vport), 116 HINIC_FUNC_STAT(tx_err_vport), 117 HINIC_FUNC_STAT(rx_err_vport), 118 }; 119 120 #define HINIC_VPORT_XSTATS_NUM (sizeof(hinic_vport_stats_strings) / \ 121 sizeof(hinic_vport_stats_strings[0])) 122 123 static const struct hinic_xstats_name_off hinic_phyport_stats_strings[] = { 124 HINIC_PORT_STAT(mac_rx_total_pkt_num), 125 HINIC_PORT_STAT(mac_rx_total_oct_num), 126 HINIC_PORT_STAT(mac_rx_bad_pkt_num), 127 HINIC_PORT_STAT(mac_rx_bad_oct_num), 128 HINIC_PORT_STAT(mac_rx_good_pkt_num), 129 HINIC_PORT_STAT(mac_rx_good_oct_num), 130 HINIC_PORT_STAT(mac_rx_uni_pkt_num), 131 HINIC_PORT_STAT(mac_rx_multi_pkt_num), 132 HINIC_PORT_STAT(mac_rx_broad_pkt_num), 133 HINIC_PORT_STAT(mac_tx_total_pkt_num), 134 HINIC_PORT_STAT(mac_tx_total_oct_num), 135 HINIC_PORT_STAT(mac_tx_bad_pkt_num), 136 HINIC_PORT_STAT(mac_tx_bad_oct_num), 137 HINIC_PORT_STAT(mac_tx_good_pkt_num), 138 HINIC_PORT_STAT(mac_tx_good_oct_num), 139 HINIC_PORT_STAT(mac_tx_uni_pkt_num), 140 HINIC_PORT_STAT(mac_tx_multi_pkt_num), 141 HINIC_PORT_STAT(mac_tx_broad_pkt_num), 142 HINIC_PORT_STAT(mac_rx_fragment_pkt_num), 143 HINIC_PORT_STAT(mac_rx_undersize_pkt_num), 144 HINIC_PORT_STAT(mac_rx_undermin_pkt_num), 145 HINIC_PORT_STAT(mac_rx_64_oct_pkt_num), 146 HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num), 147 HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num), 148 HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num), 149 HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num), 150 HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num), 151 HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num), 152 HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num), 153 HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num), 154 HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num), 155 HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num), 156 HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num), 157 HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num), 158 HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num), 159 HINIC_PORT_STAT(mac_rx_oversize_pkt_num), 160 HINIC_PORT_STAT(mac_rx_jabber_pkt_num), 161 
HINIC_PORT_STAT(mac_rx_mac_pause_num), 162 HINIC_PORT_STAT(mac_rx_pfc_pkt_num), 163 HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num), 164 HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num), 165 HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num), 166 HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num), 167 HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num), 168 HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num), 169 HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num), 170 HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num), 171 HINIC_PORT_STAT(mac_rx_mac_control_pkt_num), 172 HINIC_PORT_STAT(mac_rx_sym_err_pkt_num), 173 HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num), 174 HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num), 175 HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num), 176 HINIC_PORT_STAT(mac_tx_fragment_pkt_num), 177 HINIC_PORT_STAT(mac_tx_undersize_pkt_num), 178 HINIC_PORT_STAT(mac_tx_undermin_pkt_num), 179 HINIC_PORT_STAT(mac_tx_64_oct_pkt_num), 180 HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num), 181 HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num), 182 HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num), 183 HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num), 184 HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num), 185 HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num), 186 HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num), 187 HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num), 188 HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num), 189 HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num), 190 HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num), 191 HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num), 192 HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num), 193 HINIC_PORT_STAT(mac_tx_oversize_pkt_num), 194 HINIC_PORT_STAT(mac_trans_jabber_pkt_num), 195 HINIC_PORT_STAT(mac_tx_mac_pause_num), 196 HINIC_PORT_STAT(mac_tx_pfc_pkt_num), 197 HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num), 198 HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num), 199 HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num), 200 HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num), 201 HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num), 202 HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num), 203 HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num), 204 HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num), 205 HINIC_PORT_STAT(mac_tx_mac_control_pkt_num), 206 HINIC_PORT_STAT(mac_tx_err_all_pkt_num), 207 HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num), 208 HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num), 209 }; 210 211 #define HINIC_PHYPORT_XSTATS_NUM (sizeof(hinic_phyport_stats_strings) / \ 212 sizeof(hinic_phyport_stats_strings[0])) 213 214 static const struct hinic_xstats_name_off hinic_rxq_stats_strings[] = { 215 {"rx_nombuf", offsetof(struct hinic_rxq_stats, rx_nombuf)}, 216 {"burst_pkt", offsetof(struct hinic_rxq_stats, burst_pkts)}, 217 }; 218 219 #define HINIC_RXQ_XSTATS_NUM (sizeof(hinic_rxq_stats_strings) / \ 220 sizeof(hinic_rxq_stats_strings[0])) 221 222 static const struct hinic_xstats_name_off hinic_txq_stats_strings[] = { 223 {"tx_busy", offsetof(struct hinic_txq_stats, tx_busy)}, 224 {"offload_errors", offsetof(struct hinic_txq_stats, off_errs)}, 225 {"copy_pkts", offsetof(struct hinic_txq_stats, cpy_pkts)}, 226 {"rl_drop", offsetof(struct hinic_txq_stats, rl_drop)}, 227 {"burst_pkts", offsetof(struct hinic_txq_stats, burst_pkts)}, 228 {"sge_len0", offsetof(struct hinic_txq_stats, sge_len0)}, 229 {"mbuf_null", offsetof(struct hinic_txq_stats, mbuf_null)}, 230 }; 231 232 #define HINIC_TXQ_XSTATS_NUM (sizeof(hinic_txq_stats_strings) / \ 233 sizeof(hinic_txq_stats_strings[0])) 234 235 static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev) 236 { 237 if (HINIC_IS_VF(nic_dev->hwdev)) { 238 return (HINIC_VPORT_XSTATS_NUM + 239 HINIC_RXQ_XSTATS_NUM 
* nic_dev->num_rq +
240 HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
241 } else {
242 return (HINIC_VPORT_XSTATS_NUM +
243 HINIC_PHYPORT_XSTATS_NUM +
244 HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
245 HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
246 }
247 }
248
249 static const struct rte_eth_desc_lim hinic_rx_desc_lim = {
250 .nb_max = HINIC_MAX_QUEUE_DEPTH,
251 .nb_min = HINIC_MIN_QUEUE_DEPTH,
252 .nb_align = HINIC_RXD_ALIGN,
253 };
254
255 static const struct rte_eth_desc_lim hinic_tx_desc_lim = {
256 .nb_max = HINIC_MAX_QUEUE_DEPTH,
257 .nb_min = HINIC_MIN_QUEUE_DEPTH,
258 .nb_align = HINIC_TXD_ALIGN,
259 };
260
261 static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);
262
263 /**
264 * Interrupt handler triggered by NIC for handling
265 * a specific event.
266 *
267 * @param: The address of parameter (struct rte_eth_dev *) registered before.
268 */
269 static void hinic_dev_interrupt_handler(void *param)
270 {
271 struct rte_eth_dev *dev = param;
272 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
273
274 if (!hinic_test_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status)) {
275 PMD_DRV_LOG(WARNING, "Device's interrupt is disabled, ignore interrupt event, dev_name: %s, port_id: %d",
276 nic_dev->proc_dev_name, dev->data->port_id);
277 return;
278 }
279
280 /* aeq0 msg handler */
281 hinic_dev_handle_aeq_event(nic_dev->hwdev, param);
282 }
283
284 /**
285 * Ethernet device configuration.
286 *
287 * Prepare the driver for a given number of TX and RX queues, the MTU size,
288 * and the RSS configuration.
289 *
290 * @param dev
291 * Pointer to Ethernet device structure.
292 *
293 * @return
294 * 0 on success, negative error value otherwise.
295 */
296 static int hinic_dev_configure(struct rte_eth_dev *dev)
297 {
298 struct hinic_nic_dev *nic_dev;
299 struct hinic_nic_io *nic_io;
300 int err;
301
302 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
303 nic_io = nic_dev->hwdev->nic_io;
304
305 nic_dev->num_sq = dev->data->nb_tx_queues;
306 nic_dev->num_rq = dev->data->nb_rx_queues;
307
308 nic_io->num_sqs = dev->data->nb_tx_queues;
309 nic_io->num_rqs = dev->data->nb_rx_queues;
310
311 /* queue pair is max_num(sq, rq) */
312 nic_dev->num_qps = (nic_dev->num_sq > nic_dev->num_rq) ?
313 nic_dev->num_sq : nic_dev->num_rq;
314 nic_io->num_qps = nic_dev->num_qps;
315
316 if (nic_dev->num_qps > nic_io->max_qps) {
317 PMD_DRV_LOG(ERR,
318 "Queue number out of range, get queue_num:%d, max_queue_num:%d",
319 nic_dev->num_qps, nic_io->max_qps);
320 return -EINVAL;
321 }
322
323 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
324 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
325
326 /* mtu size is 256~9600 */
327 if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
328 dev->data->dev_conf.rxmode.max_rx_pkt_len >
329 HINIC_MAX_JUMBO_FRAME_SIZE) {
330 PMD_DRV_LOG(ERR,
331 "Max rx pkt len out of range, get max_rx_pkt_len:%d, "
332 "expect between %d and %d",
333 dev->data->dev_conf.rxmode.max_rx_pkt_len,
334 HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE);
335 return -EINVAL;
336 }
337
338 nic_dev->mtu_size =
339 HINIC_PKTLEN_TO_MTU(dev->data->dev_conf.rxmode.max_rx_pkt_len);
340
341 /* rss template */
342 err = hinic_config_mq_mode(dev, TRUE);
343 if (err) {
344 PMD_DRV_LOG(ERR, "Config multi-queue failed");
345 return err;
346 }
347
348 /* init vlan offload */
349 err = hinic_vlan_offload_set(dev,
350 ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
351 if (err) {
352 PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
353 (void)hinic_config_mq_mode(dev, FALSE);
354 return err;
355 }
356
357 /* clear fdir filter flag in function table */
358 hinic_free_fdir_filter(nic_dev);
359
360 return HINIC_OK;
361 }
362
363 /**
364 * DPDK callback to create the receive queue.
365 *
366 * @param dev
367 * Pointer to Ethernet device structure.
368 * @param queue_idx
369 * RX queue index.
370 * @param nb_desc
371 * Number of descriptors for receive queue.
372 * @param socket_id
373 * NUMA socket on which memory must be allocated.
374 * @param rx_conf
375 * Thresholds parameters (unused).
376 * @param mp
377 * Memory pool for buffer allocations.
378 *
379 * @return
380 * 0 on success, negative error value otherwise.
381 */
382 static int hinic_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
383 uint16_t nb_desc, unsigned int socket_id,
384 __rte_unused const struct rte_eth_rxconf *rx_conf,
385 struct rte_mempool *mp)
386 {
387 int rc;
388 struct hinic_nic_dev *nic_dev;
389 struct hinic_hwdev *hwdev;
390 struct hinic_rxq *rxq;
391 u16 rq_depth, rx_free_thresh;
392 u32 buf_size;
393
394 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
395 hwdev = nic_dev->hwdev;
396
397 /* queue depth must be power of 2, otherwise will be aligned up */
398 rq_depth = (nb_desc & (nb_desc - 1)) ?
399 ((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
400
401 /*
402 * Validate number of receive descriptors.
403 * It must be within the hardware minimum and maximum.
404 */
405 if (rq_depth > HINIC_MAX_QUEUE_DEPTH ||
406 rq_depth < HINIC_MIN_QUEUE_DEPTH) {
407 PMD_DRV_LOG(ERR, "RX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
408 HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
409 (int)nb_desc, (int)rq_depth,
410 (int)dev->data->port_id, (int)queue_idx);
411 return -EINVAL;
412 }
413
414 /*
415 * The RX descriptor ring will be cleaned after rxq->rx_free_thresh
416 * descriptors are used or if the number of descriptors required
417 * to receive a packet is greater than the number of free RX
418 * descriptors.
419 * The following constraints must be satisfied:
420 * rx_free_thresh must be greater than 0.
421 * rx_free_thresh must be less than the size of the ring minus 1.
422 * When set to zero use default values.
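 * For example, with an rq_depth of 1024 any rx_free_thresh from 1 to 1022
 * is accepted, while 0 selects HINIC_DEFAULT_RX_FREE_THRESH. Note that an
 * nb_desc that is not a power of two has already been rounded up at this
 * point (e.g. 1000 becomes 1024).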
423 */
424 rx_free_thresh = (u16)((rx_conf->rx_free_thresh) ?
425 rx_conf->rx_free_thresh : HINIC_DEFAULT_RX_FREE_THRESH);
426 if (rx_free_thresh >= (rq_depth - 1)) {
427 PMD_DRV_LOG(ERR, "rx_free_thresh must be less than the number of RX descriptors minus 1. (rx_free_thresh=%u port=%d queue=%d)",
428 (unsigned int)rx_free_thresh,
429 (int)dev->data->port_id,
430 (int)queue_idx);
431 return -EINVAL;
432 }
433
434 rxq = rte_zmalloc_socket("hinic_rx_queue", sizeof(struct hinic_rxq),
435 RTE_CACHE_LINE_SIZE, socket_id);
436 if (!rxq) {
437 PMD_DRV_LOG(ERR, "Allocate rxq[%d] failed, dev_name: %s",
438 queue_idx, dev->data->name);
439 return -ENOMEM;
440 }
441 nic_dev->rxqs[queue_idx] = rxq;
442
443 /* alloc rx rq hw wqe page */
444 rc = hinic_create_rq(hwdev, queue_idx, rq_depth, socket_id);
445 if (rc) {
446 PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d",
447 queue_idx, dev->data->name, rq_depth);
448 goto create_rq_fail;
449 }
450
451 /* mbuf pool must be assigned before setting up rx resources */
452 rxq->mb_pool = mp;
453
454 rc =
455 hinic_convert_rx_buf_size(rte_pktmbuf_data_room_size(rxq->mb_pool) -
456 RTE_PKTMBUF_HEADROOM, &buf_size);
457 if (rc) {
458 PMD_DRV_LOG(ERR, "Adjust buf size failed, dev_name: %s",
459 dev->data->name);
460 goto adjust_bufsize_fail;
461 }
462
463 /* rx queue info, rearm control */
464 rxq->wq = &hwdev->nic_io->rq_wq[queue_idx];
465 rxq->pi_virt_addr = hwdev->nic_io->qps[queue_idx].rq.pi_virt_addr;
466 rxq->nic_dev = nic_dev;
467 rxq->q_id = queue_idx;
468 rxq->q_depth = rq_depth;
469 rxq->buf_len = (u16)buf_size;
470 rxq->rx_free_thresh = rx_free_thresh;
471 rxq->socket_id = socket_id;
472
473 /* the last point can't do mbuf rearm in bulk */
474 rxq->rxinfo_align_end = rxq->q_depth - rxq->rx_free_thresh;
475
476 /* device port identifier */
477 rxq->port_id = dev->data->port_id;
478
479 /* alloc rx_cqe and prepare rq_wqe */
480 rc = hinic_setup_rx_resources(rxq);
481 if (rc) {
482 PMD_DRV_LOG(ERR, "Setup rxq[%d] rx_resources failed, dev_name: %s",
483 queue_idx, dev->data->name);
484 goto setup_rx_res_err;
485 }
486
487 /* record nic_dev rxq in rte_eth rx_queues */
488 dev->data->rx_queues[queue_idx] = rxq;
489
490 return 0;
491
492 setup_rx_res_err:
493 adjust_bufsize_fail:
494 hinic_destroy_rq(hwdev, queue_idx);
495
496 create_rq_fail:
497 rte_free(rxq);
498
499 return rc;
500 }
501
502 static void hinic_reset_rx_queue(struct rte_eth_dev *dev)
503 {
504 struct hinic_rxq *rxq;
505 struct hinic_nic_dev *nic_dev;
506 int q_id = 0;
507
508 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
509
510 for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
511 rxq = dev->data->rx_queues[q_id];
512
513 rxq->wq->cons_idx = 0;
514 rxq->wq->prod_idx = 0;
515 rxq->wq->delta = rxq->q_depth;
516 rxq->wq->mask = rxq->q_depth - 1;
517
518 /* alloc mbuf to rq */
519 hinic_rx_alloc_pkts(rxq);
520 }
521 }
522
523 /**
524 * DPDK callback to configure the transmit queue.
525 *
526 * @param dev
527 * Pointer to Ethernet device structure.
528 * @param queue_idx
529 * Transmit queue index.
530 * @param nb_desc
531 * Number of descriptors for transmit queue.
532 * @param socket_id
533 * NUMA socket on which memory must be allocated.
534 * @param tx_conf
535 * Tx queue configuration parameters.
536 *
537 * @return
538 * 0 on success, negative error value otherwise.
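 *
 * Note: as on the receive path, an nb_desc that is not a power of two is
 * rounded up to the next power of two before validation, e.g. a request
 * for 1500 descriptors results in an sq_depth of 2048.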
539 */ 540 static int hinic_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 541 uint16_t nb_desc, unsigned int socket_id, 542 __rte_unused const struct rte_eth_txconf *tx_conf) 543 { 544 int rc; 545 struct hinic_nic_dev *nic_dev; 546 struct hinic_hwdev *hwdev; 547 struct hinic_txq *txq; 548 u16 sq_depth, tx_free_thresh; 549 550 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 551 hwdev = nic_dev->hwdev; 552 553 /* queue depth must be power of 2, otherwise will be aligned up */ 554 sq_depth = (nb_desc & (nb_desc - 1)) ? 555 ((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc; 556 557 /* 558 * Validate number of transmit descriptors. 559 * It must not exceed hardware maximum and minimum. 560 */ 561 if (sq_depth > HINIC_MAX_QUEUE_DEPTH || 562 sq_depth < HINIC_MIN_QUEUE_DEPTH) { 563 PMD_DRV_LOG(ERR, "TX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)", 564 HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH, 565 (int)nb_desc, (int)sq_depth, 566 (int)dev->data->port_id, (int)queue_idx); 567 return -EINVAL; 568 } 569 570 /* 571 * The TX descriptor ring will be cleaned after txq->tx_free_thresh 572 * descriptors are used or if the number of descriptors required 573 * to transmit a packet is greater than the number of free TX 574 * descriptors. 575 * The following constraints must be satisfied: 576 * tx_free_thresh must be greater than 0. 577 * tx_free_thresh must be less than the size of the ring minus 1. 578 * When set to zero use default values. 579 */ 580 tx_free_thresh = (u16)((tx_conf->tx_free_thresh) ? 581 tx_conf->tx_free_thresh : HINIC_DEFAULT_TX_FREE_THRESH); 582 if (tx_free_thresh >= (sq_depth - 1)) { 583 PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX descriptors minus 1. (tx_free_thresh=%u port=%d queue=%d)", 584 (unsigned int)tx_free_thresh, (int)dev->data->port_id, 585 (int)queue_idx); 586 return -EINVAL; 587 } 588 589 txq = rte_zmalloc_socket("hinic_tx_queue", sizeof(struct hinic_txq), 590 RTE_CACHE_LINE_SIZE, socket_id); 591 if (!txq) { 592 PMD_DRV_LOG(ERR, "Allocate txq[%d] failed, dev_name: %s", 593 queue_idx, dev->data->name); 594 return -ENOMEM; 595 } 596 nic_dev->txqs[queue_idx] = txq; 597 598 /* alloc tx sq hw wqepage */ 599 rc = hinic_create_sq(hwdev, queue_idx, sq_depth, socket_id); 600 if (rc) { 601 PMD_DRV_LOG(ERR, "Create txq[%d] failed, dev_name: %s, sq_depth: %d", 602 queue_idx, dev->data->name, sq_depth); 603 goto create_sq_fail; 604 } 605 606 txq->q_id = queue_idx; 607 txq->q_depth = sq_depth; 608 txq->port_id = dev->data->port_id; 609 txq->tx_free_thresh = tx_free_thresh; 610 txq->nic_dev = nic_dev; 611 txq->wq = &hwdev->nic_io->sq_wq[queue_idx]; 612 txq->sq = &hwdev->nic_io->qps[queue_idx].sq; 613 txq->cons_idx_addr = hwdev->nic_io->qps[queue_idx].sq.cons_idx_addr; 614 txq->sq_head_addr = HINIC_GET_WQ_HEAD(txq); 615 txq->sq_bot_sge_addr = HINIC_GET_WQ_TAIL(txq) - 616 sizeof(struct hinic_sq_bufdesc); 617 txq->cos = nic_dev->default_cos; 618 txq->socket_id = socket_id; 619 620 /* alloc software txinfo */ 621 rc = hinic_setup_tx_resources(txq); 622 if (rc) { 623 PMD_DRV_LOG(ERR, "Setup txq[%d] tx_resources failed, dev_name: %s", 624 queue_idx, dev->data->name); 625 goto setup_tx_res_fail; 626 } 627 628 /* record nic_dev txq in rte_eth tx_queues */ 629 dev->data->tx_queues[queue_idx] = txq; 630 631 return HINIC_OK; 632 633 setup_tx_res_fail: 634 hinic_destroy_sq(hwdev, queue_idx); 635 636 create_sq_fail: 637 rte_free(txq); 638 639 return rc; 640 } 641 642 static void hinic_reset_tx_queue(struct rte_eth_dev *dev) 
643 { 644 struct hinic_nic_dev *nic_dev; 645 struct hinic_txq *txq; 646 struct hinic_nic_io *nic_io; 647 struct hinic_hwdev *hwdev; 648 volatile u32 *ci_addr; 649 int q_id = 0; 650 651 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 652 hwdev = nic_dev->hwdev; 653 nic_io = hwdev->nic_io; 654 655 for (q_id = 0; q_id < nic_dev->num_sq; q_id++) { 656 txq = dev->data->tx_queues[q_id]; 657 658 txq->wq->cons_idx = 0; 659 txq->wq->prod_idx = 0; 660 txq->wq->delta = txq->q_depth; 661 txq->wq->mask = txq->q_depth - 1; 662 663 /* clear hardware ci */ 664 ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base, 665 q_id); 666 *ci_addr = 0; 667 } 668 } 669 670 /** 671 * Get link speed from NIC. 672 * 673 * @param dev 674 * Pointer to Ethernet device structure. 675 * @param speed_capa 676 * Pointer to link speed structure. 677 */ 678 static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa) 679 { 680 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 681 u32 supported_link, advertised_link; 682 int err; 683 684 #define HINIC_LINK_MODE_SUPPORT_1G (1U << HINIC_GE_BASE_KX) 685 686 #define HINIC_LINK_MODE_SUPPORT_10G (1U << HINIC_10GE_BASE_KR) 687 688 #define HINIC_LINK_MODE_SUPPORT_25G ((1U << HINIC_25GE_BASE_KR_S) | \ 689 (1U << HINIC_25GE_BASE_CR_S) | \ 690 (1U << HINIC_25GE_BASE_KR) | \ 691 (1U << HINIC_25GE_BASE_CR)) 692 693 #define HINIC_LINK_MODE_SUPPORT_40G ((1U << HINIC_40GE_BASE_KR4) | \ 694 (1U << HINIC_40GE_BASE_CR4)) 695 696 #define HINIC_LINK_MODE_SUPPORT_100G ((1U << HINIC_100GE_BASE_KR4) | \ 697 (1U << HINIC_100GE_BASE_CR4)) 698 699 err = hinic_get_link_mode(nic_dev->hwdev, 700 &supported_link, &advertised_link); 701 if (err || supported_link == HINIC_SUPPORTED_UNKNOWN || 702 advertised_link == HINIC_SUPPORTED_UNKNOWN) { 703 PMD_DRV_LOG(WARNING, "Get speed capability info failed, device: %s, port_id: %u", 704 nic_dev->proc_dev_name, dev->data->port_id); 705 } else { 706 *speed_capa = 0; 707 if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G)) 708 *speed_capa |= ETH_LINK_SPEED_1G; 709 if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G)) 710 *speed_capa |= ETH_LINK_SPEED_10G; 711 if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G)) 712 *speed_capa |= ETH_LINK_SPEED_25G; 713 if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G)) 714 *speed_capa |= ETH_LINK_SPEED_40G; 715 if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G)) 716 *speed_capa |= ETH_LINK_SPEED_100G; 717 } 718 } 719 720 /** 721 * DPDK callback to get information about the device. 722 * 723 * @param dev 724 * Pointer to Ethernet device structure. 725 * @param info 726 * Pointer to Info structure output buffer. 
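 *
 * Reports queue and descriptor limits, rx/tx offload capabilities, RSS
 * parameters and the driver-preferred defaults (burst size 32, one queue,
 * ring size 1024).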
727 */ 728 static int 729 hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) 730 { 731 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 732 733 info->max_rx_queues = nic_dev->nic_cap.max_rqs; 734 info->max_tx_queues = nic_dev->nic_cap.max_sqs; 735 info->min_rx_bufsize = HINIC_MIN_RX_BUF_SIZE; 736 info->max_rx_pktlen = HINIC_MAX_JUMBO_FRAME_SIZE; 737 info->max_mac_addrs = HINIC_MAX_UC_MAC_ADDRS; 738 info->min_mtu = HINIC_MIN_MTU_SIZE; 739 info->max_mtu = HINIC_MAX_MTU_SIZE; 740 info->max_lro_pkt_size = HINIC_MAX_LRO_SIZE; 741 742 hinic_get_speed_capa(dev, &info->speed_capa); 743 info->rx_queue_offload_capa = 0; 744 info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | 745 DEV_RX_OFFLOAD_IPV4_CKSUM | 746 DEV_RX_OFFLOAD_UDP_CKSUM | 747 DEV_RX_OFFLOAD_TCP_CKSUM | 748 DEV_RX_OFFLOAD_VLAN_FILTER | 749 DEV_RX_OFFLOAD_SCATTER | 750 DEV_RX_OFFLOAD_JUMBO_FRAME | 751 DEV_RX_OFFLOAD_TCP_LRO | 752 DEV_RX_OFFLOAD_RSS_HASH; 753 754 info->tx_queue_offload_capa = 0; 755 info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | 756 DEV_TX_OFFLOAD_IPV4_CKSUM | 757 DEV_TX_OFFLOAD_UDP_CKSUM | 758 DEV_TX_OFFLOAD_TCP_CKSUM | 759 DEV_TX_OFFLOAD_SCTP_CKSUM | 760 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 761 DEV_TX_OFFLOAD_TCP_TSO | 762 DEV_TX_OFFLOAD_MULTI_SEGS; 763 764 info->hash_key_size = HINIC_RSS_KEY_SIZE; 765 info->reta_size = HINIC_RSS_INDIR_SIZE; 766 info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL; 767 info->rx_desc_lim = hinic_rx_desc_lim; 768 info->tx_desc_lim = hinic_tx_desc_lim; 769 770 /* Driver-preferred Rx/Tx parameters */ 771 info->default_rxportconf.burst_size = HINIC_DEFAULT_BURST_SIZE; 772 info->default_txportconf.burst_size = HINIC_DEFAULT_BURST_SIZE; 773 info->default_rxportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES; 774 info->default_txportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES; 775 info->default_rxportconf.ring_size = HINIC_DEFAULT_RING_SIZE; 776 info->default_txportconf.ring_size = HINIC_DEFAULT_RING_SIZE; 777 778 return 0; 779 } 780 781 static int hinic_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 782 size_t fw_size) 783 { 784 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 785 char fw_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0}; 786 int err; 787 788 err = hinic_get_mgmt_version(nic_dev->hwdev, fw_ver); 789 if (err) { 790 PMD_DRV_LOG(ERR, "Failed to get fw version"); 791 return -EINVAL; 792 } 793 794 if (fw_size < strlen(fw_ver) + 1) 795 return (strlen(fw_ver) + 1); 796 797 snprintf(fw_version, fw_size, "%s", fw_ver); 798 799 return 0; 800 } 801 802 static int hinic_config_rx_mode(struct hinic_nic_dev *nic_dev, u32 rx_mode_ctrl) 803 { 804 int err; 805 806 err = hinic_set_rx_mode(nic_dev->hwdev, rx_mode_ctrl); 807 if (err) { 808 PMD_DRV_LOG(ERR, "Failed to set rx mode"); 809 return -EINVAL; 810 } 811 nic_dev->rx_mode_status = rx_mode_ctrl; 812 813 return 0; 814 } 815 816 static int hinic_rxtx_configure(struct rte_eth_dev *dev) 817 { 818 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 819 int err; 820 821 /* rx configure, if rss enable, need to init default configuration */ 822 err = hinic_rx_configure(dev); 823 if (err) { 824 PMD_DRV_LOG(ERR, "Configure rss failed"); 825 return err; 826 } 827 828 /* rx mode init */ 829 err = hinic_config_rx_mode(nic_dev, HINIC_DEFAULT_RX_MODE); 830 if (err) { 831 PMD_DRV_LOG(ERR, "Configure rx_mode:0x%x failed", 832 HINIC_DEFAULT_RX_MODE); 833 goto set_rx_mode_fail; 834 } 835 836 return HINIC_OK; 837 838 set_rx_mode_fail: 839 hinic_rx_remove_configure(dev); 840 841 
return err; 842 } 843 844 static void hinic_remove_rxtx_configure(struct rte_eth_dev *dev) 845 { 846 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 847 848 (void)hinic_config_rx_mode(nic_dev, 0); 849 hinic_rx_remove_configure(dev); 850 } 851 852 static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev, 853 struct rte_eth_link *link) 854 { 855 int rc; 856 u8 port_link_status = 0; 857 struct nic_port_info port_link_info; 858 struct hinic_hwdev *nic_hwdev = nic_dev->hwdev; 859 uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M, 860 ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G, 861 ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G, 862 ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G}; 863 864 rc = hinic_get_link_status(nic_hwdev, &port_link_status); 865 if (rc) 866 return rc; 867 868 if (!port_link_status) { 869 link->link_status = ETH_LINK_DOWN; 870 link->link_speed = 0; 871 link->link_duplex = ETH_LINK_HALF_DUPLEX; 872 link->link_autoneg = ETH_LINK_FIXED; 873 return HINIC_OK; 874 } 875 876 memset(&port_link_info, 0, sizeof(port_link_info)); 877 rc = hinic_get_port_info(nic_hwdev, &port_link_info); 878 if (rc) 879 return rc; 880 881 link->link_speed = port_speed[port_link_info.speed % LINK_SPEED_MAX]; 882 link->link_duplex = port_link_info.duplex; 883 link->link_autoneg = port_link_info.autoneg_state; 884 link->link_status = port_link_status; 885 886 return HINIC_OK; 887 } 888 889 /** 890 * DPDK callback to retrieve physical link information. 891 * 892 * @param dev 893 * Pointer to Ethernet device structure. 894 * @param wait_to_complete 895 * Wait for request completion. 896 * 897 * @return 898 * 0 link status changed, -1 link status not changed 899 */ 900 static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete) 901 { 902 #define CHECK_INTERVAL 10 /* 10ms */ 903 #define MAX_REPEAT_TIME 100 /* 1s (100 * 10ms) in total */ 904 int rc = HINIC_OK; 905 struct rte_eth_link link; 906 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 907 unsigned int rep_cnt = MAX_REPEAT_TIME; 908 909 memset(&link, 0, sizeof(link)); 910 do { 911 /* Get link status information from hardware */ 912 rc = hinic_priv_get_dev_link_status(nic_dev, &link); 913 if (rc != HINIC_OK) { 914 link.link_speed = ETH_SPEED_NUM_NONE; 915 link.link_duplex = ETH_LINK_FULL_DUPLEX; 916 PMD_DRV_LOG(ERR, "Get link status failed"); 917 goto out; 918 } 919 920 if (!wait_to_complete || link.link_status) 921 break; 922 923 rte_delay_ms(CHECK_INTERVAL); 924 } while (rep_cnt--); 925 926 out: 927 rc = rte_eth_linkstatus_set(dev, &link); 928 return rc; 929 } 930 931 /** 932 * DPDK callback to bring the link UP. 933 * 934 * @param dev 935 * Pointer to Ethernet device structure. 936 * 937 * @return 938 * 0 on success, negative errno value on failure. 939 */ 940 static int hinic_dev_set_link_up(struct rte_eth_dev *dev) 941 { 942 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 943 int ret; 944 945 ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, true); 946 if (ret) { 947 PMD_DRV_LOG(ERR, "Enable port tx xsfp failed, dev_name: %s, port_id: %d", 948 nic_dev->proc_dev_name, dev->data->port_id); 949 return ret; 950 } 951 952 /* link status follow phy port status, up will open pma */ 953 ret = hinic_set_port_enable(nic_dev->hwdev, true); 954 if (ret) 955 PMD_DRV_LOG(ERR, "Set mac link up failed, dev_name: %s, port_id: %d", 956 nic_dev->proc_dev_name, dev->data->port_id); 957 958 return ret; 959 } 960 961 /** 962 * DPDK callback to bring the link DOWN. 
963 *
964 * @param dev
965 * Pointer to Ethernet device structure.
966 *
967 * @return
968 * 0 on success, negative errno value on failure.
969 */
970 static int hinic_dev_set_link_down(struct rte_eth_dev *dev)
971 {
972 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
973 int ret;
974
975 ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, false);
976 if (ret) {
977 PMD_DRV_LOG(ERR, "Disable port tx xsfp failed, dev_name: %s, port_id: %d",
978 nic_dev->proc_dev_name, dev->data->port_id);
979 return ret;
980 }
981
982 /* link status follows phy port status, down will close pma */
983 ret = hinic_set_port_enable(nic_dev->hwdev, false);
984 if (ret)
985 PMD_DRV_LOG(ERR, "Set mac link down failed, dev_name: %s, port_id: %d",
986 nic_dev->proc_dev_name, dev->data->port_id);
987
988 return ret;
989 }
990
991 /**
992 * DPDK callback to start the device.
993 *
994 * @param dev
995 * Pointer to Ethernet device structure.
996 *
997 * @return
998 * 0 on success, negative errno value on failure.
999 */
1000 static int hinic_dev_start(struct rte_eth_dev *dev)
1001 {
1002 int rc;
1003 char *name;
1004 struct hinic_nic_dev *nic_dev;
1005
1006 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1007 name = dev->data->name;
1008
1009 /* reset rx and tx queue */
1010 hinic_reset_rx_queue(dev);
1011 hinic_reset_tx_queue(dev);
1012
1013 /* get func rx buf size */
1014 hinic_get_func_rx_buf_size(nic_dev);
1015
1016 /* init txq and rxq context */
1017 rc = hinic_init_qp_ctxts(nic_dev->hwdev);
1018 if (rc) {
1019 PMD_DRV_LOG(ERR, "Initialize qp context failed, dev_name: %s",
1020 name);
1021 goto init_qp_fail;
1022 }
1023
1024 /* rss template */
1025 rc = hinic_config_mq_mode(dev, TRUE);
1026 if (rc) {
1027 PMD_DRV_LOG(ERR, "Configure mq mode failed, dev_name: %s",
1028 name);
1029 goto cfg_mq_mode_fail;
1030 }
1031
1032 /* set default mtu */
1033 rc = hinic_set_port_mtu(nic_dev->hwdev, nic_dev->mtu_size);
1034 if (rc) {
1035 PMD_DRV_LOG(ERR, "Set mtu_size[%d] failed, dev_name: %s",
1036 nic_dev->mtu_size, name);
1037 goto set_mtu_fail;
1038 }
1039
1040 /* configure rss rx_mode and other rx or tx default features */
1041 rc = hinic_rxtx_configure(dev);
1042 if (rc) {
1043 PMD_DRV_LOG(ERR, "Configure tx and rx failed, dev_name: %s",
1044 name);
1045 goto cfg_rxtx_fail;
1046 }
1047
1048 /* reactivate pf status, so that uP reports async events */
1049 hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
1050
1051 /* open virtual port and ready to start packet receiving */
1052 rc = hinic_set_vport_enable(nic_dev->hwdev, true);
1053 if (rc) {
1054 PMD_DRV_LOG(ERR, "Enable vport failed, dev_name:%s", name);
1055 goto en_vport_fail;
1056 }
1057
1058 /* open physical port and start packet receiving */
1059 rc = hinic_set_port_enable(nic_dev->hwdev, true);
1060 if (rc) {
1061 PMD_DRV_LOG(ERR, "Enable physical port failed, dev_name: %s",
1062 name);
1063 goto en_port_fail;
1064 }
1065
1066 /* update eth_dev link status */
1067 if (dev->data->dev_conf.intr_conf.lsc != 0)
1068 (void)hinic_link_update(dev, 0);
1069
1070 hinic_set_bit(HINIC_DEV_START, &nic_dev->dev_status);
1071
1072 return 0;
1073
1074 en_port_fail:
1075 (void)hinic_set_vport_enable(nic_dev->hwdev, false);
1076
1077 en_vport_fail:
1078 hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_INIT);
1079
1080 /* Flush tx && rx chip resources in case of set vport fake fail */
1081 (void)hinic_flush_qp_res(nic_dev->hwdev);
1082 rte_delay_ms(100);
1083
1084 hinic_remove_rxtx_configure(dev);
1085
1086 cfg_rxtx_fail:
1087 set_mtu_fail:
1088 cfg_mq_mode_fail: 1089 hinic_free_qp_ctxts(nic_dev->hwdev); 1090 1091 init_qp_fail: 1092 hinic_free_all_rx_mbuf(dev); 1093 hinic_free_all_tx_mbuf(dev); 1094 1095 return rc; 1096 } 1097 1098 /** 1099 * DPDK callback to release the receive queue. 1100 * 1101 * @param queue 1102 * Generic receive queue pointer. 1103 */ 1104 static void hinic_rx_queue_release(void *queue) 1105 { 1106 struct hinic_rxq *rxq = queue; 1107 struct hinic_nic_dev *nic_dev; 1108 1109 if (!rxq) { 1110 PMD_DRV_LOG(WARNING, "Rxq is null when release"); 1111 return; 1112 } 1113 nic_dev = rxq->nic_dev; 1114 1115 /* free rxq_pkt mbuf */ 1116 hinic_free_all_rx_mbufs(rxq); 1117 1118 /* free rxq_cqe, rxq_info */ 1119 hinic_free_rx_resources(rxq); 1120 1121 /* free root rq wq */ 1122 hinic_destroy_rq(nic_dev->hwdev, rxq->q_id); 1123 1124 nic_dev->rxqs[rxq->q_id] = NULL; 1125 1126 /* free rxq */ 1127 rte_free(rxq); 1128 } 1129 1130 /** 1131 * DPDK callback to release the transmit queue. 1132 * 1133 * @param queue 1134 * Generic transmit queue pointer. 1135 */ 1136 static void hinic_tx_queue_release(void *queue) 1137 { 1138 struct hinic_txq *txq = queue; 1139 struct hinic_nic_dev *nic_dev; 1140 1141 if (!txq) { 1142 PMD_DRV_LOG(WARNING, "Txq is null when release"); 1143 return; 1144 } 1145 nic_dev = txq->nic_dev; 1146 1147 /* free txq_pkt mbuf */ 1148 hinic_free_all_tx_mbufs(txq); 1149 1150 /* free txq_info */ 1151 hinic_free_tx_resources(txq); 1152 1153 /* free root sq wq */ 1154 hinic_destroy_sq(nic_dev->hwdev, txq->q_id); 1155 nic_dev->txqs[txq->q_id] = NULL; 1156 1157 /* free txq */ 1158 rte_free(txq); 1159 } 1160 1161 static void hinic_free_all_rq(struct hinic_nic_dev *nic_dev) 1162 { 1163 u16 q_id; 1164 1165 for (q_id = 0; q_id < nic_dev->num_rq; q_id++) 1166 hinic_destroy_rq(nic_dev->hwdev, q_id); 1167 } 1168 1169 static void hinic_free_all_sq(struct hinic_nic_dev *nic_dev) 1170 { 1171 u16 q_id; 1172 1173 for (q_id = 0; q_id < nic_dev->num_sq; q_id++) 1174 hinic_destroy_sq(nic_dev->hwdev, q_id); 1175 } 1176 1177 /** 1178 * DPDK callback to stop the device. 1179 * 1180 * @param dev 1181 * Pointer to Ethernet device structure. 
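 *
 * The stop path disables the physical port and vport, clears the recorded
 * link status, flushes pending IO, and then releases the rx/tx
 * configuration, QP contexts and queued mbufs.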
1182 */ 1183 static void hinic_dev_stop(struct rte_eth_dev *dev) 1184 { 1185 int rc; 1186 char *name; 1187 uint16_t port_id; 1188 struct hinic_nic_dev *nic_dev; 1189 struct rte_eth_link link; 1190 1191 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1192 name = dev->data->name; 1193 port_id = dev->data->port_id; 1194 1195 if (!hinic_test_and_clear_bit(HINIC_DEV_START, &nic_dev->dev_status)) { 1196 PMD_DRV_LOG(INFO, "Device %s already stopped", name); 1197 return; 1198 } 1199 1200 /* just stop phy port and vport */ 1201 rc = hinic_set_port_enable(nic_dev->hwdev, false); 1202 if (rc) 1203 PMD_DRV_LOG(WARNING, "Disable phy port failed, error: %d, dev_name: %s, port_id: %d", 1204 rc, name, port_id); 1205 1206 rc = hinic_set_vport_enable(nic_dev->hwdev, false); 1207 if (rc) 1208 PMD_DRV_LOG(WARNING, "Disable vport failed, error: %d, dev_name: %s, port_id: %d", 1209 rc, name, port_id); 1210 1211 /* Clear recorded link status */ 1212 memset(&link, 0, sizeof(link)); 1213 (void)rte_eth_linkstatus_set(dev, &link); 1214 1215 /* flush pending io request */ 1216 rc = hinic_rx_tx_flush(nic_dev->hwdev); 1217 if (rc) 1218 PMD_DRV_LOG(WARNING, "Flush pending io failed, error: %d, dev_name: %s, port_id: %d", 1219 rc, name, port_id); 1220 1221 /* clean rss table and rx_mode */ 1222 hinic_remove_rxtx_configure(dev); 1223 1224 /* clean root context */ 1225 hinic_free_qp_ctxts(nic_dev->hwdev); 1226 1227 hinic_destroy_fdir_filter(dev); 1228 1229 /* free mbuf */ 1230 hinic_free_all_rx_mbuf(dev); 1231 hinic_free_all_tx_mbuf(dev); 1232 } 1233 1234 static void hinic_disable_interrupt(struct rte_eth_dev *dev) 1235 { 1236 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1237 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1238 int ret, retries = 0; 1239 1240 hinic_clear_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status); 1241 1242 /* disable msix interrupt in hardware */ 1243 hinic_set_msix_state(nic_dev->hwdev, 0, HINIC_MSIX_DISABLE); 1244 1245 /* disable rte interrupt */ 1246 ret = rte_intr_disable(&pci_dev->intr_handle); 1247 if (ret) 1248 PMD_DRV_LOG(ERR, "Disable intr failed: %d", ret); 1249 1250 do { 1251 ret = 1252 rte_intr_callback_unregister(&pci_dev->intr_handle, 1253 hinic_dev_interrupt_handler, dev); 1254 if (ret >= 0) { 1255 break; 1256 } else if (ret == -EAGAIN) { 1257 rte_delay_ms(100); 1258 retries++; 1259 } else { 1260 PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", 1261 ret); 1262 break; 1263 } 1264 } while (retries < HINIC_INTR_CB_UNREG_MAX_RETRIES); 1265 1266 if (retries == HINIC_INTR_CB_UNREG_MAX_RETRIES) 1267 PMD_DRV_LOG(ERR, "Unregister intr callback failed after %d retries", 1268 retries); 1269 } 1270 1271 static int hinic_set_dev_promiscuous(struct hinic_nic_dev *nic_dev, bool enable) 1272 { 1273 u32 rx_mode_ctrl = nic_dev->rx_mode_status; 1274 1275 if (enable) 1276 rx_mode_ctrl |= HINIC_RX_MODE_PROMISC; 1277 else 1278 rx_mode_ctrl &= (~HINIC_RX_MODE_PROMISC); 1279 1280 return hinic_config_rx_mode(nic_dev, rx_mode_ctrl); 1281 } 1282 1283 /** 1284 * DPDK callback to get device statistics. 1285 * 1286 * @param dev 1287 * Pointer to Ethernet device structure. 1288 * @param stats 1289 * Stats structure output buffer. 1290 * 1291 * @return 1292 * 0 on success and stats is filled, 1293 * negative error value otherwise. 
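 *
 * Totals are derived from the vport counters: ipackets subtracts packets
 * the PMD itself dropped, imissed combines vport and per-queue rx
 * discards, and oerrors adds per-queue tx_busy/offload errors to the
 * vport tx discards.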
1294 */ 1295 static int 1296 hinic_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 1297 { 1298 int i, err, q_num; 1299 u64 rx_discards_pmd = 0; 1300 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1301 struct hinic_vport_stats vport_stats; 1302 struct hinic_rxq *rxq = NULL; 1303 struct hinic_rxq_stats rxq_stats; 1304 struct hinic_txq *txq = NULL; 1305 struct hinic_txq_stats txq_stats; 1306 1307 err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats); 1308 if (err) { 1309 PMD_DRV_LOG(ERR, "Get vport stats from fw failed, nic_dev: %s", 1310 nic_dev->proc_dev_name); 1311 return err; 1312 } 1313 1314 /* rx queue stats */ 1315 q_num = (nic_dev->num_rq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? 1316 nic_dev->num_rq : RTE_ETHDEV_QUEUE_STAT_CNTRS; 1317 for (i = 0; i < q_num; i++) { 1318 rxq = nic_dev->rxqs[i]; 1319 hinic_rxq_get_stats(rxq, &rxq_stats); 1320 stats->q_ipackets[i] = rxq_stats.packets; 1321 stats->q_ibytes[i] = rxq_stats.bytes; 1322 stats->q_errors[i] = rxq_stats.rx_discards; 1323 1324 stats->ierrors += rxq_stats.errors; 1325 rx_discards_pmd += rxq_stats.rx_discards; 1326 dev->data->rx_mbuf_alloc_failed += rxq_stats.rx_nombuf; 1327 } 1328 1329 /* tx queue stats */ 1330 q_num = (nic_dev->num_sq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? 1331 nic_dev->num_sq : RTE_ETHDEV_QUEUE_STAT_CNTRS; 1332 for (i = 0; i < q_num; i++) { 1333 txq = nic_dev->txqs[i]; 1334 hinic_txq_get_stats(txq, &txq_stats); 1335 stats->q_opackets[i] = txq_stats.packets; 1336 stats->q_obytes[i] = txq_stats.bytes; 1337 stats->oerrors += (txq_stats.tx_busy + txq_stats.off_errs); 1338 } 1339 1340 /* vport stats */ 1341 stats->oerrors += vport_stats.tx_discard_vport; 1342 1343 stats->imissed = vport_stats.rx_discard_vport + rx_discards_pmd; 1344 1345 stats->ipackets = (vport_stats.rx_unicast_pkts_vport + 1346 vport_stats.rx_multicast_pkts_vport + 1347 vport_stats.rx_broadcast_pkts_vport - 1348 rx_discards_pmd); 1349 1350 stats->opackets = (vport_stats.tx_unicast_pkts_vport + 1351 vport_stats.tx_multicast_pkts_vport + 1352 vport_stats.tx_broadcast_pkts_vport); 1353 1354 stats->ibytes = (vport_stats.rx_unicast_bytes_vport + 1355 vport_stats.rx_multicast_bytes_vport + 1356 vport_stats.rx_broadcast_bytes_vport); 1357 1358 stats->obytes = (vport_stats.tx_unicast_bytes_vport + 1359 vport_stats.tx_multicast_bytes_vport + 1360 vport_stats.tx_broadcast_bytes_vport); 1361 return 0; 1362 } 1363 1364 /** 1365 * DPDK callback to clear device statistics. 1366 * 1367 * @param dev 1368 * Pointer to Ethernet device structure. 1369 */ 1370 static int hinic_dev_stats_reset(struct rte_eth_dev *dev) 1371 { 1372 int qid; 1373 struct hinic_rxq *rxq = NULL; 1374 struct hinic_txq *txq = NULL; 1375 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1376 int ret; 1377 1378 ret = hinic_clear_vport_stats(nic_dev->hwdev); 1379 if (ret != 0) 1380 return ret; 1381 1382 for (qid = 0; qid < nic_dev->num_rq; qid++) { 1383 rxq = nic_dev->rxqs[qid]; 1384 hinic_rxq_stats_reset(rxq); 1385 } 1386 1387 for (qid = 0; qid < nic_dev->num_sq; qid++) { 1388 txq = nic_dev->txqs[qid]; 1389 hinic_txq_stats_reset(txq); 1390 } 1391 1392 return 0; 1393 } 1394 1395 /** 1396 * DPDK callback to clear device extended statistics. 1397 * 1398 * @param dev 1399 * Pointer to Ethernet device structure. 
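 *
 * Clears the vport and per-queue counters and, when not running as a VF,
 * also clears the physical port statistics.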
1400 */ 1401 static int hinic_dev_xstats_reset(struct rte_eth_dev *dev) 1402 { 1403 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1404 int ret; 1405 1406 ret = hinic_dev_stats_reset(dev); 1407 if (ret != 0) 1408 return ret; 1409 1410 if (hinic_func_type(nic_dev->hwdev) != TYPE_VF) { 1411 ret = hinic_clear_phy_port_stats(nic_dev->hwdev); 1412 if (ret != 0) 1413 return ret; 1414 } 1415 1416 return 0; 1417 } 1418 1419 static void hinic_gen_random_mac_addr(struct rte_ether_addr *mac_addr) 1420 { 1421 uint64_t random_value; 1422 1423 /* Set Organizationally Unique Identifier (OUI) prefix */ 1424 mac_addr->addr_bytes[0] = 0x00; 1425 mac_addr->addr_bytes[1] = 0x09; 1426 mac_addr->addr_bytes[2] = 0xC0; 1427 /* Force indication of locally assigned MAC address. */ 1428 mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; 1429 /* Generate the last 3 bytes of the MAC address with a random number. */ 1430 random_value = rte_rand(); 1431 memcpy(&mac_addr->addr_bytes[3], &random_value, 3); 1432 } 1433 1434 /** 1435 * Init mac_vlan table in NIC. 1436 * 1437 * @param dev 1438 * Pointer to Ethernet device structure. 1439 * 1440 * @return 1441 * 0 on success and stats is filled, 1442 * negative error value otherwise. 1443 */ 1444 static int hinic_init_mac_addr(struct rte_eth_dev *eth_dev) 1445 { 1446 struct hinic_nic_dev *nic_dev = 1447 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 1448 uint8_t addr_bytes[RTE_ETHER_ADDR_LEN]; 1449 u16 func_id = 0; 1450 int rc = 0; 1451 1452 rc = hinic_get_default_mac(nic_dev->hwdev, addr_bytes); 1453 if (rc) 1454 return rc; 1455 1456 rte_ether_addr_copy((struct rte_ether_addr *)addr_bytes, 1457 ð_dev->data->mac_addrs[0]); 1458 if (rte_is_zero_ether_addr(ð_dev->data->mac_addrs[0])) 1459 hinic_gen_random_mac_addr(ð_dev->data->mac_addrs[0]); 1460 1461 func_id = hinic_global_func_id(nic_dev->hwdev); 1462 rc = hinic_set_mac(nic_dev->hwdev, 1463 eth_dev->data->mac_addrs[0].addr_bytes, 1464 0, func_id); 1465 if (rc && rc != HINIC_PF_SET_VF_ALREADY) 1466 return rc; 1467 1468 rte_ether_addr_copy(ð_dev->data->mac_addrs[0], 1469 &nic_dev->default_addr); 1470 1471 return 0; 1472 } 1473 1474 static void hinic_delete_mc_addr_list(struct hinic_nic_dev *nic_dev) 1475 { 1476 u16 func_id; 1477 u32 i; 1478 1479 func_id = hinic_global_func_id(nic_dev->hwdev); 1480 1481 for (i = 0; i < HINIC_MAX_MC_MAC_ADDRS; i++) { 1482 if (rte_is_zero_ether_addr(&nic_dev->mc_list[i])) 1483 break; 1484 1485 hinic_del_mac(nic_dev->hwdev, nic_dev->mc_list[i].addr_bytes, 1486 0, func_id); 1487 memset(&nic_dev->mc_list[i], 0, sizeof(struct rte_ether_addr)); 1488 } 1489 } 1490 1491 /** 1492 * Deinit mac_vlan table in NIC. 1493 * 1494 * @param dev 1495 * Pointer to Ethernet device structure. 1496 * 1497 * @return 1498 * 0 on success and stats is filled, 1499 * negative error value otherwise. 
1500 */ 1501 static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev) 1502 { 1503 struct hinic_nic_dev *nic_dev = 1504 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 1505 u16 func_id = 0; 1506 int rc; 1507 int i; 1508 1509 func_id = hinic_global_func_id(nic_dev->hwdev); 1510 1511 for (i = 0; i < HINIC_MAX_UC_MAC_ADDRS; i++) { 1512 if (rte_is_zero_ether_addr(ð_dev->data->mac_addrs[i])) 1513 continue; 1514 1515 rc = hinic_del_mac(nic_dev->hwdev, 1516 eth_dev->data->mac_addrs[i].addr_bytes, 1517 0, func_id); 1518 if (rc && rc != HINIC_PF_SET_VF_ALREADY) 1519 PMD_DRV_LOG(ERR, "Delete mac table failed, dev_name: %s", 1520 eth_dev->data->name); 1521 1522 memset(ð_dev->data->mac_addrs[i], 0, 1523 sizeof(struct rte_ether_addr)); 1524 } 1525 1526 /* delete multicast mac addrs */ 1527 hinic_delete_mc_addr_list(nic_dev); 1528 } 1529 1530 static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 1531 { 1532 int ret = 0; 1533 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1534 1535 PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d", 1536 dev->data->port_id, mtu, HINIC_MTU_TO_PKTLEN(mtu)); 1537 1538 if (mtu < HINIC_MIN_MTU_SIZE || mtu > HINIC_MAX_MTU_SIZE) { 1539 PMD_DRV_LOG(ERR, "Invalid mtu: %d, must between %d and %d", 1540 mtu, HINIC_MIN_MTU_SIZE, HINIC_MAX_MTU_SIZE); 1541 return -EINVAL; 1542 } 1543 1544 ret = hinic_set_port_mtu(nic_dev->hwdev, mtu); 1545 if (ret) { 1546 PMD_DRV_LOG(ERR, "Set port mtu failed, ret: %d", ret); 1547 return ret; 1548 } 1549 1550 /* update max frame size */ 1551 dev->data->dev_conf.rxmode.max_rx_pkt_len = HINIC_MTU_TO_PKTLEN(mtu); 1552 nic_dev->mtu_size = mtu; 1553 1554 return ret; 1555 } 1556 1557 static void hinic_store_vlan_filter(struct hinic_nic_dev *nic_dev, 1558 u16 vlan_id, bool on) 1559 { 1560 u32 vid_idx, vid_bit; 1561 1562 vid_idx = HINIC_VFTA_IDX(vlan_id); 1563 vid_bit = HINIC_VFTA_BIT(vlan_id); 1564 1565 if (on) 1566 nic_dev->vfta[vid_idx] |= vid_bit; 1567 else 1568 nic_dev->vfta[vid_idx] &= ~vid_bit; 1569 } 1570 1571 static bool hinic_find_vlan_filter(struct hinic_nic_dev *nic_dev, 1572 uint16_t vlan_id) 1573 { 1574 u32 vid_idx, vid_bit; 1575 1576 vid_idx = HINIC_VFTA_IDX(vlan_id); 1577 vid_bit = HINIC_VFTA_BIT(vlan_id); 1578 1579 return (nic_dev->vfta[vid_idx] & vid_bit) ? TRUE : FALSE; 1580 } 1581 1582 /** 1583 * DPDK callback to set vlan filter. 1584 * 1585 * @param dev 1586 * Pointer to Ethernet device structure. 
1587 * @param vlan_id 1588 * vlan id is used to filter vlan packets 1589 * @param enable 1590 * enable disable or enable vlan filter function 1591 */ 1592 static int hinic_vlan_filter_set(struct rte_eth_dev *dev, 1593 uint16_t vlan_id, int enable) 1594 { 1595 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1596 int err = 0; 1597 u16 func_id; 1598 1599 if (vlan_id > RTE_ETHER_MAX_VLAN_ID) 1600 return -EINVAL; 1601 1602 func_id = hinic_global_func_id(nic_dev->hwdev); 1603 1604 if (enable) { 1605 /* If vlanid is already set, just return */ 1606 if (hinic_find_vlan_filter(nic_dev, vlan_id)) { 1607 PMD_DRV_LOG(INFO, "Vlan %u has been added, device: %s", 1608 vlan_id, nic_dev->proc_dev_name); 1609 return 0; 1610 } 1611 1612 err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id, 1613 func_id, TRUE); 1614 } else { 1615 /* If vlanid can't be found, just return */ 1616 if (!hinic_find_vlan_filter(nic_dev, vlan_id)) { 1617 PMD_DRV_LOG(INFO, "Vlan %u is not in the vlan filter list, device: %s", 1618 vlan_id, nic_dev->proc_dev_name); 1619 return 0; 1620 } 1621 1622 err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id, 1623 func_id, FALSE); 1624 } 1625 1626 if (err) { 1627 PMD_DRV_LOG(ERR, "%s vlan failed, func_id: %d, vlan_id: %d, err: %d", 1628 enable ? "Add" : "Remove", func_id, vlan_id, err); 1629 return err; 1630 } 1631 1632 hinic_store_vlan_filter(nic_dev, vlan_id, enable); 1633 1634 PMD_DRV_LOG(INFO, "%s vlan %u succeed, device: %s", 1635 enable ? "Add" : "Remove", vlan_id, nic_dev->proc_dev_name); 1636 return 0; 1637 } 1638 1639 /** 1640 * DPDK callback to enable or disable vlan offload. 1641 * 1642 * @param dev 1643 * Pointer to Ethernet device structure. 1644 * @param mask 1645 * Definitions used for VLAN setting 1646 */ 1647 static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask) 1648 { 1649 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1650 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; 1651 bool on; 1652 int err; 1653 1654 /* Enable or disable VLAN filter */ 1655 if (mask & ETH_VLAN_FILTER_MASK) { 1656 on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ? 1657 TRUE : FALSE; 1658 err = hinic_config_vlan_filter(nic_dev->hwdev, on); 1659 if (err == HINIC_MGMT_CMD_UNSUPPORTED) { 1660 PMD_DRV_LOG(WARNING, 1661 "Current matching version does not support vlan filter configuration, device: %s, port_id: %d", 1662 nic_dev->proc_dev_name, dev->data->port_id); 1663 } else if (err) { 1664 PMD_DRV_LOG(ERR, "Failed to %s vlan filter, device: %s, port_id: %d, err: %d", 1665 on ? "enable" : "disable", 1666 nic_dev->proc_dev_name, 1667 dev->data->port_id, err); 1668 return err; 1669 } 1670 1671 PMD_DRV_LOG(INFO, "%s vlan filter succeed, device: %s, port_id: %d", 1672 on ? "Enable" : "Disable", 1673 nic_dev->proc_dev_name, dev->data->port_id); 1674 } 1675 1676 /* Enable or disable VLAN stripping */ 1677 if (mask & ETH_VLAN_STRIP_MASK) { 1678 on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ? 1679 TRUE : FALSE; 1680 err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on); 1681 if (err) { 1682 PMD_DRV_LOG(ERR, "Failed to %s vlan strip, device: %s, port_id: %d, err: %d", 1683 on ? "enable" : "disable", 1684 nic_dev->proc_dev_name, 1685 dev->data->port_id, err); 1686 return err; 1687 } 1688 1689 PMD_DRV_LOG(INFO, "%s vlan strip succeed, device: %s, port_id: %d", 1690 on ? 
"Enable" : "Disable", 1691 nic_dev->proc_dev_name, dev->data->port_id); 1692 } 1693 1694 if (mask & ETH_VLAN_EXTEND_MASK) { 1695 PMD_DRV_LOG(ERR, "Don't support vlan qinq, device: %s, port_id: %d", 1696 nic_dev->proc_dev_name, dev->data->port_id); 1697 return -ENOTSUP; 1698 } 1699 1700 return 0; 1701 } 1702 1703 static void hinic_remove_all_vlanid(struct rte_eth_dev *eth_dev) 1704 { 1705 struct hinic_nic_dev *nic_dev = 1706 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 1707 u16 func_id; 1708 int i; 1709 1710 func_id = hinic_global_func_id(nic_dev->hwdev); 1711 for (i = 0; i <= RTE_ETHER_MAX_VLAN_ID; i++) { 1712 /* If can't find it, continue */ 1713 if (!hinic_find_vlan_filter(nic_dev, i)) 1714 continue; 1715 1716 (void)hinic_add_remove_vlan(nic_dev->hwdev, i, func_id, FALSE); 1717 hinic_store_vlan_filter(nic_dev, i, false); 1718 } 1719 } 1720 1721 static int hinic_set_dev_allmulticast(struct hinic_nic_dev *nic_dev, 1722 bool enable) 1723 { 1724 u32 rx_mode_ctrl = nic_dev->rx_mode_status; 1725 1726 if (enable) 1727 rx_mode_ctrl |= HINIC_RX_MODE_MC_ALL; 1728 else 1729 rx_mode_ctrl &= (~HINIC_RX_MODE_MC_ALL); 1730 1731 return hinic_config_rx_mode(nic_dev, rx_mode_ctrl); 1732 } 1733 1734 /** 1735 * DPDK callback to enable allmulticast mode. 1736 * 1737 * @param dev 1738 * Pointer to Ethernet device structure. 1739 * 1740 * @return 1741 * 0 on success, 1742 * negative error value otherwise. 1743 */ 1744 static int hinic_dev_allmulticast_enable(struct rte_eth_dev *dev) 1745 { 1746 int ret = HINIC_OK; 1747 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1748 1749 ret = hinic_set_dev_allmulticast(nic_dev, true); 1750 if (ret) { 1751 PMD_DRV_LOG(ERR, "Enable allmulticast failed, error: %d", ret); 1752 return ret; 1753 } 1754 1755 PMD_DRV_LOG(INFO, "Enable allmulticast succeed, nic_dev: %s, port_id: %d", 1756 nic_dev->proc_dev_name, dev->data->port_id); 1757 return 0; 1758 } 1759 1760 /** 1761 * DPDK callback to disable allmulticast mode. 1762 * 1763 * @param dev 1764 * Pointer to Ethernet device structure. 1765 * 1766 * @return 1767 * 0 on success, 1768 * negative error value otherwise. 1769 */ 1770 static int hinic_dev_allmulticast_disable(struct rte_eth_dev *dev) 1771 { 1772 int ret = HINIC_OK; 1773 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1774 1775 ret = hinic_set_dev_allmulticast(nic_dev, false); 1776 if (ret) { 1777 PMD_DRV_LOG(ERR, "Disable allmulticast failed, error: %d", ret); 1778 return ret; 1779 } 1780 1781 PMD_DRV_LOG(INFO, "Disable allmulticast succeed, nic_dev: %s, port_id: %d", 1782 nic_dev->proc_dev_name, dev->data->port_id); 1783 return 0; 1784 } 1785 1786 /** 1787 * DPDK callback to enable promiscuous mode. 1788 * 1789 * @param dev 1790 * Pointer to Ethernet device structure. 1791 * 1792 * @return 1793 * 0 on success, 1794 * negative error value otherwise. 1795 */ 1796 static int hinic_dev_promiscuous_enable(struct rte_eth_dev *dev) 1797 { 1798 int rc = HINIC_OK; 1799 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1800 1801 PMD_DRV_LOG(INFO, "Enable promiscuous, nic_dev: %s, port_id: %d, promisc: %d", 1802 nic_dev->proc_dev_name, dev->data->port_id, 1803 dev->data->promiscuous); 1804 1805 rc = hinic_set_dev_promiscuous(nic_dev, true); 1806 if (rc) 1807 PMD_DRV_LOG(ERR, "Enable promiscuous failed"); 1808 1809 return rc; 1810 } 1811 1812 /** 1813 * DPDK callback to disable promiscuous mode. 1814 * 1815 * @param dev 1816 * Pointer to Ethernet device structure. 
1817 * 1818 * @return 1819 * 0 on success, 1820 * negative error value otherwise. 1821 */ 1822 static int hinic_dev_promiscuous_disable(struct rte_eth_dev *dev) 1823 { 1824 int rc = HINIC_OK; 1825 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1826 1827 PMD_DRV_LOG(INFO, "Disable promiscuous, nic_dev: %s, port_id: %d, promisc: %d", 1828 nic_dev->proc_dev_name, dev->data->port_id, 1829 dev->data->promiscuous); 1830 1831 rc = hinic_set_dev_promiscuous(nic_dev, false); 1832 if (rc) 1833 PMD_DRV_LOG(ERR, "Disable promiscuous failed"); 1834 1835 return rc; 1836 } 1837 1838 static int hinic_flow_ctrl_get(struct rte_eth_dev *dev, 1839 struct rte_eth_fc_conf *fc_conf) 1840 { 1841 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1842 struct nic_pause_config nic_pause; 1843 int err; 1844 1845 memset(&nic_pause, 0, sizeof(nic_pause)); 1846 1847 err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause); 1848 if (err) 1849 return err; 1850 1851 if (nic_dev->pause_set || !nic_pause.auto_neg) { 1852 nic_pause.rx_pause = nic_dev->nic_pause.rx_pause; 1853 nic_pause.tx_pause = nic_dev->nic_pause.tx_pause; 1854 } 1855 1856 fc_conf->autoneg = nic_pause.auto_neg; 1857 1858 if (nic_pause.tx_pause && nic_pause.rx_pause) 1859 fc_conf->mode = RTE_FC_FULL; 1860 else if (nic_pause.tx_pause) 1861 fc_conf->mode = RTE_FC_TX_PAUSE; 1862 else if (nic_pause.rx_pause) 1863 fc_conf->mode = RTE_FC_RX_PAUSE; 1864 else 1865 fc_conf->mode = RTE_FC_NONE; 1866 1867 return 0; 1868 } 1869 1870 static int hinic_flow_ctrl_set(struct rte_eth_dev *dev, 1871 struct rte_eth_fc_conf *fc_conf) 1872 { 1873 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1874 struct nic_pause_config nic_pause; 1875 int err; 1876 1877 nic_pause.auto_neg = fc_conf->autoneg; 1878 1879 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || 1880 (fc_conf->mode & RTE_FC_TX_PAUSE)) 1881 nic_pause.tx_pause = true; 1882 else 1883 nic_pause.tx_pause = false; 1884 1885 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || 1886 (fc_conf->mode & RTE_FC_RX_PAUSE)) 1887 nic_pause.rx_pause = true; 1888 else 1889 nic_pause.rx_pause = false; 1890 1891 err = hinic_set_pause_config(nic_dev->hwdev, nic_pause); 1892 if (err) 1893 return err; 1894 1895 nic_dev->pause_set = true; 1896 nic_dev->nic_pause.auto_neg = nic_pause.auto_neg; 1897 nic_dev->nic_pause.rx_pause = nic_pause.rx_pause; 1898 nic_dev->nic_pause.tx_pause = nic_pause.tx_pause; 1899 1900 PMD_DRV_LOG(INFO, "Set pause options, tx: %s, rx: %s, auto: %s\n", 1901 nic_pause.tx_pause ? "on" : "off", 1902 nic_pause.rx_pause ? "on" : "off", 1903 nic_pause.auto_neg ? "on" : "off"); 1904 1905 return 0; 1906 } 1907 1908 /** 1909 * DPDK callback to update the RSS hash key and RSS hash type. 1910 * 1911 * @param dev 1912 * Pointer to Ethernet device structure. 1913 * @param rss_conf 1914 * RSS configuration data. 1915 * 1916 * @return 1917 * 0 on success, negative error value otherwise. 
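 *
 * For example, an rss_hf of ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP turns
 * on the ipv4 and tcp_ipv4 hash types; a key longer than
 * HINIC_RSS_KEY_SIZE bytes is rejected.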
1918 */ 1919 static int hinic_rss_hash_update(struct rte_eth_dev *dev, 1920 struct rte_eth_rss_conf *rss_conf) 1921 { 1922 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1923 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 1924 u8 hashkey[HINIC_RSS_KEY_SIZE] = {0}; 1925 u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; 1926 u64 rss_hf = rss_conf->rss_hf; 1927 struct nic_rss_type rss_type = {0}; 1928 int err = 0; 1929 1930 if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) { 1931 PMD_DRV_LOG(WARNING, "RSS is not enabled"); 1932 return HINIC_OK; 1933 } 1934 1935 if (rss_conf->rss_key_len > HINIC_RSS_KEY_SIZE) { 1936 PMD_DRV_LOG(ERR, "Invalid rss key, rss_key_len: %d", 1937 rss_conf->rss_key_len); 1938 return HINIC_ERROR; 1939 } 1940 1941 if (rss_conf->rss_key) { 1942 memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len); 1943 err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, 1944 hashkey); 1945 if (err) { 1946 PMD_DRV_LOG(ERR, "Set rss template table failed"); 1947 goto disable_rss; 1948 } 1949 } 1950 1951 rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0; 1952 rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0; 1953 rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0; 1954 rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0; 1955 rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0; 1956 rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0; 1957 rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0; 1958 rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0; 1959 1960 err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type); 1961 if (err) { 1962 PMD_DRV_LOG(ERR, "Set rss type table failed"); 1963 goto disable_rss; 1964 } 1965 1966 return 0; 1967 1968 disable_rss: 1969 memset(prio_tc, 0, sizeof(prio_tc)); 1970 (void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc); 1971 return err; 1972 } 1973 1974 /** 1975 * DPDK callback to get the RSS hash configuration. 1976 * 1977 * @param dev 1978 * Pointer to Ethernet device structure. 1979 * @param rss_conf 1980 * RSS configuration data. 1981 * 1982 * @return 1983 * 0 on success, negative error value otherwise. 1984 */ 1985 static int hinic_rss_conf_get(struct rte_eth_dev *dev, 1986 struct rte_eth_rss_conf *rss_conf) 1987 { 1988 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1989 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 1990 u8 hashkey[HINIC_RSS_KEY_SIZE] = {0}; 1991 struct nic_rss_type rss_type = {0}; 1992 int err; 1993 1994 if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) { 1995 PMD_DRV_LOG(WARNING, "RSS is not enabled"); 1996 return HINIC_ERROR; 1997 } 1998 1999 err = hinic_rss_get_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey); 2000 if (err) 2001 return err; 2002 2003 if (rss_conf->rss_key && 2004 rss_conf->rss_key_len >= HINIC_RSS_KEY_SIZE) { 2005 memcpy(rss_conf->rss_key, hashkey, sizeof(hashkey)); 2006 rss_conf->rss_key_len = sizeof(hashkey); 2007 } 2008 2009 err = hinic_get_rss_type(nic_dev->hwdev, tmpl_idx, &rss_type); 2010 if (err) 2011 return err; 2012 2013 rss_conf->rss_hf = 0; 2014 rss_conf->rss_hf |= rss_type.ipv4 ? 2015 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0; 2016 rss_conf->rss_hf |= rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0; 2017 rss_conf->rss_hf |= rss_type.ipv6 ? 2018 (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0; 2019 rss_conf->rss_hf |= rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0; 2020 rss_conf->rss_hf |= rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0; 2021 rss_conf->rss_hf |= rss_type.tcp_ipv6_ext ? 
ETH_RSS_IPV6_TCP_EX : 0; 2022 rss_conf->rss_hf |= rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0; 2023 rss_conf->rss_hf |= rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0; 2024 2025 return HINIC_OK; 2026 } 2027 2028 /** 2029 * DPDK callback to update the RETA indirection table. 2030 * 2031 * @param dev 2032 * Pointer to Ethernet device structure. 2033 * @param reta_conf 2034 * Pointer to RETA configuration structure array. 2035 * @param reta_size 2036 * Size of the RETA table. 2037 * 2038 * @return 2039 * 0 on success, negative error value otherwise. 2040 */ 2041 static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev, 2042 struct rte_eth_rss_reta_entry64 *reta_conf, 2043 uint16_t reta_size) 2044 { 2045 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2046 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 2047 u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; 2048 u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0}; 2049 int err = 0; 2050 u16 i = 0; 2051 u16 idx, shift; 2052 2053 if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) 2054 return HINIC_OK; 2055 2056 if (reta_size != NIC_RSS_INDIR_SIZE) { 2057 PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size); 2058 return HINIC_ERROR; 2059 } 2060 2061 err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); 2062 if (err) 2063 return err; 2064 2065 /* update rss indir_tbl */ 2066 for (i = 0; i < reta_size; i++) { 2067 idx = i / RTE_RETA_GROUP_SIZE; 2068 shift = i % RTE_RETA_GROUP_SIZE; 2069 if (reta_conf[idx].mask & (1ULL << shift)) 2070 indirtbl[i] = reta_conf[idx].reta[shift]; 2071 } 2072 2073 for (i = 0 ; i < reta_size; i++) { 2074 if (indirtbl[i] >= nic_dev->num_rq) { 2075 PMD_DRV_LOG(ERR, "Invalid reta entry, index: %d, num_rq: %d", 2076 i, nic_dev->num_rq); 2077 goto disable_rss; 2078 } 2079 } 2080 2081 err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); 2082 if (err) 2083 goto disable_rss; 2084 2085 nic_dev->rss_indir_flag = true; 2086 2087 return 0; 2088 2089 disable_rss: 2090 memset(prio_tc, 0, sizeof(prio_tc)); 2091 (void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc); 2092 2093 return HINIC_ERROR; 2094 } 2095 2096 2097 /** 2098 * DPDK callback to get the RETA indirection table. 2099 * 2100 * @param dev 2101 * Pointer to Ethernet device structure. 2102 * @param reta_conf 2103 * Pointer to RETA configuration structure array. 2104 * @param reta_size 2105 * Size of the RETA table. 2106 * 2107 * @return 2108 * 0 on success, negative error value otherwise. 2109 */ 2110 static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev, 2111 struct rte_eth_rss_reta_entry64 *reta_conf, 2112 uint16_t reta_size) 2113 { 2114 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2115 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 2116 int err = 0; 2117 u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0}; 2118 u16 idx, shift; 2119 u16 i = 0; 2120 2121 if (reta_size != NIC_RSS_INDIR_SIZE) { 2122 PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size); 2123 return HINIC_ERROR; 2124 } 2125 2126 err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); 2127 if (err) { 2128 PMD_DRV_LOG(ERR, "Get rss indirect table failed, error: %d", 2129 err); 2130 return err; 2131 } 2132 2133 for (i = 0; i < reta_size; i++) { 2134 idx = i / RTE_RETA_GROUP_SIZE; 2135 shift = i % RTE_RETA_GROUP_SIZE; 2136 if (reta_conf[idx].mask & (1ULL << shift)) 2137 reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i]; 2138 } 2139 2140 return HINIC_OK; 2141 } 2142 2143 /** 2144 * DPDK callback to get extended device statistics. 
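 * The statistics are filled in a fixed order: per-RX-queue PMD counters,
 * per-TX-queue PMD counters, vport (function) counters read from firmware
 * and, for PF devices only, physical port MAC counters. Applications
 * normally retrieve them through rte_eth_xstats_get() together with the
 * names reported by the callback further below.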
2145 * 2146 * @param dev 2147 * Pointer to Ethernet device. 2148 * @param xstats 2149 * Pointer to rte extended stats table. 2150 * @param n 2151 * The size of the stats table. 2152 * 2153 * @return 2154 * Number of extended stats on success and stats is filled, 2155 * negative error value otherwise. 2156 */ 2157 static int hinic_dev_xstats_get(struct rte_eth_dev *dev, 2158 struct rte_eth_xstat *xstats, 2159 unsigned int n) 2160 { 2161 u16 qid = 0; 2162 u32 i; 2163 int err, count; 2164 struct hinic_nic_dev *nic_dev; 2165 struct hinic_phy_port_stats port_stats; 2166 struct hinic_vport_stats vport_stats; 2167 struct hinic_rxq *rxq = NULL; 2168 struct hinic_rxq_stats rxq_stats; 2169 struct hinic_txq *txq = NULL; 2170 struct hinic_txq_stats txq_stats; 2171 2172 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2173 count = hinic_xstats_calc_num(nic_dev); 2174 if ((int)n < count) 2175 return count; 2176 2177 count = 0; 2178 2179 /* Get stats from hinic_rxq_stats */ 2180 for (qid = 0; qid < nic_dev->num_rq; qid++) { 2181 rxq = nic_dev->rxqs[qid]; 2182 hinic_rxq_get_stats(rxq, &rxq_stats); 2183 2184 for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) { 2185 xstats[count].value = 2186 *(uint64_t *)(((char *)&rxq_stats) + 2187 hinic_rxq_stats_strings[i].offset); 2188 xstats[count].id = count; 2189 count++; 2190 } 2191 } 2192 2193 /* Get stats from hinic_txq_stats */ 2194 for (qid = 0; qid < nic_dev->num_sq; qid++) { 2195 txq = nic_dev->txqs[qid]; 2196 hinic_txq_get_stats(txq, &txq_stats); 2197 2198 for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) { 2199 xstats[count].value = 2200 *(uint64_t *)(((char *)&txq_stats) + 2201 hinic_txq_stats_strings[i].offset); 2202 xstats[count].id = count; 2203 count++; 2204 } 2205 } 2206 2207 /* Get stats from hinic_vport_stats */ 2208 err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats); 2209 if (err) 2210 return err; 2211 2212 for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) { 2213 xstats[count].value = 2214 *(uint64_t *)(((char *)&vport_stats) + 2215 hinic_vport_stats_strings[i].offset); 2216 xstats[count].id = count; 2217 count++; 2218 } 2219 2220 if (HINIC_IS_VF(nic_dev->hwdev)) 2221 return count; 2222 2223 /* Get stats from hinic_phy_port_stats */ 2224 err = hinic_get_phy_port_stats(nic_dev->hwdev, &port_stats); 2225 if (err) 2226 return err; 2227 2228 for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) { 2229 xstats[count].value = *(uint64_t *)(((char *)&port_stats) + 2230 hinic_phyport_stats_strings[i].offset); 2231 xstats[count].id = count; 2232 count++; 2233 } 2234 2235 return count; 2236 } 2237 2238 static void hinic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2239 struct rte_eth_rxq_info *qinfo) 2240 { 2241 struct hinic_rxq *rxq = dev->data->rx_queues[queue_id]; 2242 2243 qinfo->mp = rxq->mb_pool; 2244 qinfo->nb_desc = rxq->q_depth; 2245 } 2246 2247 static void hinic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2248 struct rte_eth_txq_info *qinfo) 2249 { 2250 struct hinic_txq *txq = dev->data->tx_queues[queue_id]; 2251 2252 qinfo->nb_desc = txq->q_depth; 2253 } 2254 2255 /** 2256 * DPDK callback to retrieve names of extended device statistics 2257 * 2258 * @param dev 2259 * Pointer to Ethernet device structure. 2260 * @param xstats_names 2261 * Buffer to insert names into. 2262 * 2263 * @return 2264 * Number of xstats names. 
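 *
 * The names are reported in the same order in which hinic_dev_xstats_get()
 * fills the values: "rxq<N>_<counter>_pmd", "txq<N>_<counter>_pmd", the
 * vport statistics and, on PF devices only, the physical port statistics.
 * When xstats_names is NULL only the required array size is returned.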
 */
static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				__rte_unused unsigned int limit)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int count = 0;
	u16 i = 0, q_num;

	if (xstats_names == NULL)
		return hinic_xstats_calc_num(nic_dev);

	/* get pmd rxq stats */
	for (q_num = 0; q_num < nic_dev->num_rq; q_num++) {
		for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "rxq%d_%s_pmd",
				 q_num, hinic_rxq_stats_strings[i].name);
			count++;
		}
	}

	/* get pmd txq stats */
	for (q_num = 0; q_num < nic_dev->num_sq; q_num++) {
		for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "txq%d_%s_pmd",
				 q_num, hinic_txq_stats_strings[i].name);
			count++;
		}
	}

	/* get vport stats */
	for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "%s",
			 hinic_vport_stats_strings[i].name);
		count++;
	}

	if (HINIC_IS_VF(nic_dev->hwdev))
		return count;

	/* get phy port stats */
	for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "%s",
			 hinic_phyport_stats_strings[i].name);
		count++;
	}

	return count;
}

/**
 * DPDK callback to set the primary MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param addr
 *   Pointer to the new MAC address.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int hinic_set_mac_addr(struct rte_eth_dev *dev,
			      struct rte_ether_addr *addr)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	u16 func_id;
	int err;

	func_id = hinic_global_func_id(nic_dev->hwdev);
	err = hinic_update_mac(nic_dev->hwdev, nic_dev->default_addr.addr_bytes,
			       addr->addr_bytes, 0, func_id);
	if (err)
		return err;

	rte_ether_addr_copy(addr, &nic_dev->default_addr);

	PMD_DRV_LOG(INFO, "Set new mac address %02x:%02x:%02x:%02x:%02x:%02x",
		    addr->addr_bytes[0], addr->addr_bytes[1],
		    addr->addr_bytes[2], addr->addr_bytes[3],
		    addr->addr_bytes[4], addr->addr_bytes[5]);

	return 0;
}

/**
 * DPDK callback to remove a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index, should be less than 128.
 */
static void hinic_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	u16 func_id;
	int ret;

	if (index >= HINIC_MAX_UC_MAC_ADDRS) {
		PMD_DRV_LOG(INFO, "Remove mac index(%u) is out of range",
			    index);
		return;
	}

	func_id = hinic_global_func_id(nic_dev->hwdev);
	ret = hinic_del_mac(nic_dev->hwdev,
			    dev->data->mac_addrs[index].addr_bytes, 0, func_id);
	if (ret)
		return;

	memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr));
}

/**
 * DPDK callback to add a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   Pointer to MAC address.
 * @param index
 *   MAC address index, should be less than 128.
 * @param vmdq
 *   VMDq pool index (not used).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int hinic_mac_addr_add(struct rte_eth_dev *dev,
			      struct rte_ether_addr *mac_addr, uint32_t index,
			      __rte_unused uint32_t vmdq)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	unsigned int i;
	u16 func_id;
	int ret;

	if (index >= HINIC_MAX_UC_MAC_ADDRS) {
		PMD_DRV_LOG(INFO, "Add mac index(%u) is out of range", index);
		return -EINVAL;
	}

	/* First, make sure this address isn't already configured. */
	for (i = 0; (i != HINIC_MAX_UC_MAC_ADDRS); ++i) {
		/* Skip this index, it's going to be reconfigured. */
		if (i == index)
			continue;

		if (memcmp(&dev->data->mac_addrs[i],
			   mac_addr, sizeof(*mac_addr)))
			continue;

		PMD_DRV_LOG(INFO, "MAC address already configured");
		return -EADDRINUSE;
	}

	func_id = hinic_global_func_id(nic_dev->hwdev);
	ret = hinic_set_mac(nic_dev->hwdev, mac_addr->addr_bytes, 0, func_id);
	if (ret)
		return ret;

	dev->data->mac_addrs[index] = *mac_addr;
	return 0;
}

/**
 * DPDK callback to set the list of multicast MAC addresses.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mc_addr_set
 *   Pointer to the array of multicast MAC addresses.
 * @param nb_mc_addr
 *   Number of multicast MAC addresses.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int hinic_set_mc_addr_list(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mc_addr_set,
				  uint32_t nb_mc_addr)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	u16 func_id;
	int ret;
	u32 i;

	func_id = hinic_global_func_id(nic_dev->hwdev);

	/* delete the old multicast address list first */
	hinic_delete_mc_addr_list(nic_dev);

	if (nb_mc_addr > HINIC_MAX_MC_MAC_ADDRS)
		goto allmulti;

	for (i = 0; i < nb_mc_addr; i++) {
		ret = hinic_set_mac(nic_dev->hwdev, mc_addr_set[i].addr_bytes,
				    0, func_id);
		/* if adding a multicast address fails, fall back to allmulticast */
		if (ret) {
			hinic_delete_mc_addr_list(nic_dev);
			goto allmulti;
		}

		rte_ether_addr_copy(&mc_addr_set[i], &nic_dev->mc_list[i]);
	}

	return 0;

allmulti:
	hinic_dev_allmulticast_enable(dev);

	return 0;
}

/**
 * DPDK callback to manage filter operations
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
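 *
 * Only RTE_ETH_FILTER_GENERIC with RTE_ETH_FILTER_GET is supported: it
 * returns a pointer to the driver's rte_flow operations (hinic_flow_ops),
 * so flow rules are then managed through the generic rte_flow API, e.g.
 * (illustrative sketch; attr, pattern and actions are application-defined
 * and not part of this driver):
 *
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);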
2499 */ 2500 static int hinic_dev_filter_ctrl(struct rte_eth_dev *dev, 2501 enum rte_filter_type filter_type, 2502 enum rte_filter_op filter_op, 2503 void *arg) 2504 { 2505 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2506 int func_id = hinic_global_func_id(nic_dev->hwdev); 2507 2508 switch (filter_type) { 2509 case RTE_ETH_FILTER_GENERIC: 2510 if (filter_op != RTE_ETH_FILTER_GET) 2511 return -EINVAL; 2512 *(const void **)arg = &hinic_flow_ops; 2513 break; 2514 default: 2515 PMD_DRV_LOG(INFO, "Filter type (%d) not supported", 2516 filter_type); 2517 return -EINVAL; 2518 } 2519 2520 PMD_DRV_LOG(INFO, "Set filter_ctrl succeed, func_id: 0x%x, filter_type: 0x%x," 2521 "filter_op: 0x%x.", func_id, filter_type, filter_op); 2522 return 0; 2523 } 2524 2525 static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev) 2526 { 2527 struct nic_pause_config pause_config = {0}; 2528 int err; 2529 2530 pause_config.auto_neg = 0; 2531 pause_config.rx_pause = HINIC_DEFAUT_PAUSE_CONFIG; 2532 pause_config.tx_pause = HINIC_DEFAUT_PAUSE_CONFIG; 2533 2534 err = hinic_set_pause_config(nic_dev->hwdev, pause_config); 2535 if (err) 2536 return err; 2537 2538 nic_dev->pause_set = true; 2539 nic_dev->nic_pause.auto_neg = pause_config.auto_neg; 2540 nic_dev->nic_pause.rx_pause = pause_config.rx_pause; 2541 nic_dev->nic_pause.tx_pause = pause_config.tx_pause; 2542 2543 return 0; 2544 } 2545 2546 static int hinic_set_default_dcb_feature(struct hinic_nic_dev *nic_dev) 2547 { 2548 u8 up_tc[HINIC_DCB_UP_MAX] = {0}; 2549 u8 up_pgid[HINIC_DCB_UP_MAX] = {0}; 2550 u8 up_bw[HINIC_DCB_UP_MAX] = {0}; 2551 u8 pg_bw[HINIC_DCB_UP_MAX] = {0}; 2552 u8 up_strict[HINIC_DCB_UP_MAX] = {0}; 2553 int i = 0; 2554 2555 pg_bw[0] = 100; 2556 for (i = 0; i < HINIC_DCB_UP_MAX; i++) 2557 up_bw[i] = 100; 2558 2559 return hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw, 2560 up_pgid, up_bw, up_strict); 2561 } 2562 2563 static int hinic_init_default_cos(struct hinic_nic_dev *nic_dev) 2564 { 2565 u8 cos_id = 0; 2566 int err; 2567 2568 if (!HINIC_IS_VF(nic_dev->hwdev)) { 2569 nic_dev->default_cos = 2570 (hinic_global_func_id(nic_dev->hwdev) + 2571 DEFAULT_BASE_COS) % NR_MAX_COS; 2572 } else { 2573 err = hinic_vf_get_default_cos(nic_dev->hwdev, &cos_id); 2574 if (err) { 2575 PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d", 2576 err); 2577 return HINIC_ERROR; 2578 } 2579 2580 nic_dev->default_cos = cos_id; 2581 } 2582 2583 return 0; 2584 } 2585 2586 static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev) 2587 { 2588 int err; 2589 2590 err = hinic_init_default_cos(nic_dev); 2591 if (err) 2592 return err; 2593 2594 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) 2595 return 0; 2596 2597 /* Restore DCB configure to default status */ 2598 err = hinic_set_default_dcb_feature(nic_dev); 2599 if (err) 2600 return err; 2601 2602 /* Set pause enable, and up will disable pfc. 
*/ 2603 err = hinic_set_default_pause_feature(nic_dev); 2604 if (err) 2605 return err; 2606 2607 err = hinic_reset_port_link_cfg(nic_dev->hwdev); 2608 if (err) 2609 return err; 2610 2611 err = hinic_set_link_status_follow(nic_dev->hwdev, 2612 HINIC_LINK_FOLLOW_PORT); 2613 if (err == HINIC_MGMT_CMD_UNSUPPORTED) 2614 PMD_DRV_LOG(WARNING, "Don't support to set link status follow phy port status"); 2615 else if (err) 2616 return err; 2617 2618 return hinic_set_anti_attack(nic_dev->hwdev, true); 2619 } 2620 2621 static int32_t hinic_card_workmode_check(struct hinic_nic_dev *nic_dev) 2622 { 2623 struct hinic_board_info info = { 0 }; 2624 int rc; 2625 2626 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) 2627 return 0; 2628 2629 rc = hinic_get_board_info(nic_dev->hwdev, &info); 2630 if (rc) 2631 return rc; 2632 2633 return (info.service_mode == HINIC_SERVICE_MODE_NIC ? HINIC_OK : 2634 HINIC_ERROR); 2635 } 2636 2637 static int hinic_copy_mempool_init(struct hinic_nic_dev *nic_dev) 2638 { 2639 nic_dev->cpy_mpool = rte_mempool_lookup(nic_dev->proc_dev_name); 2640 if (nic_dev->cpy_mpool == NULL) { 2641 nic_dev->cpy_mpool = 2642 rte_pktmbuf_pool_create(nic_dev->proc_dev_name, 2643 HINIC_COPY_MEMPOOL_DEPTH, 2644 0, 0, 2645 HINIC_COPY_MBUF_SIZE, 2646 rte_socket_id()); 2647 if (!nic_dev->cpy_mpool) { 2648 PMD_DRV_LOG(ERR, "Create copy mempool failed, errno: %d, dev_name: %s", 2649 rte_errno, nic_dev->proc_dev_name); 2650 return -ENOMEM; 2651 } 2652 } 2653 2654 return 0; 2655 } 2656 2657 static void hinic_copy_mempool_uninit(struct hinic_nic_dev *nic_dev) 2658 { 2659 if (nic_dev->cpy_mpool != NULL) 2660 rte_mempool_free(nic_dev->cpy_mpool); 2661 } 2662 2663 static int hinic_init_sw_rxtxqs(struct hinic_nic_dev *nic_dev) 2664 { 2665 u32 txq_size; 2666 u32 rxq_size; 2667 2668 /* allocate software txq array */ 2669 txq_size = nic_dev->nic_cap.max_sqs * sizeof(*nic_dev->txqs); 2670 nic_dev->txqs = kzalloc_aligned(txq_size, GFP_KERNEL); 2671 if (!nic_dev->txqs) { 2672 PMD_DRV_LOG(ERR, "Allocate txqs failed"); 2673 return -ENOMEM; 2674 } 2675 2676 /* allocate software rxq array */ 2677 rxq_size = nic_dev->nic_cap.max_rqs * sizeof(*nic_dev->rxqs); 2678 nic_dev->rxqs = kzalloc_aligned(rxq_size, GFP_KERNEL); 2679 if (!nic_dev->rxqs) { 2680 /* free txqs */ 2681 kfree(nic_dev->txqs); 2682 nic_dev->txqs = NULL; 2683 2684 PMD_DRV_LOG(ERR, "Allocate rxqs failed"); 2685 return -ENOMEM; 2686 } 2687 2688 return HINIC_OK; 2689 } 2690 2691 static void hinic_deinit_sw_rxtxqs(struct hinic_nic_dev *nic_dev) 2692 { 2693 kfree(nic_dev->txqs); 2694 nic_dev->txqs = NULL; 2695 2696 kfree(nic_dev->rxqs); 2697 nic_dev->rxqs = NULL; 2698 } 2699 2700 static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev) 2701 { 2702 struct hinic_nic_dev *nic_dev = 2703 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 2704 int rc; 2705 2706 nic_dev->hwdev = rte_zmalloc("hinic_hwdev", sizeof(*nic_dev->hwdev), 2707 RTE_CACHE_LINE_SIZE); 2708 if (!nic_dev->hwdev) { 2709 PMD_DRV_LOG(ERR, "Allocate hinic hwdev memory failed, dev_name: %s", 2710 eth_dev->data->name); 2711 return -ENOMEM; 2712 } 2713 nic_dev->hwdev->pcidev_hdl = RTE_ETH_DEV_TO_PCI(eth_dev); 2714 2715 /* init osdep*/ 2716 rc = hinic_osdep_init(nic_dev->hwdev); 2717 if (rc) { 2718 PMD_DRV_LOG(ERR, "Initialize os_dep failed, dev_name: %s", 2719 eth_dev->data->name); 2720 goto init_osdep_fail; 2721 } 2722 2723 /* init_hwif */ 2724 rc = hinic_hwif_res_init(nic_dev->hwdev); 2725 if (rc) { 2726 PMD_DRV_LOG(ERR, "Initialize hwif failed, dev_name: %s", 2727 eth_dev->data->name); 2728 goto init_hwif_fail; 
2729 } 2730 2731 /* init_cfg_mgmt */ 2732 rc = init_cfg_mgmt(nic_dev->hwdev); 2733 if (rc) { 2734 PMD_DRV_LOG(ERR, "Initialize cfg_mgmt failed, dev_name: %s", 2735 eth_dev->data->name); 2736 goto init_cfgmgnt_fail; 2737 } 2738 2739 /* init_aeqs */ 2740 rc = hinic_comm_aeqs_init(nic_dev->hwdev); 2741 if (rc) { 2742 PMD_DRV_LOG(ERR, "Initialize aeqs failed, dev_name: %s", 2743 eth_dev->data->name); 2744 goto init_aeqs_fail; 2745 } 2746 2747 /* init_pf_to_mgnt */ 2748 rc = hinic_comm_pf_to_mgmt_init(nic_dev->hwdev); 2749 if (rc) { 2750 PMD_DRV_LOG(ERR, "Initialize pf_to_mgmt failed, dev_name: %s", 2751 eth_dev->data->name); 2752 goto init_pf_to_mgmt_fail; 2753 } 2754 2755 /* init mailbox */ 2756 rc = hinic_comm_func_to_func_init(nic_dev->hwdev); 2757 if (rc) { 2758 PMD_DRV_LOG(ERR, "Initialize func_to_func failed, dev_name: %s", 2759 eth_dev->data->name); 2760 goto init_func_to_func_fail; 2761 } 2762 2763 rc = hinic_card_workmode_check(nic_dev); 2764 if (rc) { 2765 PMD_DRV_LOG(ERR, "Check card workmode failed, dev_name: %s", 2766 eth_dev->data->name); 2767 goto workmode_check_fail; 2768 } 2769 2770 /* do l2nic reset to make chip clear */ 2771 rc = hinic_l2nic_reset(nic_dev->hwdev); 2772 if (rc) { 2773 PMD_DRV_LOG(ERR, "Do l2nic reset failed, dev_name: %s", 2774 eth_dev->data->name); 2775 goto l2nic_reset_fail; 2776 } 2777 2778 /* init dma and aeq msix attribute table */ 2779 (void)hinic_init_attr_table(nic_dev->hwdev); 2780 2781 /* init_cmdqs */ 2782 rc = hinic_comm_cmdqs_init(nic_dev->hwdev); 2783 if (rc) { 2784 PMD_DRV_LOG(ERR, "Initialize cmdq failed, dev_name: %s", 2785 eth_dev->data->name); 2786 goto init_cmdq_fail; 2787 } 2788 2789 /* set hardware state active */ 2790 rc = hinic_activate_hwdev_state(nic_dev->hwdev); 2791 if (rc) { 2792 PMD_DRV_LOG(ERR, "Initialize resources state failed, dev_name: %s", 2793 eth_dev->data->name); 2794 goto init_resources_state_fail; 2795 } 2796 2797 /* init_capability */ 2798 rc = hinic_init_capability(nic_dev->hwdev); 2799 if (rc) { 2800 PMD_DRV_LOG(ERR, "Initialize capability failed, dev_name: %s", 2801 eth_dev->data->name); 2802 goto init_cap_fail; 2803 } 2804 2805 /* get nic capability */ 2806 if (!hinic_support_nic(nic_dev->hwdev, &nic_dev->nic_cap)) 2807 goto nic_check_fail; 2808 2809 /* init root cla and function table */ 2810 rc = hinic_init_nicio(nic_dev->hwdev); 2811 if (rc) { 2812 PMD_DRV_LOG(ERR, "Initialize nic_io failed, dev_name: %s", 2813 eth_dev->data->name); 2814 goto init_nicio_fail; 2815 } 2816 2817 /* init_software_txrxq */ 2818 rc = hinic_init_sw_rxtxqs(nic_dev); 2819 if (rc) { 2820 PMD_DRV_LOG(ERR, "Initialize sw_rxtxqs failed, dev_name: %s", 2821 eth_dev->data->name); 2822 goto init_sw_rxtxqs_fail; 2823 } 2824 2825 rc = hinic_copy_mempool_init(nic_dev); 2826 if (rc) { 2827 PMD_DRV_LOG(ERR, "Create copy mempool failed, dev_name: %s", 2828 eth_dev->data->name); 2829 goto init_mpool_fail; 2830 } 2831 2832 /* set hardware feature to default status */ 2833 rc = hinic_set_default_hw_feature(nic_dev); 2834 if (rc) { 2835 PMD_DRV_LOG(ERR, "Initialize hardware default features failed, dev_name: %s", 2836 eth_dev->data->name); 2837 goto set_default_hw_feature_fail; 2838 } 2839 2840 return 0; 2841 2842 set_default_hw_feature_fail: 2843 hinic_copy_mempool_uninit(nic_dev); 2844 2845 init_mpool_fail: 2846 hinic_deinit_sw_rxtxqs(nic_dev); 2847 2848 init_sw_rxtxqs_fail: 2849 hinic_deinit_nicio(nic_dev->hwdev); 2850 2851 nic_check_fail: 2852 init_nicio_fail: 2853 init_cap_fail: 2854 hinic_deactivate_hwdev_state(nic_dev->hwdev); 2855 2856 
init_resources_state_fail: 2857 hinic_comm_cmdqs_free(nic_dev->hwdev); 2858 2859 init_cmdq_fail: 2860 l2nic_reset_fail: 2861 workmode_check_fail: 2862 hinic_comm_func_to_func_free(nic_dev->hwdev); 2863 2864 init_func_to_func_fail: 2865 hinic_comm_pf_to_mgmt_free(nic_dev->hwdev); 2866 2867 init_pf_to_mgmt_fail: 2868 hinic_comm_aeqs_free(nic_dev->hwdev); 2869 2870 init_aeqs_fail: 2871 free_cfg_mgmt(nic_dev->hwdev); 2872 2873 init_cfgmgnt_fail: 2874 hinic_hwif_res_free(nic_dev->hwdev); 2875 2876 init_hwif_fail: 2877 hinic_osdep_deinit(nic_dev->hwdev); 2878 2879 init_osdep_fail: 2880 rte_free(nic_dev->hwdev); 2881 nic_dev->hwdev = NULL; 2882 2883 return rc; 2884 } 2885 2886 static void hinic_nic_dev_destroy(struct rte_eth_dev *eth_dev) 2887 { 2888 struct hinic_nic_dev *nic_dev = 2889 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 2890 2891 (void)hinic_set_link_status_follow(nic_dev->hwdev, 2892 HINIC_LINK_FOLLOW_DEFAULT); 2893 hinic_copy_mempool_uninit(nic_dev); 2894 hinic_deinit_sw_rxtxqs(nic_dev); 2895 hinic_deinit_nicio(nic_dev->hwdev); 2896 hinic_deactivate_hwdev_state(nic_dev->hwdev); 2897 hinic_comm_cmdqs_free(nic_dev->hwdev); 2898 hinic_comm_func_to_func_free(nic_dev->hwdev); 2899 hinic_comm_pf_to_mgmt_free(nic_dev->hwdev); 2900 hinic_comm_aeqs_free(nic_dev->hwdev); 2901 free_cfg_mgmt(nic_dev->hwdev); 2902 hinic_hwif_res_free(nic_dev->hwdev); 2903 hinic_osdep_deinit(nic_dev->hwdev); 2904 rte_free(nic_dev->hwdev); 2905 nic_dev->hwdev = NULL; 2906 } 2907 2908 /** 2909 * DPDK callback to close the device. 2910 * 2911 * @param dev 2912 * Pointer to Ethernet device structure. 2913 */ 2914 static void hinic_dev_close(struct rte_eth_dev *dev) 2915 { 2916 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2917 2918 if (hinic_test_and_set_bit(HINIC_DEV_CLOSE, &nic_dev->dev_status)) { 2919 PMD_DRV_LOG(WARNING, "Device %s already closed", 2920 dev->data->name); 2921 return; 2922 } 2923 2924 /* stop device first */ 2925 hinic_dev_stop(dev); 2926 2927 /* rx_cqe, rx_info */ 2928 hinic_free_all_rx_resources(dev); 2929 2930 /* tx_info */ 2931 hinic_free_all_tx_resources(dev); 2932 2933 /* free wq, pi_dma_addr */ 2934 hinic_free_all_rq(nic_dev); 2935 2936 /* free wq, db_addr */ 2937 hinic_free_all_sq(nic_dev); 2938 2939 /* deinit mac vlan tbl */ 2940 hinic_deinit_mac_addr(dev); 2941 hinic_remove_all_vlanid(dev); 2942 2943 /* disable hardware and uio interrupt */ 2944 hinic_disable_interrupt(dev); 2945 2946 /* deinit nic hardware device */ 2947 hinic_nic_dev_destroy(dev); 2948 } 2949 2950 static const struct eth_dev_ops hinic_pmd_ops = { 2951 .dev_configure = hinic_dev_configure, 2952 .dev_infos_get = hinic_dev_infos_get, 2953 .fw_version_get = hinic_fw_version_get, 2954 .rx_queue_setup = hinic_rx_queue_setup, 2955 .tx_queue_setup = hinic_tx_queue_setup, 2956 .dev_start = hinic_dev_start, 2957 .dev_set_link_up = hinic_dev_set_link_up, 2958 .dev_set_link_down = hinic_dev_set_link_down, 2959 .link_update = hinic_link_update, 2960 .rx_queue_release = hinic_rx_queue_release, 2961 .tx_queue_release = hinic_tx_queue_release, 2962 .dev_stop = hinic_dev_stop, 2963 .dev_close = hinic_dev_close, 2964 .mtu_set = hinic_dev_set_mtu, 2965 .vlan_filter_set = hinic_vlan_filter_set, 2966 .vlan_offload_set = hinic_vlan_offload_set, 2967 .allmulticast_enable = hinic_dev_allmulticast_enable, 2968 .allmulticast_disable = hinic_dev_allmulticast_disable, 2969 .promiscuous_enable = hinic_dev_promiscuous_enable, 2970 .promiscuous_disable = hinic_dev_promiscuous_disable, 2971 .flow_ctrl_get = hinic_flow_ctrl_get, 
2972 .flow_ctrl_set = hinic_flow_ctrl_set, 2973 .rss_hash_update = hinic_rss_hash_update, 2974 .rss_hash_conf_get = hinic_rss_conf_get, 2975 .reta_update = hinic_rss_indirtbl_update, 2976 .reta_query = hinic_rss_indirtbl_query, 2977 .stats_get = hinic_dev_stats_get, 2978 .stats_reset = hinic_dev_stats_reset, 2979 .xstats_get = hinic_dev_xstats_get, 2980 .xstats_reset = hinic_dev_xstats_reset, 2981 .xstats_get_names = hinic_dev_xstats_get_names, 2982 .rxq_info_get = hinic_rxq_info_get, 2983 .txq_info_get = hinic_txq_info_get, 2984 .mac_addr_set = hinic_set_mac_addr, 2985 .mac_addr_remove = hinic_mac_addr_remove, 2986 .mac_addr_add = hinic_mac_addr_add, 2987 .set_mc_addr_list = hinic_set_mc_addr_list, 2988 .filter_ctrl = hinic_dev_filter_ctrl, 2989 }; 2990 2991 static const struct eth_dev_ops hinic_pmd_vf_ops = { 2992 .dev_configure = hinic_dev_configure, 2993 .dev_infos_get = hinic_dev_infos_get, 2994 .fw_version_get = hinic_fw_version_get, 2995 .rx_queue_setup = hinic_rx_queue_setup, 2996 .tx_queue_setup = hinic_tx_queue_setup, 2997 .dev_start = hinic_dev_start, 2998 .link_update = hinic_link_update, 2999 .rx_queue_release = hinic_rx_queue_release, 3000 .tx_queue_release = hinic_tx_queue_release, 3001 .dev_stop = hinic_dev_stop, 3002 .dev_close = hinic_dev_close, 3003 .mtu_set = hinic_dev_set_mtu, 3004 .vlan_filter_set = hinic_vlan_filter_set, 3005 .vlan_offload_set = hinic_vlan_offload_set, 3006 .allmulticast_enable = hinic_dev_allmulticast_enable, 3007 .allmulticast_disable = hinic_dev_allmulticast_disable, 3008 .rss_hash_update = hinic_rss_hash_update, 3009 .rss_hash_conf_get = hinic_rss_conf_get, 3010 .reta_update = hinic_rss_indirtbl_update, 3011 .reta_query = hinic_rss_indirtbl_query, 3012 .stats_get = hinic_dev_stats_get, 3013 .stats_reset = hinic_dev_stats_reset, 3014 .xstats_get = hinic_dev_xstats_get, 3015 .xstats_reset = hinic_dev_xstats_reset, 3016 .xstats_get_names = hinic_dev_xstats_get_names, 3017 .rxq_info_get = hinic_rxq_info_get, 3018 .txq_info_get = hinic_txq_info_get, 3019 .mac_addr_set = hinic_set_mac_addr, 3020 .mac_addr_remove = hinic_mac_addr_remove, 3021 .mac_addr_add = hinic_mac_addr_add, 3022 .set_mc_addr_list = hinic_set_mc_addr_list, 3023 .filter_ctrl = hinic_dev_filter_ctrl, 3024 }; 3025 3026 static int hinic_func_init(struct rte_eth_dev *eth_dev) 3027 { 3028 struct rte_pci_device *pci_dev; 3029 struct rte_ether_addr *eth_addr; 3030 struct hinic_nic_dev *nic_dev; 3031 struct hinic_filter_info *filter_info; 3032 struct hinic_tcam_info *tcam_info; 3033 u32 mac_size; 3034 int rc; 3035 3036 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3037 3038 /* EAL is SECONDARY and eth_dev is already created */ 3039 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3040 PMD_DRV_LOG(INFO, "Initialize %s in secondary process", 3041 eth_dev->data->name); 3042 3043 return 0; 3044 } 3045 3046 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 3047 memset(nic_dev, 0, sizeof(*nic_dev)); 3048 3049 snprintf(nic_dev->proc_dev_name, 3050 sizeof(nic_dev->proc_dev_name), 3051 "hinic-%.4x:%.2x:%.2x.%x", 3052 pci_dev->addr.domain, pci_dev->addr.bus, 3053 pci_dev->addr.devid, pci_dev->addr.function); 3054 3055 /* alloc mac_addrs */ 3056 mac_size = HINIC_MAX_UC_MAC_ADDRS * sizeof(struct rte_ether_addr); 3057 eth_addr = rte_zmalloc("hinic_mac", mac_size, 0); 3058 if (!eth_addr) { 3059 PMD_DRV_LOG(ERR, "Allocate ethernet addresses' memory failed, dev_name: %s", 3060 eth_dev->data->name); 3061 rc = -ENOMEM; 3062 goto eth_addr_fail; 3063 } 3064 eth_dev->data->mac_addrs = eth_addr; 3065 3066 mac_size = 
HINIC_MAX_MC_MAC_ADDRS * sizeof(struct rte_ether_addr); 3067 nic_dev->mc_list = rte_zmalloc("hinic_mc", mac_size, 0); 3068 if (!nic_dev->mc_list) { 3069 PMD_DRV_LOG(ERR, "Allocate mcast address' memory failed, dev_name: %s", 3070 eth_dev->data->name); 3071 rc = -ENOMEM; 3072 goto mc_addr_fail; 3073 } 3074 3075 /* 3076 * Pass the information to the rte_eth_dev_close() that it should also 3077 * release the private port resources. 3078 */ 3079 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 3080 3081 /* create hardware nic_device */ 3082 rc = hinic_nic_dev_create(eth_dev); 3083 if (rc) { 3084 PMD_DRV_LOG(ERR, "Create nic device failed, dev_name: %s", 3085 eth_dev->data->name); 3086 goto create_nic_dev_fail; 3087 } 3088 3089 if (HINIC_IS_VF(nic_dev->hwdev)) 3090 eth_dev->dev_ops = &hinic_pmd_vf_ops; 3091 else 3092 eth_dev->dev_ops = &hinic_pmd_ops; 3093 3094 rc = hinic_init_mac_addr(eth_dev); 3095 if (rc) { 3096 PMD_DRV_LOG(ERR, "Initialize mac table failed, dev_name: %s", 3097 eth_dev->data->name); 3098 goto init_mac_fail; 3099 } 3100 3101 /* register callback func to eal lib */ 3102 rc = rte_intr_callback_register(&pci_dev->intr_handle, 3103 hinic_dev_interrupt_handler, 3104 (void *)eth_dev); 3105 if (rc) { 3106 PMD_DRV_LOG(ERR, "Register rte interrupt callback failed, dev_name: %s", 3107 eth_dev->data->name); 3108 goto reg_intr_cb_fail; 3109 } 3110 3111 /* enable uio/vfio intr/eventfd mapping */ 3112 rc = rte_intr_enable(&pci_dev->intr_handle); 3113 if (rc) { 3114 PMD_DRV_LOG(ERR, "Enable rte interrupt failed, dev_name: %s", 3115 eth_dev->data->name); 3116 goto enable_intr_fail; 3117 } 3118 hinic_set_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status); 3119 3120 /* initialize filter info */ 3121 filter_info = &nic_dev->filter; 3122 tcam_info = &nic_dev->tcam; 3123 memset(filter_info, 0, sizeof(struct hinic_filter_info)); 3124 memset(tcam_info, 0, sizeof(struct hinic_tcam_info)); 3125 /* initialize 5tuple filter list */ 3126 TAILQ_INIT(&filter_info->fivetuple_list); 3127 TAILQ_INIT(&tcam_info->tcam_list); 3128 TAILQ_INIT(&nic_dev->filter_ntuple_list); 3129 TAILQ_INIT(&nic_dev->filter_ethertype_list); 3130 TAILQ_INIT(&nic_dev->filter_fdir_rule_list); 3131 TAILQ_INIT(&nic_dev->hinic_flow_list); 3132 3133 hinic_set_bit(HINIC_DEV_INIT, &nic_dev->dev_status); 3134 PMD_DRV_LOG(INFO, "Initialize %s in primary successfully", 3135 eth_dev->data->name); 3136 3137 return 0; 3138 3139 enable_intr_fail: 3140 (void)rte_intr_callback_unregister(&pci_dev->intr_handle, 3141 hinic_dev_interrupt_handler, 3142 (void *)eth_dev); 3143 3144 reg_intr_cb_fail: 3145 hinic_deinit_mac_addr(eth_dev); 3146 3147 init_mac_fail: 3148 eth_dev->dev_ops = NULL; 3149 hinic_nic_dev_destroy(eth_dev); 3150 3151 create_nic_dev_fail: 3152 rte_free(nic_dev->mc_list); 3153 nic_dev->mc_list = NULL; 3154 3155 mc_addr_fail: 3156 rte_free(eth_addr); 3157 eth_dev->data->mac_addrs = NULL; 3158 3159 eth_addr_fail: 3160 PMD_DRV_LOG(ERR, "Initialize %s in primary failed", 3161 eth_dev->data->name); 3162 return rc; 3163 } 3164 3165 static int hinic_dev_init(struct rte_eth_dev *eth_dev) 3166 { 3167 struct rte_pci_device *pci_dev; 3168 3169 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3170 3171 PMD_DRV_LOG(INFO, "Initializing pf hinic-%.4x:%.2x:%.2x.%x in %s process", 3172 pci_dev->addr.domain, pci_dev->addr.bus, 3173 pci_dev->addr.devid, pci_dev->addr.function, 3174 (rte_eal_process_type() == RTE_PROC_PRIMARY) ? 
3175 "primary" : "secondary"); 3176 3177 /* rte_eth_dev rx_burst and tx_burst */ 3178 eth_dev->rx_pkt_burst = hinic_recv_pkts; 3179 eth_dev->tx_pkt_burst = hinic_xmit_pkts; 3180 3181 return hinic_func_init(eth_dev); 3182 } 3183 3184 static int hinic_dev_uninit(struct rte_eth_dev *dev) 3185 { 3186 struct hinic_nic_dev *nic_dev; 3187 3188 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 3189 hinic_clear_bit(HINIC_DEV_INIT, &nic_dev->dev_status); 3190 3191 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3192 return 0; 3193 3194 hinic_dev_close(dev); 3195 3196 dev->dev_ops = NULL; 3197 dev->rx_pkt_burst = NULL; 3198 dev->tx_pkt_burst = NULL; 3199 3200 rte_free(nic_dev->mc_list); 3201 3202 rte_free(dev->data->mac_addrs); 3203 dev->data->mac_addrs = NULL; 3204 3205 return HINIC_OK; 3206 } 3207 3208 static struct rte_pci_id pci_id_hinic_map[] = { 3209 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_PRD) }, 3210 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_25GE) }, 3211 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_100GE) }, 3212 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF) }, 3213 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF_HV) }, 3214 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_DUAL_25GE) }, 3215 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_100GE) }, 3216 {.vendor_id = 0}, 3217 }; 3218 3219 static int hinic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3220 struct rte_pci_device *pci_dev) 3221 { 3222 return rte_eth_dev_pci_generic_probe(pci_dev, 3223 sizeof(struct hinic_nic_dev), hinic_dev_init); 3224 } 3225 3226 static int hinic_pci_remove(struct rte_pci_device *pci_dev) 3227 { 3228 return rte_eth_dev_pci_generic_remove(pci_dev, hinic_dev_uninit); 3229 } 3230 3231 static struct rte_pci_driver rte_hinic_pmd = { 3232 .id_table = pci_id_hinic_map, 3233 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 3234 .probe = hinic_pci_probe, 3235 .remove = hinic_pci_remove, 3236 }; 3237 3238 RTE_PMD_REGISTER_PCI(net_hinic, rte_hinic_pmd); 3239 RTE_PMD_REGISTER_PCI_TABLE(net_hinic, pci_id_hinic_map); 3240 3241 RTE_INIT(hinic_init_log) 3242 { 3243 hinic_logtype = rte_log_register("pmd.net.hinic"); 3244 if (hinic_logtype >= 0) 3245 rte_log_set_level(hinic_logtype, RTE_LOG_INFO); 3246 } 3247
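
/*
 * Usage note (illustrative, not generated from this file): the driver above
 * registers itself as "net_hinic", so a supported adapter is probed
 * automatically by rte_eal_init() once the device is bound to a userspace
 * I/O driver such as vfio-pci. On DPDK releases of this vintage the device
 * can also be selected explicitly with an EAL whitelist option, e.g.
 * "-w 0000:05:00.0" (the PCI address shown here is only an example).
 */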