/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>
#include <rte_errno.h>
#include <rte_ether.h>

#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_cfg.h"
#include "base/hinic_pmd_mgmt.h"
#include "base/hinic_pmd_cmdq.h"
#include "base/hinic_pmd_niccfg.h"
#include "base/hinic_pmd_nicio.h"
#include "base/hinic_pmd_mbox.h"
#include "hinic_pmd_ethdev.h"
#include "hinic_pmd_tx.h"
#include "hinic_pmd_rx.h"

/* Vendor ID used by Huawei devices */
#define HINIC_HUAWEI_VENDOR_ID		0x19E5

/* Hinic devices */
#define HINIC_DEV_ID_PRD		0x1822
#define HINIC_DEV_ID_VF			0x375E
#define HINIC_DEV_ID_VF_HV		0x379E

/* Mezz card for Blade Server */
#define HINIC_DEV_ID_MEZZ_25GE		0x0210
#define HINIC_DEV_ID_MEZZ_100GE		0x0205

/* 2*25G and 2*100G card */
#define HINIC_DEV_ID_1822_DUAL_25GE	0x0206
#define HINIC_DEV_ID_1822_100GE		0x0200

#define HINIC_SERVICE_MODE_NIC		2

#define HINIC_INTR_CB_UNREG_MAX_RETRIES	10

#define DEFAULT_BASE_COS		4
#define NR_MAX_COS			8

#define HINIC_MIN_RX_BUF_SIZE		1024
#define HINIC_MAX_UC_MAC_ADDRS		128
#define HINIC_MAX_MC_MAC_ADDRS		2048

#define HINIC_DEFAULT_BURST_SIZE	32
#define HINIC_DEFAULT_NB_QUEUES		1
#define HINIC_DEFAULT_RING_SIZE		1024
#define HINIC_MAX_LRO_SIZE		65536

/*
 * vlan_id is a 12 bit number.
 * The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
 * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element.
 * The higher 7 bit val specifies VFTA array index.
 */
#define HINIC_VFTA_BIT(vlan_id)		(1 << ((vlan_id) & 0x1F))
#define HINIC_VFTA_IDX(vlan_id)		((vlan_id) >> 5)
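
/*
 * Illustrative sketch (not part of the driver): for vlan_id 1023 (0x3FF),
 * HINIC_VFTA_IDX(1023) = 1023 >> 5 = 31 and
 * HINIC_VFTA_BIT(1023) = 1 << (1023 & 0x1F) = 1 << 31, so that VLAN is
 * tracked in bit 31 of vfta[31] of the 128-entry u32 bitmap kept in
 * struct hinic_nic_dev.
 */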

#define HINIC_VLAN_FILTER_EN		(1U << 0)

/* lro number limit for one packet */
#define HINIC_LRO_WQE_NUM_DEFAULT	8

struct hinic_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	u32 offset;
};

#define HINIC_FUNC_STAT(_stat_item) {	\
	.name = #_stat_item, \
	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
}

#define HINIC_PORT_STAT(_stat_item) { \
	.name = #_stat_item, \
	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
}

static const struct hinic_xstats_name_off hinic_vport_stats_strings[] = {
	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(tx_discard_vport),
	HINIC_FUNC_STAT(rx_discard_vport),
	HINIC_FUNC_STAT(tx_err_vport),
	HINIC_FUNC_STAT(rx_err_vport),
};

#define HINIC_VPORT_XSTATS_NUM (sizeof(hinic_vport_stats_strings) / \
		sizeof(hinic_vport_stats_strings[0]))

static const struct hinic_xstats_name_off hinic_phyport_stats_strings[] = {
	HINIC_PORT_STAT(mac_rx_total_pkt_num),
	HINIC_PORT_STAT(mac_rx_total_oct_num),
	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_bad_oct_num),
	HINIC_PORT_STAT(mac_rx_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_good_oct_num),
	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_oct_num),
	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_bad_oct_num),
	HINIC_PORT_STAT(mac_tx_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_good_oct_num),
	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
	HINIC_PORT_STAT(mac_rx_mac_pause_num),
	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_rx_mac_control_pkt_num),
	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_trans_jabber_pkt_num),
	HINIC_PORT_STAT(mac_tx_mac_pause_num),
	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_tx_mac_control_pkt_num),
	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
};

#define HINIC_PHYPORT_XSTATS_NUM (sizeof(hinic_phyport_stats_strings) / \
		sizeof(hinic_phyport_stats_strings[0]))

static const struct hinic_xstats_name_off hinic_rxq_stats_strings[] = {
	{"rx_nombuf", offsetof(struct hinic_rxq_stats, rx_nombuf)},
	{"burst_pkt", offsetof(struct hinic_rxq_stats, burst_pkts)},
};

#define HINIC_RXQ_XSTATS_NUM (sizeof(hinic_rxq_stats_strings) / \
		sizeof(hinic_rxq_stats_strings[0]))

static const struct hinic_xstats_name_off hinic_txq_stats_strings[] = {
	{"tx_busy", offsetof(struct hinic_txq_stats, tx_busy)},
	{"offload_errors", offsetof(struct hinic_txq_stats, off_errs)},
	{"copy_pkts", offsetof(struct hinic_txq_stats, cpy_pkts)},
	{"rl_drop", offsetof(struct hinic_txq_stats, rl_drop)},
	{"burst_pkts", offsetof(struct hinic_txq_stats, burst_pkts)},
	{"sge_len0", offsetof(struct hinic_txq_stats, sge_len0)},
	{"mbuf_null", offsetof(struct hinic_txq_stats, mbuf_null)},
};

#define HINIC_TXQ_XSTATS_NUM (sizeof(hinic_txq_stats_strings) / \
		sizeof(hinic_txq_stats_strings[0]))

static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev)
{
	if (HINIC_IS_VF(nic_dev->hwdev)) {
		return (HINIC_VPORT_XSTATS_NUM +
			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
	} else {
		return (HINIC_VPORT_XSTATS_NUM +
			HINIC_PHYPORT_XSTATS_NUM +
			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
	}
}
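
/*
 * Illustrative sketch (not part of the driver): with the tables above, a PF
 * with 4 RX and 4 TX queues reports
 * 16 (vport) + 85 (phy port) + 2 * 4 (rxq) + 7 * 4 (txq) = 137 extended
 * stats, while a VF with the same queue counts skips the 85 physical-port
 * counters and reports 52.
 */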

static const struct rte_eth_desc_lim hinic_rx_desc_lim = {
	.nb_max = HINIC_MAX_QUEUE_DEPTH,
	.nb_min = HINIC_MIN_QUEUE_DEPTH,
	.nb_align = HINIC_RXD_ALIGN,
};

static const struct rte_eth_desc_lim hinic_tx_desc_lim = {
	.nb_max = HINIC_MAX_QUEUE_DEPTH,
	.nb_min = HINIC_MIN_QUEUE_DEPTH,
	.nb_align = HINIC_TXD_ALIGN,
};

static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);

/**
 * Interrupt handler triggered by NIC for handling
 * specific event.
 *
 * @param param
 *   The address of parameter (struct rte_eth_dev *) registered before.
 */
static void hinic_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	if (!rte_bit_relaxed_get32(HINIC_DEV_INTR_EN, &nic_dev->dev_status)) {
		PMD_DRV_LOG(WARNING, "Device's interrupt is disabled, ignore interrupt event, dev_name: %s, port_id: %d",
			    nic_dev->proc_dev_name, dev->data->port_id);
		return;
	}

	/* aeq0 msg handler */
	hinic_dev_handle_aeq_event(nic_dev->hwdev, param);
}

/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues, mtu size
 * and configure RSS.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int hinic_dev_configure(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev;
	struct hinic_nic_io *nic_io;
	int err;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	nic_io = nic_dev->hwdev->nic_io;

	nic_dev->num_sq = dev->data->nb_tx_queues;
	nic_dev->num_rq = dev->data->nb_rx_queues;

	nic_io->num_sqs = dev->data->nb_tx_queues;
	nic_io->num_rqs = dev->data->nb_rx_queues;

	/* queue pair is max_num(sq, rq) */
	nic_dev->num_qps = (nic_dev->num_sq > nic_dev->num_rq) ?
			nic_dev->num_sq : nic_dev->num_rq;
	nic_io->num_qps = nic_dev->num_qps;

	if (nic_dev->num_qps > nic_io->max_qps) {
		PMD_DRV_LOG(ERR,
			"Queue number out of range, get queue_num:%d, max_queue_num:%d",
			nic_dev->num_qps, nic_io->max_qps);
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* mtu size is 256~9600 */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
	    dev->data->dev_conf.rxmode.max_rx_pkt_len >
	    HINIC_MAX_JUMBO_FRAME_SIZE) {
		PMD_DRV_LOG(ERR,
			"Max rx pkt len out of range, get max_rx_pkt_len:%d, "
			"expect between %d and %d",
			dev->data->dev_conf.rxmode.max_rx_pkt_len,
			HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}

	nic_dev->mtu_size =
		HINIC_PKTLEN_TO_MTU(dev->data->dev_conf.rxmode.max_rx_pkt_len);

	/* rss template */
	err = hinic_config_mq_mode(dev, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Config multi-queue failed");
		return err;
	}

	/* init vlan offload */
	err = hinic_vlan_offload_set(dev,
				ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
	if (err) {
		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
		(void)hinic_config_mq_mode(dev, FALSE);
		return err;
	}

	/* clear fdir filter flag in function table */
	hinic_free_fdir_filter(nic_dev);

	return HINIC_OK;
}

/**
 * DPDK callback to create the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue_idx
 *   RX queue index.
 * @param nb_desc
 *   Number of descriptors for receive queue.
 * @param socket_id
 *   NUMA socket on which memory must be allocated.
 * @param rx_conf
 *   Thresholds parameters (only rx_free_thresh is used).
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int hinic_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				uint16_t nb_desc, unsigned int socket_id,
				__rte_unused const struct rte_eth_rxconf *rx_conf,
				struct rte_mempool *mp)
{
	int rc;
	struct hinic_nic_dev *nic_dev;
	struct hinic_hwdev *hwdev;
	struct hinic_rxq *rxq;
	u16 rq_depth, rx_free_thresh;
	u32 buf_size;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	hwdev = nic_dev->hwdev;

	/* queue depth must be power of 2, otherwise will be aligned up */
	rq_depth = (nb_desc & (nb_desc - 1)) ?
		((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;

	/*
	 * Validate number of receive descriptors.
	 * It must not exceed hardware maximum and minimum.
	 */
	if (rq_depth > HINIC_MAX_QUEUE_DEPTH ||
	    rq_depth < HINIC_MIN_QUEUE_DEPTH) {
		PMD_DRV_LOG(ERR, "RX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
			    HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
			    (int)nb_desc, (int)rq_depth,
			    (int)dev->data->port_id, (int)queue_idx);
		return -EINVAL;
	}
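
	/*
	 * Illustrative sketch (not part of the driver), assuming ilog2()
	 * returns the floor of log2 as in the Linux helper it mirrors:
	 * nb_desc = 1000 is not a power of two, so rq_depth becomes
	 * 1U << (ilog2(1000) + 1) = 1024, while nb_desc = 512 is already a
	 * power of two and is used unchanged.
	 */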

	/*
	 * The RX descriptor ring will be cleaned after rxq->rx_free_thresh
	 * descriptors are used or if the number of descriptors required
	 * to receive a packet is greater than the number of free RX
	 * descriptors.
	 * The following constraints must be satisfied:
	 * rx_free_thresh must be greater than 0.
	 * rx_free_thresh must be less than the size of the ring minus 1.
	 * When set to zero use default values.
	 */
	rx_free_thresh = (u16)((rx_conf->rx_free_thresh) ?
			rx_conf->rx_free_thresh : HINIC_DEFAULT_RX_FREE_THRESH);
	if (rx_free_thresh >= (rq_depth - 1)) {
		PMD_DRV_LOG(ERR, "rx_free_thresh must be less than the number of RX descriptors minus 1. (rx_free_thresh=%u port=%d queue=%d)",
			    (unsigned int)rx_free_thresh,
			    (int)dev->data->port_id,
			    (int)queue_idx);
		return -EINVAL;
	}

	rxq = rte_zmalloc_socket("hinic_rx_queue", sizeof(struct hinic_rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "Allocate rxq[%d] failed, dev_name: %s",
			    queue_idx, dev->data->name);
		return -ENOMEM;
	}
	nic_dev->rxqs[queue_idx] = rxq;

	/* alloc rx rq hw wqe page */
	rc = hinic_create_rq(hwdev, queue_idx, rq_depth, socket_id);
	if (rc) {
		PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d",
			    queue_idx, dev->data->name, rq_depth);
		goto create_rq_fail;
	}

	/* mbuf pool must be assigned before setup rx resources */
	rxq->mb_pool = mp;

	rc = hinic_convert_rx_buf_size(rte_pktmbuf_data_room_size(rxq->mb_pool) -
				       RTE_PKTMBUF_HEADROOM, &buf_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Adjust buf size failed, dev_name: %s",
			    dev->data->name);
		goto adjust_bufsize_fail;
	}

	/* rx queue info, rearm control */
	rxq->wq = &hwdev->nic_io->rq_wq[queue_idx];
	rxq->pi_virt_addr = hwdev->nic_io->qps[queue_idx].rq.pi_virt_addr;
	rxq->nic_dev = nic_dev;
	rxq->q_id = queue_idx;
	rxq->q_depth = rq_depth;
	rxq->buf_len = (u16)buf_size;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->socket_id = socket_id;

	/* the last point can't do mbuf rearm in bulk */
	rxq->rxinfo_align_end = rxq->q_depth - rxq->rx_free_thresh;

	/* device port identifier */
	rxq->port_id = dev->data->port_id;

	/* alloc rx_cqe and prepare rq_wqe */
	rc = hinic_setup_rx_resources(rxq);
	if (rc) {
		PMD_DRV_LOG(ERR, "Setup rxq[%d] rx_resources failed, dev_name: %s",
			    queue_idx, dev->data->name);
		goto setup_rx_res_err;
	}

	/* record nic_dev rxq in rte_eth rx_queues */
	dev->data->rx_queues[queue_idx] = rxq;

	return 0;

setup_rx_res_err:
adjust_bufsize_fail:
	hinic_destroy_rq(hwdev, queue_idx);

create_rq_fail:
	rte_free(rxq);

	return rc;
}

static void hinic_reset_rx_queue(struct rte_eth_dev *dev)
{
	struct hinic_rxq *rxq;
	struct hinic_nic_dev *nic_dev;
	int q_id = 0;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
		rxq = dev->data->rx_queues[q_id];

		rxq->wq->cons_idx = 0;
		rxq->wq->prod_idx = 0;
		rxq->wq->delta = rxq->q_depth;
		rxq->wq->mask = rxq->q_depth - 1;

		/* alloc mbuf to rq */
		hinic_rx_alloc_pkts(rxq);
	}
}
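
/*
 * Illustrative sketch (not part of the driver), values are arbitrary
 * examples: an application reaches hinic_rx_queue_setup() above through the
 * generic ethdev API after creating an mbuf pool, e.g.
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *			256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	ret = rte_eth_rx_queue_setup(port_id, 0, HINIC_DEFAULT_RING_SIZE,
 *			rte_socket_id(), NULL, mp);
 *
 * Passing NULL for rx_conf selects the port's default rx configuration, so
 * rx_free_thresh falls back to HINIC_DEFAULT_RX_FREE_THRESH here.
 */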

/**
 * DPDK callback to configure the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue_idx
 *   Transmit queue index.
 * @param nb_desc
 *   Number of descriptors for transmit queue.
 * @param socket_id
 *   NUMA socket on which memory must be allocated.
 * @param tx_conf
 *   Tx queue configuration parameters.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int hinic_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				uint16_t nb_desc, unsigned int socket_id,
				__rte_unused const struct rte_eth_txconf *tx_conf)
{
	int rc;
	struct hinic_nic_dev *nic_dev;
	struct hinic_hwdev *hwdev;
	struct hinic_txq *txq;
	u16 sq_depth, tx_free_thresh;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	hwdev = nic_dev->hwdev;

	/* queue depth must be power of 2, otherwise will be aligned up */
	sq_depth = (nb_desc & (nb_desc - 1)) ?
			((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;

	/*
	 * Validate number of transmit descriptors.
	 * It must not exceed hardware maximum and minimum.
	 */
	if (sq_depth > HINIC_MAX_QUEUE_DEPTH ||
	    sq_depth < HINIC_MIN_QUEUE_DEPTH) {
		PMD_DRV_LOG(ERR, "TX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
			    HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
			    (int)nb_desc, (int)sq_depth,
			    (int)dev->data->port_id, (int)queue_idx);
		return -EINVAL;
	}

	/*
	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
	 * descriptors are used or if the number of descriptors required
	 * to transmit a packet is greater than the number of free TX
	 * descriptors.
	 * The following constraints must be satisfied:
	 * tx_free_thresh must be greater than 0.
	 * tx_free_thresh must be less than the size of the ring minus 1.
	 * When set to zero use default values.
	 */
	tx_free_thresh = (u16)((tx_conf->tx_free_thresh) ?
			tx_conf->tx_free_thresh : HINIC_DEFAULT_TX_FREE_THRESH);
	if (tx_free_thresh >= (sq_depth - 1)) {
		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX descriptors minus 1. (tx_free_thresh=%u port=%d queue=%d)",
			    (unsigned int)tx_free_thresh, (int)dev->data->port_id,
			    (int)queue_idx);
		return -EINVAL;
	}

	txq = rte_zmalloc_socket("hinic_tx_queue", sizeof(struct hinic_txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!txq) {
		PMD_DRV_LOG(ERR, "Allocate txq[%d] failed, dev_name: %s",
			    queue_idx, dev->data->name);
		return -ENOMEM;
	}
	nic_dev->txqs[queue_idx] = txq;

	/* alloc tx sq hw wqe page */
	rc = hinic_create_sq(hwdev, queue_idx, sq_depth, socket_id);
	if (rc) {
		PMD_DRV_LOG(ERR, "Create txq[%d] failed, dev_name: %s, sq_depth: %d",
			    queue_idx, dev->data->name, sq_depth);
		goto create_sq_fail;
	}

	txq->q_id = queue_idx;
	txq->q_depth = sq_depth;
	txq->port_id = dev->data->port_id;
	txq->tx_free_thresh = tx_free_thresh;
	txq->nic_dev = nic_dev;
	txq->wq = &hwdev->nic_io->sq_wq[queue_idx];
	txq->sq = &hwdev->nic_io->qps[queue_idx].sq;
	txq->cons_idx_addr = hwdev->nic_io->qps[queue_idx].sq.cons_idx_addr;
	txq->sq_head_addr = HINIC_GET_WQ_HEAD(txq);
	txq->sq_bot_sge_addr = HINIC_GET_WQ_TAIL(txq) -
					sizeof(struct hinic_sq_bufdesc);
	txq->cos = nic_dev->default_cos;
	txq->socket_id = socket_id;

	/* alloc software txinfo */
	rc = hinic_setup_tx_resources(txq);
	if (rc) {
		PMD_DRV_LOG(ERR, "Setup txq[%d] tx_resources failed, dev_name: %s",
			    queue_idx, dev->data->name);
		goto setup_tx_res_fail;
	}

	/* record nic_dev txq in rte_eth tx_queues */
	dev->data->tx_queues[queue_idx] = txq;

	return HINIC_OK;

setup_tx_res_fail:
	hinic_destroy_sq(hwdev, queue_idx);

create_sq_fail:
	rte_free(txq);

	return rc;
}

static void hinic_reset_tx_queue(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev;
	struct hinic_txq *txq;
	struct hinic_nic_io *nic_io;
	struct hinic_hwdev *hwdev;
	volatile u32 *ci_addr;
	int q_id = 0;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	hwdev = nic_dev->hwdev;
	nic_io = hwdev->nic_io;

	for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
		txq = dev->data->tx_queues[q_id];

		txq->wq->cons_idx = 0;
		txq->wq->prod_idx = 0;
		txq->wq->delta = txq->q_depth;
		txq->wq->mask = txq->q_depth - 1;

		/* clear hardware ci */
		ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base,
							 q_id);
		*ci_addr = 0;
	}
}

/**
 * Get link speed from NIC.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param speed_capa
 *   Pointer to link speed structure.
 */
static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	u32 supported_link, advertised_link;
	int err;

#define HINIC_LINK_MODE_SUPPORT_1G	(1U << HINIC_GE_BASE_KX)

#define HINIC_LINK_MODE_SUPPORT_10G	(1U << HINIC_10GE_BASE_KR)

#define HINIC_LINK_MODE_SUPPORT_25G	((1U << HINIC_25GE_BASE_KR_S) | \
					 (1U << HINIC_25GE_BASE_CR_S) | \
					 (1U << HINIC_25GE_BASE_KR) | \
					 (1U << HINIC_25GE_BASE_CR))

#define HINIC_LINK_MODE_SUPPORT_40G	((1U << HINIC_40GE_BASE_KR4) | \
					 (1U << HINIC_40GE_BASE_CR4))

#define HINIC_LINK_MODE_SUPPORT_100G	((1U << HINIC_100GE_BASE_KR4) | \
					 (1U << HINIC_100GE_BASE_CR4))

	err = hinic_get_link_mode(nic_dev->hwdev,
				  &supported_link, &advertised_link);
	if (err || supported_link == HINIC_SUPPORTED_UNKNOWN ||
	    advertised_link == HINIC_SUPPORTED_UNKNOWN) {
		PMD_DRV_LOG(WARNING, "Get speed capability info failed, device: %s, port_id: %u",
			    nic_dev->proc_dev_name, dev->data->port_id);
	} else {
		*speed_capa = 0;
		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
			*speed_capa |= ETH_LINK_SPEED_1G;
		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
			*speed_capa |= ETH_LINK_SPEED_10G;
		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
			*speed_capa |= ETH_LINK_SPEED_25G;
		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
			*speed_capa |= ETH_LINK_SPEED_40G;
		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
			*speed_capa |= ETH_LINK_SPEED_100G;
	}
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param info
 *   Pointer to Info structure output buffer.
 */
static int
hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	info->max_rx_queues = nic_dev->nic_cap.max_rqs;
	info->max_tx_queues = nic_dev->nic_cap.max_sqs;
	info->min_rx_bufsize = HINIC_MIN_RX_BUF_SIZE;
	info->max_rx_pktlen = HINIC_MAX_JUMBO_FRAME_SIZE;
	info->max_mac_addrs = HINIC_MAX_UC_MAC_ADDRS;
	info->min_mtu = HINIC_MIN_MTU_SIZE;
	info->max_mtu = HINIC_MAX_MTU_SIZE;
	info->max_lro_pkt_size = HINIC_MAX_LRO_SIZE;

	hinic_get_speed_capa(dev, &info->speed_capa);
	info->rx_queue_offload_capa = 0;
	info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
				DEV_RX_OFFLOAD_IPV4_CKSUM |
				DEV_RX_OFFLOAD_UDP_CKSUM |
				DEV_RX_OFFLOAD_TCP_CKSUM |
				DEV_RX_OFFLOAD_VLAN_FILTER |
				DEV_RX_OFFLOAD_SCATTER |
				DEV_RX_OFFLOAD_JUMBO_FRAME |
				DEV_RX_OFFLOAD_TCP_LRO |
				DEV_RX_OFFLOAD_RSS_HASH;

	info->tx_queue_offload_capa = 0;
	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				DEV_TX_OFFLOAD_IPV4_CKSUM |
				DEV_TX_OFFLOAD_UDP_CKSUM |
				DEV_TX_OFFLOAD_TCP_CKSUM |
				DEV_TX_OFFLOAD_SCTP_CKSUM |
				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				DEV_TX_OFFLOAD_TCP_TSO |
				DEV_TX_OFFLOAD_MULTI_SEGS;

	info->hash_key_size = HINIC_RSS_KEY_SIZE;
	info->reta_size = HINIC_RSS_INDIR_SIZE;
	info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL;
	info->rx_desc_lim = hinic_rx_desc_lim;
	info->tx_desc_lim = hinic_tx_desc_lim;

	/* Driver-preferred Rx/Tx parameters */
	info->default_rxportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
	info->default_txportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
	info->default_rxportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
	info->default_txportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
	info->default_rxportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
	info->default_txportconf.ring_size = HINIC_DEFAULT_RING_SIZE;

	return 0;
}

static int hinic_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
				size_t fw_size)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	char fw_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
	int err;

	err = hinic_get_mgmt_version(nic_dev->hwdev, fw_ver);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get fw version");
		return -EINVAL;
	}

	if (fw_size < strlen(fw_ver) + 1)
		return (strlen(fw_ver) + 1);

	snprintf(fw_version, fw_size, "%s", fw_ver);

	return 0;
}

static int hinic_config_rx_mode(struct hinic_nic_dev *nic_dev, u32 rx_mode_ctrl)
{
	int err;

	err = hinic_set_rx_mode(nic_dev->hwdev, rx_mode_ctrl);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set rx mode");
		return -EINVAL;
	}
	nic_dev->rx_mode_status = rx_mode_ctrl;

	return 0;
}

static int hinic_rxtx_configure(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int err;

	/* rx configure, if rss enable, need to init default configuration */
	err = hinic_rx_configure(dev);
	if (err) {
		PMD_DRV_LOG(ERR, "Configure rss failed");
		return err;
	}

	/* rx mode init */
	err = hinic_config_rx_mode(nic_dev, HINIC_DEFAULT_RX_MODE);
	if (err) {
		PMD_DRV_LOG(ERR, "Configure rx_mode:0x%x failed",
			HINIC_DEFAULT_RX_MODE);
		goto set_rx_mode_fail;
	}

	return HINIC_OK;

set_rx_mode_fail:
	hinic_rx_remove_configure(dev);

	return err;
}

static void hinic_remove_rxtx_configure(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	(void)hinic_config_rx_mode(nic_dev, 0);
	hinic_rx_remove_configure(dev);
}

static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
					  struct rte_eth_link *link)
{
	int rc;
	u8 port_link_status = 0;
	struct nic_port_info port_link_info;
	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};

	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
	if (rc)
		return rc;

	if (!port_link_status) {
		link->link_status = ETH_LINK_DOWN;
		link->link_speed = 0;
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
		link->link_autoneg = ETH_LINK_FIXED;
		return HINIC_OK;
	}

	memset(&port_link_info, 0, sizeof(port_link_info));
	rc = hinic_get_port_info(nic_hwdev, &port_link_info);
	if (rc)
		return rc;

	link->link_speed = port_speed[port_link_info.speed % LINK_SPEED_MAX];
	link->link_duplex = port_link_info.duplex;
	link->link_autoneg = port_link_info.autoneg_state;
	link->link_status = port_link_status;

	return HINIC_OK;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion.
 *
 * @return
 *   0 link status changed, -1 link status not changed
 */
static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 10  /* 10ms */
#define MAX_REPEAT_TIME 100  /* 1s (100 * 10ms) in total */
	int rc = HINIC_OK;
	struct rte_eth_link link;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	unsigned int rep_cnt = MAX_REPEAT_TIME;

	memset(&link, 0, sizeof(link));
	do {
		/* Get link status information from hardware */
		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
		if (rc != HINIC_OK) {
			link.link_speed = ETH_SPEED_NUM_NONE;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Get link status failed");
			goto out;
		}

		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (rep_cnt--);

out:
	rc = rte_eth_linkstatus_set(dev, &link);
	return rc;
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int hinic_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int ret;

	/* link status follows the physical port status; bringing the link
	 * up enables the PMA
	 */
	ret = hinic_set_port_enable(nic_dev->hwdev, true);
	if (ret)
		PMD_DRV_LOG(ERR, "Set mac link up failed, dev_name: %s, port_id: %d",
			    nic_dev->proc_dev_name, dev->data->port_id);

	return ret;
}

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int hinic_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int ret;

	/* link status follows the physical port status; bringing the link
	 * down disables the PMA
	 */
	ret = hinic_set_port_enable(nic_dev->hwdev, false);
	if (ret)
		PMD_DRV_LOG(ERR, "Set mac link down failed, dev_name: %s, port_id: %d",
			    nic_dev->proc_dev_name, dev->data->port_id);

	return ret;
}

/**
 * DPDK callback to start the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int hinic_dev_start(struct rte_eth_dev *dev)
{
	int rc;
	char *name;
	struct hinic_nic_dev *nic_dev;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	name = dev->data->name;

	/* reset rx and tx queue */
	hinic_reset_rx_queue(dev);
	hinic_reset_tx_queue(dev);

	/* get func rx buf size */
	hinic_get_func_rx_buf_size(nic_dev);

	/* init txq and rxq context */
	rc = hinic_init_qp_ctxts(nic_dev->hwdev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Initialize qp context failed, dev_name: %s",
			    name);
		goto init_qp_fail;
	}

	/* rss template */
	rc = hinic_config_mq_mode(dev, TRUE);
	if (rc) {
		PMD_DRV_LOG(ERR, "Configure mq mode failed, dev_name: %s",
			    name);
		goto cfg_mq_mode_fail;
	}

	/* set default mtu */
	rc = hinic_set_port_mtu(nic_dev->hwdev, nic_dev->mtu_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Set mtu_size[%d] failed, dev_name: %s",
			    nic_dev->mtu_size, name);
		goto set_mtu_fail;
	}

	/* configure rss rx_mode and other rx or tx default feature */
	rc = hinic_rxtx_configure(dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Configure tx and rx failed, dev_name: %s",
			    name);
		goto cfg_rxtx_fail;
	}

	/* reactivate pf status so that the uP reports async events */
	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);

	/* open virtual port and ready to start packet receiving */
	rc = hinic_set_vport_enable(nic_dev->hwdev, true);
	if (rc) {
		PMD_DRV_LOG(ERR, "Enable vport failed, dev_name:%s", name);
		goto en_vport_fail;
	}

	/* open physical port and start packet receiving */
	rc = hinic_set_port_enable(nic_dev->hwdev, true);
	if (rc) {
		PMD_DRV_LOG(ERR, "Enable physical port failed, dev_name: %s",
			    name);
		goto en_port_fail;
	}

	/* update eth_dev link status */
	if (dev->data->dev_conf.intr_conf.lsc != 0)
		(void)hinic_link_update(dev, 0);

	rte_bit_relaxed_set32(HINIC_DEV_START, &nic_dev->dev_status);

	return 0;

en_port_fail:
	(void)hinic_set_vport_enable(nic_dev->hwdev, false);

en_vport_fail:
	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_INIT);

	/* flush tx and rx chip resources in case the vport enable did not
	 * fully take effect
	 */
	(void)hinic_flush_qp_res(nic_dev->hwdev);
	rte_delay_ms(100);

	hinic_remove_rxtx_configure(dev);

cfg_rxtx_fail:
set_mtu_fail:
cfg_mq_mode_fail:
	hinic_free_qp_ctxts(nic_dev->hwdev);

init_qp_fail:
	hinic_free_all_rx_mbuf(dev);
	hinic_free_all_tx_mbuf(dev);

	return rc;
}
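
/*
 * Illustrative sketch (not part of the driver): hinic_dev_start() is the
 * last step of the usual ethdev bring-up sequence driven by the
 * application; port_conf and mp are application-provided and error
 * handling is omitted:
 *
 *	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	ret = rte_eth_rx_queue_setup(port_id, 0, HINIC_DEFAULT_RING_SIZE,
 *			rte_socket_id(), NULL, mp);
 *	ret = rte_eth_tx_queue_setup(port_id, 0, HINIC_DEFAULT_RING_SIZE,
 *			rte_socket_id(), NULL);
 *	ret = rte_eth_dev_start(port_id);
 *
 * The final call is the one that invokes hinic_dev_start() above.
 */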

/**
 * DPDK callback to release the receive queue.
 *
 * @param queue
 *   Generic receive queue pointer.
 */
static void hinic_rx_queue_release(void *queue)
{
	struct hinic_rxq *rxq = queue;
	struct hinic_nic_dev *nic_dev;

	if (!rxq) {
		PMD_DRV_LOG(WARNING, "Rxq is null when release");
		return;
	}
	nic_dev = rxq->nic_dev;

	/* free rxq_pkt mbuf */
	hinic_free_all_rx_mbufs(rxq);

	/* free rxq_cqe, rxq_info */
	hinic_free_rx_resources(rxq);

	/* free root rq wq */
	hinic_destroy_rq(nic_dev->hwdev, rxq->q_id);

	nic_dev->rxqs[rxq->q_id] = NULL;

	/* free rxq */
	rte_free(rxq);
}

/**
 * DPDK callback to release the transmit queue.
 *
 * @param queue
 *   Generic transmit queue pointer.
 */
static void hinic_tx_queue_release(void *queue)
{
	struct hinic_txq *txq = queue;
	struct hinic_nic_dev *nic_dev;

	if (!txq) {
		PMD_DRV_LOG(WARNING, "Txq is null when release");
		return;
	}
	nic_dev = txq->nic_dev;

	/* free txq_pkt mbuf */
	hinic_free_all_tx_mbufs(txq);

	/* free txq_info */
	hinic_free_tx_resources(txq);

	/* free root sq wq */
	hinic_destroy_sq(nic_dev->hwdev, txq->q_id);
	nic_dev->txqs[txq->q_id] = NULL;

	/* free txq */
	rte_free(txq);
}

static void hinic_free_all_rq(struct hinic_nic_dev *nic_dev)
{
	u16 q_id;

	for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
		hinic_destroy_rq(nic_dev->hwdev, q_id);
}

static void hinic_free_all_sq(struct hinic_nic_dev *nic_dev)
{
	u16 q_id;

	for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
		hinic_destroy_sq(nic_dev->hwdev, q_id);
}

/**
 * DPDK callback to stop the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static int hinic_dev_stop(struct rte_eth_dev *dev)
{
	int rc;
	char *name;
	uint16_t port_id;
	struct hinic_nic_dev *nic_dev;
	struct rte_eth_link link;

	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	name = dev->data->name;
	port_id = dev->data->port_id;

	dev->data->dev_started = 0;

	if (!rte_bit_relaxed_test_and_clear32(HINIC_DEV_START,
					      &nic_dev->dev_status)) {
		PMD_DRV_LOG(INFO, "Device %s already stopped", name);
		return 0;
	}

	/* just stop phy port and vport */
	rc = hinic_set_port_enable(nic_dev->hwdev, false);
	if (rc)
		PMD_DRV_LOG(WARNING, "Disable phy port failed, error: %d, dev_name: %s, port_id: %d",
			    rc, name, port_id);

	rc = hinic_set_vport_enable(nic_dev->hwdev, false);
	if (rc)
		PMD_DRV_LOG(WARNING, "Disable vport failed, error: %d, dev_name: %s, port_id: %d",
			    rc, name, port_id);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	(void)rte_eth_linkstatus_set(dev, &link);

	/* flush pending io request */
	rc = hinic_rx_tx_flush(nic_dev->hwdev);
	if (rc)
		PMD_DRV_LOG(WARNING, "Flush pending io failed, error: %d, dev_name: %s, port_id: %d",
			    rc, name, port_id);

	/* clean rss table and rx_mode */
	hinic_remove_rxtx_configure(dev);

	/* clean root context */
	hinic_free_qp_ctxts(nic_dev->hwdev);

	hinic_destroy_fdir_filter(dev);

	/* free mbuf */
	hinic_free_all_rx_mbuf(dev);
	hinic_free_all_tx_mbuf(dev);

	return 0;
}

static void hinic_disable_interrupt(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	int ret, retries = 0;

	rte_bit_relaxed_clear32(HINIC_DEV_INTR_EN, &nic_dev->dev_status);

	/* disable msix interrupt in hardware */
	hinic_set_msix_state(nic_dev->hwdev, 0, HINIC_MSIX_DISABLE);

	/* disable rte interrupt */
	ret = rte_intr_disable(&pci_dev->intr_handle);
	if (ret)
		PMD_DRV_LOG(ERR, "Disable intr failed: %d", ret);

	do {
		ret = rte_intr_callback_unregister(&pci_dev->intr_handle,
						   hinic_dev_interrupt_handler,
						   dev);
		if (ret >= 0) {
			break;
		} else if (ret == -EAGAIN) {
			rte_delay_ms(100);
			retries++;
		} else {
			PMD_DRV_LOG(ERR, "intr callback unregister failed: %d",
				    ret);
			break;
		}
	} while (retries < HINIC_INTR_CB_UNREG_MAX_RETRIES);

	if (retries == HINIC_INTR_CB_UNREG_MAX_RETRIES)
		PMD_DRV_LOG(ERR, "Unregister intr callback failed after %d retries",
			    retries);

	rte_bit_relaxed_clear32(HINIC_DEV_INIT, &nic_dev->dev_status);
}

static int hinic_set_dev_promiscuous(struct hinic_nic_dev *nic_dev, bool enable)
{
	u32 rx_mode_ctrl;
	int err;

	err = hinic_mutex_lock(&nic_dev->rx_mode_mutex);
	if (err)
		return err;

	rx_mode_ctrl = nic_dev->rx_mode_status;

	if (enable)
		rx_mode_ctrl |= HINIC_RX_MODE_PROMISC;
	else
		rx_mode_ctrl &= (~HINIC_RX_MODE_PROMISC);

	err = hinic_config_rx_mode(nic_dev, rx_mode_ctrl);

	(void)hinic_mutex_unlock(&nic_dev->rx_mode_mutex);

	return err;
}
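
/*
 * Illustrative sketch (not part of the driver): hinic_set_dev_promiscuous()
 * above and hinic_set_dev_allmulticast() further below only flip single
 * bits in the cached rx_mode word. If rx_mode_status currently equals
 * HINIC_DEFAULT_RX_MODE, enabling promiscuous mode re-programs the NIC with
 * HINIC_DEFAULT_RX_MODE | HINIC_RX_MODE_PROMISC, and disabling it clears
 * that bit again while leaving the other rx_mode bits untouched.
 */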

/**
 * DPDK callback to get device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param stats
 *   Stats structure output buffer.
 *
 * @return
 *   0 on success and stats is filled,
 *   negative error value otherwise.
 */
static int
hinic_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	int i, err, q_num;
	u64 rx_discards_pmd = 0;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	struct hinic_vport_stats vport_stats;
	struct hinic_rxq *rxq = NULL;
	struct hinic_rxq_stats rxq_stats;
	struct hinic_txq *txq = NULL;
	struct hinic_txq_stats txq_stats;

	err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
	if (err) {
		PMD_DRV_LOG(ERR, "Get vport stats from fw failed, nic_dev: %s",
			nic_dev->proc_dev_name);
		return err;
	}

	dev->data->rx_mbuf_alloc_failed = 0;

	/* rx queue stats */
	q_num = (nic_dev->num_rq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
			nic_dev->num_rq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
	for (i = 0; i < q_num; i++) {
		rxq = nic_dev->rxqs[i];
		hinic_rxq_get_stats(rxq, &rxq_stats);
		stats->q_ipackets[i] = rxq_stats.packets;
		stats->q_ibytes[i] = rxq_stats.bytes;
		stats->q_errors[i] = rxq_stats.rx_discards;

		stats->ierrors += rxq_stats.errors;
		rx_discards_pmd += rxq_stats.rx_discards;
		dev->data->rx_mbuf_alloc_failed += rxq_stats.rx_nombuf;
	}

	/* tx queue stats */
	q_num = (nic_dev->num_sq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
			nic_dev->num_sq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
	for (i = 0; i < q_num; i++) {
		txq = nic_dev->txqs[i];
		hinic_txq_get_stats(txq, &txq_stats);
		stats->q_opackets[i] = txq_stats.packets;
		stats->q_obytes[i] = txq_stats.bytes;
		stats->oerrors += (txq_stats.tx_busy + txq_stats.off_errs);
	}

	/* vport stats */
	stats->oerrors += vport_stats.tx_discard_vport;

	stats->imissed = vport_stats.rx_discard_vport + rx_discards_pmd;

	stats->ipackets = (vport_stats.rx_unicast_pkts_vport +
			vport_stats.rx_multicast_pkts_vport +
			vport_stats.rx_broadcast_pkts_vport -
			rx_discards_pmd);

	stats->opackets = (vport_stats.tx_unicast_pkts_vport +
			vport_stats.tx_multicast_pkts_vport +
			vport_stats.tx_broadcast_pkts_vport);

	stats->ibytes = (vport_stats.rx_unicast_bytes_vport +
			vport_stats.rx_multicast_bytes_vport +
			vport_stats.rx_broadcast_bytes_vport);

	stats->obytes = (vport_stats.tx_unicast_bytes_vport +
			vport_stats.tx_multicast_bytes_vport +
			vport_stats.tx_broadcast_bytes_vport);
	return 0;
}

/**
 * DPDK callback to clear device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static int hinic_dev_stats_reset(struct rte_eth_dev *dev)
{
	int qid;
	struct hinic_rxq *rxq = NULL;
	struct hinic_txq *txq = NULL;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int ret;

	ret = hinic_clear_vport_stats(nic_dev->hwdev);
	if (ret != 0)
		return ret;

	for (qid = 0; qid < nic_dev->num_rq; qid++) {
		rxq = nic_dev->rxqs[qid];
		hinic_rxq_stats_reset(rxq);
	}

	for (qid = 0; qid < nic_dev->num_sq; qid++) {
		txq = nic_dev->txqs[qid];
		hinic_txq_stats_reset(txq);
	}

	return 0;
}

/**
 * DPDK callback to clear device extended statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static int hinic_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int ret;

	ret = hinic_dev_stats_reset(dev);
	if (ret != 0)
		return ret;

	if (hinic_func_type(nic_dev->hwdev) != TYPE_VF) {
		ret = hinic_clear_phy_port_stats(nic_dev->hwdev);
		if (ret != 0)
			return ret;
	}

	return 0;
}

static void hinic_gen_random_mac_addr(struct rte_ether_addr *mac_addr)
{
	uint64_t random_value;

	/* Set Organizationally Unique Identifier (OUI) prefix */
	mac_addr->addr_bytes[0] = 0x00;
	mac_addr->addr_bytes[1] = 0x09;
	mac_addr->addr_bytes[2] = 0xC0;
	/* Force indication of locally assigned MAC address. */
	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
	/* Generate the last 3 bytes of the MAC address with a random number. */
	random_value = rte_rand();
	memcpy(&mac_addr->addr_bytes[3], &random_value, 3);
}

/**
 * Init mac_vlan table in NIC.
 *
 * @param eth_dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int hinic_init_mac_addr(struct rte_eth_dev *eth_dev)
{
	struct hinic_nic_dev *nic_dev =
				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
	uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
	u16 func_id = 0;
	int rc = 0;

	rc = hinic_get_default_mac(nic_dev->hwdev, addr_bytes);
	if (rc)
		return rc;

	rte_ether_addr_copy((struct rte_ether_addr *)addr_bytes,
			    &eth_dev->data->mac_addrs[0]);
	if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[0]))
		hinic_gen_random_mac_addr(&eth_dev->data->mac_addrs[0]);

	func_id = hinic_global_func_id(nic_dev->hwdev);
	rc = hinic_set_mac(nic_dev->hwdev,
			   eth_dev->data->mac_addrs[0].addr_bytes,
			   0, func_id);
	if (rc && rc != HINIC_PF_SET_VF_ALREADY)
		return rc;

	rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
			    &nic_dev->default_addr);

	return 0;
}

static void hinic_delete_mc_addr_list(struct hinic_nic_dev *nic_dev)
{
	u16 func_id;
	u32 i;

	func_id = hinic_global_func_id(nic_dev->hwdev);

	for (i = 0; i < HINIC_MAX_MC_MAC_ADDRS; i++) {
		if (rte_is_zero_ether_addr(&nic_dev->mc_list[i]))
			break;

		hinic_del_mac(nic_dev->hwdev, nic_dev->mc_list[i].addr_bytes,
			      0, func_id);
		memset(&nic_dev->mc_list[i], 0, sizeof(struct rte_ether_addr));
	}
}

/**
 * Deinit mac_vlan table in NIC.
 *
 * @param eth_dev
 *   Pointer to Ethernet device structure.
 */
static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev)
{
	struct hinic_nic_dev *nic_dev =
				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
	u16 func_id = 0;
	int rc;
	int i;

	func_id = hinic_global_func_id(nic_dev->hwdev);

	for (i = 0; i < HINIC_MAX_UC_MAC_ADDRS; i++) {
		if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[i]))
			continue;

		rc = hinic_del_mac(nic_dev->hwdev,
				   eth_dev->data->mac_addrs[i].addr_bytes,
				   0, func_id);
		if (rc && rc != HINIC_PF_SET_VF_ALREADY)
			PMD_DRV_LOG(ERR, "Delete mac table failed, dev_name: %s",
				    eth_dev->data->name);

		memset(&eth_dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	/* delete multicast mac addrs */
	hinic_delete_mc_addr_list(nic_dev);

	rte_free(nic_dev->mc_list);
}

static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	uint32_t frame_size;
	int ret = 0;

	PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d",
			dev->data->port_id, mtu, HINIC_MTU_TO_PKTLEN(mtu));

	if (mtu < HINIC_MIN_MTU_SIZE || mtu > HINIC_MAX_MTU_SIZE) {
		PMD_DRV_LOG(ERR, "Invalid mtu: %d, must between %d and %d",
				mtu, HINIC_MIN_MTU_SIZE, HINIC_MAX_MTU_SIZE);
		return -EINVAL;
	}

	ret = hinic_set_port_mtu(nic_dev->hwdev, mtu);
	if (ret) {
		PMD_DRV_LOG(ERR, "Set port mtu failed, ret: %d", ret);
		return ret;
	}

	/* update max frame size */
	frame_size = HINIC_MTU_TO_PKTLEN(mtu);
	if (frame_size > HINIC_ETH_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	nic_dev->mtu_size = mtu;

	return ret;
}

static void hinic_store_vlan_filter(struct hinic_nic_dev *nic_dev,
				    u16 vlan_id, bool on)
{
	u32 vid_idx, vid_bit;

	vid_idx = HINIC_VFTA_IDX(vlan_id);
	vid_bit = HINIC_VFTA_BIT(vlan_id);

	if (on)
		nic_dev->vfta[vid_idx] |= vid_bit;
	else
		nic_dev->vfta[vid_idx] &= ~vid_bit;
}

static bool hinic_find_vlan_filter(struct hinic_nic_dev *nic_dev,
				   uint16_t vlan_id)
{
	u32 vid_idx, vid_bit;

	vid_idx = HINIC_VFTA_IDX(vlan_id);
	vid_bit = HINIC_VFTA_BIT(vlan_id);

	return (nic_dev->vfta[vid_idx] & vid_bit) ? TRUE : FALSE;
}

/**
 * DPDK callback to set vlan filter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param vlan_id
 *   vlan id used to filter vlan packets
 * @param enable
 *   Enable (1) or disable (0) the vlan filter entry
 */
static int hinic_vlan_filter_set(struct rte_eth_dev *dev,
				 uint16_t vlan_id, int enable)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int err = 0;
	u16 func_id;

	if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	if (vlan_id == 0)
		return 0;

	func_id = hinic_global_func_id(nic_dev->hwdev);

	if (enable) {
		/* If vlanid is already set, just return */
		if (hinic_find_vlan_filter(nic_dev, vlan_id)) {
			PMD_DRV_LOG(INFO, "Vlan %u has been added, device: %s",
				    vlan_id, nic_dev->proc_dev_name);
			return 0;
		}

		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
					    func_id, TRUE);
	} else {
		/* If vlanid can't be found, just return */
		if (!hinic_find_vlan_filter(nic_dev, vlan_id)) {
			PMD_DRV_LOG(INFO, "Vlan %u is not in the vlan filter list, device: %s",
				    vlan_id, nic_dev->proc_dev_name);
			return 0;
		}

		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
					    func_id, FALSE);
	}

	if (err) {
		PMD_DRV_LOG(ERR, "%s vlan failed, func_id: %d, vlan_id: %d, err: %d",
			    enable ? "Add" : "Remove", func_id, vlan_id, err);
		return err;
	}

	hinic_store_vlan_filter(nic_dev, vlan_id, enable);

	PMD_DRV_LOG(INFO, "%s vlan %u succeed, device: %s",
		    enable ? "Add" : "Remove", vlan_id, nic_dev->proc_dev_name);
	return 0;
}
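
/*
 * Illustrative sketch (not part of the driver): an application adds or
 * removes an entry through the generic ethdev call, which lands in
 * hinic_vlan_filter_set() above, e.g.
 *
 *	ret = rte_eth_dev_vlan_filter(port_id, 100, 1);	add VLAN 100
 *	ret = rte_eth_dev_vlan_filter(port_id, 100, 0);	remove it again
 *
 * DEV_RX_OFFLOAD_VLAN_FILTER must be enabled in the port configuration for
 * the filter to take effect.
 */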
"Enable" : "Disable", 1703 nic_dev->proc_dev_name, dev->data->port_id); 1704 } 1705 1706 return 0; 1707 } 1708 1709 static void hinic_remove_all_vlanid(struct rte_eth_dev *eth_dev) 1710 { 1711 struct hinic_nic_dev *nic_dev = 1712 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 1713 u16 func_id; 1714 int i; 1715 1716 func_id = hinic_global_func_id(nic_dev->hwdev); 1717 for (i = 0; i <= RTE_ETHER_MAX_VLAN_ID; i++) { 1718 /* If can't find it, continue */ 1719 if (!hinic_find_vlan_filter(nic_dev, i)) 1720 continue; 1721 1722 (void)hinic_add_remove_vlan(nic_dev->hwdev, i, func_id, FALSE); 1723 hinic_store_vlan_filter(nic_dev, i, false); 1724 } 1725 } 1726 1727 static int hinic_set_dev_allmulticast(struct hinic_nic_dev *nic_dev, 1728 bool enable) 1729 { 1730 u32 rx_mode_ctrl; 1731 int err; 1732 1733 err = hinic_mutex_lock(&nic_dev->rx_mode_mutex); 1734 if (err) 1735 return err; 1736 1737 rx_mode_ctrl = nic_dev->rx_mode_status; 1738 1739 if (enable) 1740 rx_mode_ctrl |= HINIC_RX_MODE_MC_ALL; 1741 else 1742 rx_mode_ctrl &= (~HINIC_RX_MODE_MC_ALL); 1743 1744 err = hinic_config_rx_mode(nic_dev, rx_mode_ctrl); 1745 1746 (void)hinic_mutex_unlock(&nic_dev->rx_mode_mutex); 1747 1748 return err; 1749 } 1750 1751 /** 1752 * DPDK callback to enable allmulticast mode. 1753 * 1754 * @param dev 1755 * Pointer to Ethernet device structure. 1756 * 1757 * @return 1758 * 0 on success, 1759 * negative error value otherwise. 1760 */ 1761 static int hinic_dev_allmulticast_enable(struct rte_eth_dev *dev) 1762 { 1763 int ret = HINIC_OK; 1764 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1765 1766 ret = hinic_set_dev_allmulticast(nic_dev, true); 1767 if (ret) { 1768 PMD_DRV_LOG(ERR, "Enable allmulticast failed, error: %d", ret); 1769 return ret; 1770 } 1771 1772 PMD_DRV_LOG(INFO, "Enable allmulticast succeed, nic_dev: %s, port_id: %d", 1773 nic_dev->proc_dev_name, dev->data->port_id); 1774 return 0; 1775 } 1776 1777 /** 1778 * DPDK callback to disable allmulticast mode. 1779 * 1780 * @param dev 1781 * Pointer to Ethernet device structure. 1782 * 1783 * @return 1784 * 0 on success, 1785 * negative error value otherwise. 1786 */ 1787 static int hinic_dev_allmulticast_disable(struct rte_eth_dev *dev) 1788 { 1789 int ret = HINIC_OK; 1790 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1791 1792 ret = hinic_set_dev_allmulticast(nic_dev, false); 1793 if (ret) { 1794 PMD_DRV_LOG(ERR, "Disable allmulticast failed, error: %d", ret); 1795 return ret; 1796 } 1797 1798 PMD_DRV_LOG(INFO, "Disable allmulticast succeed, nic_dev: %s, port_id: %d", 1799 nic_dev->proc_dev_name, dev->data->port_id); 1800 return 0; 1801 } 1802 1803 /** 1804 * DPDK callback to enable promiscuous mode. 1805 * 1806 * @param dev 1807 * Pointer to Ethernet device structure. 1808 * 1809 * @return 1810 * 0 on success, 1811 * negative error value otherwise. 1812 */ 1813 static int hinic_dev_promiscuous_enable(struct rte_eth_dev *dev) 1814 { 1815 int rc = HINIC_OK; 1816 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1817 1818 PMD_DRV_LOG(INFO, "Enable promiscuous, nic_dev: %s, port_id: %d, promisc: %d", 1819 nic_dev->proc_dev_name, dev->data->port_id, 1820 dev->data->promiscuous); 1821 1822 rc = hinic_set_dev_promiscuous(nic_dev, true); 1823 if (rc) 1824 PMD_DRV_LOG(ERR, "Enable promiscuous failed"); 1825 1826 return rc; 1827 } 1828 1829 /** 1830 * DPDK callback to disable promiscuous mode. 1831 * 1832 * @param dev 1833 * Pointer to Ethernet device structure. 

/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
static int hinic_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	int rc = HINIC_OK;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	PMD_DRV_LOG(INFO, "Disable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
		    nic_dev->proc_dev_name, dev->data->port_id,
		    dev->data->promiscuous);

	rc = hinic_set_dev_promiscuous(nic_dev, false);
	if (rc)
		PMD_DRV_LOG(ERR, "Disable promiscuous failed");

	return rc;
}

static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	struct nic_pause_config nic_pause;
	int err;

	memset(&nic_pause, 0, sizeof(nic_pause));

	err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause);
	if (err)
		return err;

	if (nic_dev->pause_set || !nic_pause.auto_neg) {
		nic_pause.rx_pause = nic_dev->nic_pause.rx_pause;
		nic_pause.tx_pause = nic_dev->nic_pause.tx_pause;
	}

	fc_conf->autoneg = nic_pause.auto_neg;

	if (nic_pause.tx_pause && nic_pause.rx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (nic_pause.tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else if (nic_pause.rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	struct nic_pause_config nic_pause;
	int err;

	nic_pause.auto_neg = fc_conf->autoneg;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		nic_pause.tx_pause = true;
	else
		nic_pause.tx_pause = false;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		nic_pause.rx_pause = true;
	else
		nic_pause.rx_pause = false;

	err = hinic_set_pause_config(nic_dev->hwdev, nic_pause);
	if (err)
		return err;

	nic_dev->pause_set = true;
	nic_dev->nic_pause.auto_neg = nic_pause.auto_neg;
	nic_dev->nic_pause.rx_pause = nic_pause.rx_pause;
	nic_dev->nic_pause.tx_pause = nic_pause.tx_pause;

	PMD_DRV_LOG(INFO, "Set pause options, tx: %s, rx: %s, auto: %s\n",
		nic_pause.tx_pause ? "on" : "off",
		nic_pause.rx_pause ? "on" : "off",
		nic_pause.auto_neg ? "on" : "off");

	return 0;
}
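
/*
 * Illustrative sketch (not part of the driver): the two callbacks above are
 * reached through the generic flow-control API, e.g. to enable pause frames
 * in both directions:
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	memset(&fc_conf, 0, sizeof(fc_conf));
 *	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 *	fc_conf.mode = RTE_FC_FULL;
 *	ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 *
 * Only mode and autoneg are translated into nic_pause_config by this PMD;
 * fields such as the high/low watermarks are not used here.
 */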
1935 */ 1936 static int hinic_rss_hash_update(struct rte_eth_dev *dev, 1937 struct rte_eth_rss_conf *rss_conf) 1938 { 1939 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 1940 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 1941 u8 hashkey[HINIC_RSS_KEY_SIZE] = {0}; 1942 u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; 1943 u64 rss_hf = rss_conf->rss_hf; 1944 struct nic_rss_type rss_type = {0}; 1945 int err = 0; 1946 1947 if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) { 1948 PMD_DRV_LOG(WARNING, "RSS is not enabled"); 1949 return HINIC_OK; 1950 } 1951 1952 if (rss_conf->rss_key_len > HINIC_RSS_KEY_SIZE) { 1953 PMD_DRV_LOG(ERR, "Invalid rss key, rss_key_len: %d", 1954 rss_conf->rss_key_len); 1955 return HINIC_ERROR; 1956 } 1957 1958 if (rss_conf->rss_key) { 1959 memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len); 1960 err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, 1961 hashkey); 1962 if (err) { 1963 PMD_DRV_LOG(ERR, "Set rss template table failed"); 1964 goto disable_rss; 1965 } 1966 } 1967 1968 rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0; 1969 rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0; 1970 rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0; 1971 rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0; 1972 rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0; 1973 rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0; 1974 rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0; 1975 rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0; 1976 1977 err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type); 1978 if (err) { 1979 PMD_DRV_LOG(ERR, "Set rss type table failed"); 1980 goto disable_rss; 1981 } 1982 1983 return 0; 1984 1985 disable_rss: 1986 memset(prio_tc, 0, sizeof(prio_tc)); 1987 (void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc); 1988 return err; 1989 } 1990 1991 /** 1992 * DPDK callback to get the RSS hash configuration. 1993 * 1994 * @param dev 1995 * Pointer to Ethernet device structure. 1996 * @param rss_conf 1997 * RSS configuration data. 1998 * 1999 * @return 2000 * 0 on success, negative error value otherwise. 2001 */ 2002 static int hinic_rss_conf_get(struct rte_eth_dev *dev, 2003 struct rte_eth_rss_conf *rss_conf) 2004 { 2005 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2006 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 2007 u8 hashkey[HINIC_RSS_KEY_SIZE] = {0}; 2008 struct nic_rss_type rss_type = {0}; 2009 int err; 2010 2011 if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) { 2012 PMD_DRV_LOG(WARNING, "RSS is not enabled"); 2013 return HINIC_ERROR; 2014 } 2015 2016 err = hinic_rss_get_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey); 2017 if (err) 2018 return err; 2019 2020 if (rss_conf->rss_key && 2021 rss_conf->rss_key_len >= HINIC_RSS_KEY_SIZE) { 2022 memcpy(rss_conf->rss_key, hashkey, sizeof(hashkey)); 2023 rss_conf->rss_key_len = sizeof(hashkey); 2024 } 2025 2026 err = hinic_get_rss_type(nic_dev->hwdev, tmpl_idx, &rss_type); 2027 if (err) 2028 return err; 2029 2030 rss_conf->rss_hf = 0; 2031 rss_conf->rss_hf |= rss_type.ipv4 ? 2032 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0; 2033 rss_conf->rss_hf |= rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0; 2034 rss_conf->rss_hf |= rss_type.ipv6 ? 2035 (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0; 2036 rss_conf->rss_hf |= rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0; 2037 rss_conf->rss_hf |= rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0; 2038 rss_conf->rss_hf |= rss_type.tcp_ipv6_ext ? 
ETH_RSS_IPV6_TCP_EX : 0; 2039 rss_conf->rss_hf |= rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0; 2040 rss_conf->rss_hf |= rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0; 2041 2042 return HINIC_OK; 2043 } 2044 2045 /** 2046 * DPDK callback to update the RSS redirection table. 2047 * 2048 * @param dev 2049 * Pointer to Ethernet device structure. 2050 * @param reta_conf 2051 * Pointer to RSS reta configuration data. 2052 * @param reta_size 2053 * Size of the RETA table. 2054 * 2055 * @return 2056 * 0 on success, negative error value otherwise. 2057 */ 2058 static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev, 2059 struct rte_eth_rss_reta_entry64 *reta_conf, 2060 uint16_t reta_size) 2061 { 2062 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2063 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 2064 u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; 2065 u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0}; 2066 int err = 0; 2067 u16 i = 0; 2068 u16 idx, shift; 2069 2070 if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) 2071 return HINIC_OK; 2072 2073 if (reta_size != NIC_RSS_INDIR_SIZE) { 2074 PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size); 2075 return HINIC_ERROR; 2076 } 2077 2078 err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); 2079 if (err) 2080 return err; 2081 2082 /* update rss indir_tbl */ 2083 for (i = 0; i < reta_size; i++) { 2084 idx = i / RTE_RETA_GROUP_SIZE; 2085 shift = i % RTE_RETA_GROUP_SIZE; 2086 2087 if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) { 2088 PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d " 2089 "exceeds the maximum rxq num: %d", i, 2090 reta_conf[idx].reta[shift], nic_dev->num_rq); 2091 return -EINVAL; 2092 } 2093 2094 if (reta_conf[idx].mask & (1ULL << shift)) 2095 indirtbl[i] = reta_conf[idx].reta[shift]; 2096 } 2097 2098 err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); 2099 if (err) 2100 goto disable_rss; 2101 2102 nic_dev->rss_indir_flag = true; 2103 2104 return 0; 2105 2106 disable_rss: 2107 memset(prio_tc, 0, sizeof(prio_tc)); 2108 (void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc); 2109 2110 return HINIC_ERROR; 2111 } 2112 2113 /** 2114 * DPDK callback to get the RSS indirection table. 2115 * 2116 * @param dev 2117 * Pointer to Ethernet device structure. 2118 * @param reta_conf 2119 * Pointer to RSS reta configuration data. 2120 * @param reta_size 2121 * Size of the RETA table. 2122 * 2123 * @return 2124 * 0 on success, negative error value otherwise. 2125 */ 2126 static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev, 2127 struct rte_eth_rss_reta_entry64 *reta_conf, 2128 uint16_t reta_size) 2129 { 2130 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2131 u8 tmpl_idx = nic_dev->rss_tmpl_idx; 2132 int err = 0; 2133 u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0}; 2134 u16 idx, shift; 2135 u16 i = 0; 2136 2137 if (reta_size != NIC_RSS_INDIR_SIZE) { 2138 PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size); 2139 return HINIC_ERROR; 2140 } 2141 2142 err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); 2143 if (err) { 2144 PMD_DRV_LOG(ERR, "Get rss indirect table failed, error: %d", 2145 err); 2146 return err; 2147 } 2148 2149 for (i = 0; i < reta_size; i++) { 2150 idx = i / RTE_RETA_GROUP_SIZE; 2151 shift = i % RTE_RETA_GROUP_SIZE; 2152 if (reta_conf[idx].mask & (1ULL << shift)) 2153 reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i]; 2154 } 2155 2156 return HINIC_OK; 2157 } 2158 2159 /** 2160 * DPDK callback to get extended device statistics. 
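 *
 * Statistics are filled in a fixed order: per-RX-queue counters,
 * per-TX-queue counters, vport (function level) counters and, for PF only,
 * physical port MAC counters. hinic_dev_xstats_get_names() below emits the
 * names in the same order, so index i of both arrays refers to the same
 * counter.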
2161 * 2162 * @param dev 2163 * Pointer to Ethernet device. 2164 * @param xstats 2165 * Pointer to rte extended stats table. 2166 * @param n 2167 * The size of the stats table. 2168 * 2169 * @return 2170 * Number of extended stats on success and stats is filled, 2171 * negative error value otherwise. 2172 */ 2173 static int hinic_dev_xstats_get(struct rte_eth_dev *dev, 2174 struct rte_eth_xstat *xstats, 2175 unsigned int n) 2176 { 2177 u16 qid = 0; 2178 u32 i; 2179 int err, count; 2180 struct hinic_nic_dev *nic_dev; 2181 struct hinic_phy_port_stats port_stats; 2182 struct hinic_vport_stats vport_stats; 2183 struct hinic_rxq *rxq = NULL; 2184 struct hinic_rxq_stats rxq_stats; 2185 struct hinic_txq *txq = NULL; 2186 struct hinic_txq_stats txq_stats; 2187 2188 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2189 count = hinic_xstats_calc_num(nic_dev); 2190 if ((int)n < count) 2191 return count; 2192 2193 count = 0; 2194 2195 /* Get stats from hinic_rxq_stats */ 2196 for (qid = 0; qid < nic_dev->num_rq; qid++) { 2197 rxq = nic_dev->rxqs[qid]; 2198 hinic_rxq_get_stats(rxq, &rxq_stats); 2199 2200 for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) { 2201 xstats[count].value = 2202 *(uint64_t *)(((char *)&rxq_stats) + 2203 hinic_rxq_stats_strings[i].offset); 2204 xstats[count].id = count; 2205 count++; 2206 } 2207 } 2208 2209 /* Get stats from hinic_txq_stats */ 2210 for (qid = 0; qid < nic_dev->num_sq; qid++) { 2211 txq = nic_dev->txqs[qid]; 2212 hinic_txq_get_stats(txq, &txq_stats); 2213 2214 for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) { 2215 xstats[count].value = 2216 *(uint64_t *)(((char *)&txq_stats) + 2217 hinic_txq_stats_strings[i].offset); 2218 xstats[count].id = count; 2219 count++; 2220 } 2221 } 2222 2223 /* Get stats from hinic_vport_stats */ 2224 err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats); 2225 if (err) 2226 return err; 2227 2228 for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) { 2229 xstats[count].value = 2230 *(uint64_t *)(((char *)&vport_stats) + 2231 hinic_vport_stats_strings[i].offset); 2232 xstats[count].id = count; 2233 count++; 2234 } 2235 2236 if (HINIC_IS_VF(nic_dev->hwdev)) 2237 return count; 2238 2239 /* Get stats from hinic_phy_port_stats */ 2240 err = hinic_get_phy_port_stats(nic_dev->hwdev, &port_stats); 2241 if (err) 2242 return err; 2243 2244 for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) { 2245 xstats[count].value = *(uint64_t *)(((char *)&port_stats) + 2246 hinic_phyport_stats_strings[i].offset); 2247 xstats[count].id = count; 2248 count++; 2249 } 2250 2251 return count; 2252 } 2253 2254 static void hinic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2255 struct rte_eth_rxq_info *qinfo) 2256 { 2257 struct hinic_rxq *rxq = dev->data->rx_queues[queue_id]; 2258 2259 qinfo->mp = rxq->mb_pool; 2260 qinfo->nb_desc = rxq->q_depth; 2261 } 2262 2263 static void hinic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 2264 struct rte_eth_txq_info *qinfo) 2265 { 2266 struct hinic_txq *txq = dev->data->tx_queues[queue_id]; 2267 2268 qinfo->nb_desc = txq->q_depth; 2269 } 2270 2271 /** 2272 * DPDK callback to retrieve names of extended device statistics 2273 * 2274 * @param dev 2275 * Pointer to Ethernet device structure. 2276 * @param xstats_names 2277 * Buffer to insert names into. 2278 * 2279 * @return 2280 * Number of xstats names. 
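 *
 * When xstats_names is NULL, only the number of statistics is returned
 * (hinic_xstats_calc_num()), which lets callers size the array before a
 * second call that actually fills in the names.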
2281 */ 2282 static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev, 2283 struct rte_eth_xstat_name *xstats_names, 2284 __rte_unused unsigned int limit) 2285 { 2286 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2287 int count = 0; 2288 u16 i = 0, q_num; 2289 2290 if (xstats_names == NULL) 2291 return hinic_xstats_calc_num(nic_dev); 2292 2293 /* get pmd rxq stats */ 2294 for (q_num = 0; q_num < nic_dev->num_rq; q_num++) { 2295 for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) { 2296 snprintf(xstats_names[count].name, 2297 sizeof(xstats_names[count].name), 2298 "rxq%d_%s_pmd", 2299 q_num, hinic_rxq_stats_strings[i].name); 2300 count++; 2301 } 2302 } 2303 2304 /* get pmd txq stats */ 2305 for (q_num = 0; q_num < nic_dev->num_sq; q_num++) { 2306 for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) { 2307 snprintf(xstats_names[count].name, 2308 sizeof(xstats_names[count].name), 2309 "txq%d_%s_pmd", 2310 q_num, hinic_txq_stats_strings[i].name); 2311 count++; 2312 } 2313 } 2314 2315 /* get vport stats */ 2316 for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) { 2317 snprintf(xstats_names[count].name, 2318 sizeof(xstats_names[count].name), 2319 "%s", hinic_vport_stats_strings[i].name); 2320 count++; 2321 } 2322 2323 if (HINIC_IS_VF(nic_dev->hwdev)) 2324 return count; 2325 2326 /* get phy port stats */ 2327 for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) { 2328 snprintf(xstats_names[count].name, 2329 sizeof(xstats_names[count].name), 2330 "%s", hinic_phyport_stats_strings[i].name); 2331 count++; 2332 } 2333 2334 return count; 2335 } 2336 2337 /** 2338 * DPDK callback to set mac address 2339 * 2340 * @param dev 2341 * Pointer to Ethernet device structure. 2342 * @param addr 2343 * Pointer to mac address 2344 * @return 2345 * 0 on success, negative error value otherwise. 2346 */ 2347 static int hinic_set_mac_addr(struct rte_eth_dev *dev, 2348 struct rte_ether_addr *addr) 2349 { 2350 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2351 u16 func_id; 2352 int err; 2353 2354 func_id = hinic_global_func_id(nic_dev->hwdev); 2355 err = hinic_update_mac(nic_dev->hwdev, nic_dev->default_addr.addr_bytes, 2356 addr->addr_bytes, 0, func_id); 2357 if (err) 2358 return err; 2359 2360 rte_ether_addr_copy(addr, &nic_dev->default_addr); 2361 2362 PMD_DRV_LOG(INFO, "Set new mac address %02x:%02x:%02x:%02x:%02x:%02x", 2363 addr->addr_bytes[0], addr->addr_bytes[1], 2364 addr->addr_bytes[2], addr->addr_bytes[3], 2365 addr->addr_bytes[4], addr->addr_bytes[5]); 2366 2367 return 0; 2368 } 2369 2370 /** 2371 * DPDK callback to remove a MAC address. 2372 * 2373 * @param dev 2374 * Pointer to Ethernet device structure. 2375 * @param index 2376 * MAC address index, should less than 128. 2377 */ 2378 static void hinic_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 2379 { 2380 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2381 u16 func_id; 2382 int ret; 2383 2384 if (index >= HINIC_MAX_UC_MAC_ADDRS) { 2385 PMD_DRV_LOG(INFO, "Remove mac index(%u) is out of range", 2386 index); 2387 return; 2388 } 2389 2390 func_id = hinic_global_func_id(nic_dev->hwdev); 2391 ret = hinic_del_mac(nic_dev->hwdev, 2392 dev->data->mac_addrs[index].addr_bytes, 0, func_id); 2393 if (ret) 2394 return; 2395 2396 memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr)); 2397 } 2398 2399 /** 2400 * DPDK callback to add a MAC address. 2401 * 2402 * @param dev 2403 * Pointer to Ethernet device structure. 
2404 * @param mac_addr 2405 * Pointer to MAC address 2406 * @param index 2407 * MAC address index, should less than 128. 2408 * @param vmdq 2409 * VMDq pool index(not used). 2410 * 2411 * @return 2412 * 0 on success, negative error value otherwise. 2413 */ 2414 static int hinic_mac_addr_add(struct rte_eth_dev *dev, 2415 struct rte_ether_addr *mac_addr, uint32_t index, 2416 __rte_unused uint32_t vmdq) 2417 { 2418 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2419 unsigned int i; 2420 u16 func_id; 2421 int ret; 2422 2423 if (index >= HINIC_MAX_UC_MAC_ADDRS) { 2424 PMD_DRV_LOG(INFO, "Add mac index(%u) is out of range", index); 2425 return -EINVAL; 2426 } 2427 2428 /* First, make sure this address isn't already configured. */ 2429 for (i = 0; (i != HINIC_MAX_UC_MAC_ADDRS); ++i) { 2430 /* Skip this index, it's going to be reconfigured. */ 2431 if (i == index) 2432 continue; 2433 2434 if (memcmp(&dev->data->mac_addrs[i], 2435 mac_addr, sizeof(*mac_addr))) 2436 continue; 2437 2438 PMD_DRV_LOG(INFO, "MAC address already configured"); 2439 return -EADDRINUSE; 2440 } 2441 2442 func_id = hinic_global_func_id(nic_dev->hwdev); 2443 ret = hinic_set_mac(nic_dev->hwdev, mac_addr->addr_bytes, 0, func_id); 2444 if (ret) 2445 return ret; 2446 2447 dev->data->mac_addrs[index] = *mac_addr; 2448 return 0; 2449 } 2450 2451 /** 2452 * DPDK callback to set multicast mac address 2453 * 2454 * @param dev 2455 * Pointer to Ethernet device structure. 2456 * @param mc_addr_set 2457 * Pointer to multicast mac address 2458 * @param nb_mc_addr 2459 * mc addr count 2460 * @return 2461 * 0 on success, negative error value otherwise. 2462 */ 2463 static int hinic_set_mc_addr_list(struct rte_eth_dev *dev, 2464 struct rte_ether_addr *mc_addr_set, 2465 uint32_t nb_mc_addr) 2466 { 2467 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2468 u16 func_id; 2469 int ret; 2470 u32 i; 2471 2472 func_id = hinic_global_func_id(nic_dev->hwdev); 2473 2474 /* delete old multi_cast addrs firstly */ 2475 hinic_delete_mc_addr_list(nic_dev); 2476 2477 if (nb_mc_addr > HINIC_MAX_MC_MAC_ADDRS) 2478 goto allmulti; 2479 2480 for (i = 0; i < nb_mc_addr; i++) { 2481 ret = hinic_set_mac(nic_dev->hwdev, mc_addr_set[i].addr_bytes, 2482 0, func_id); 2483 /* if add mc addr failed, set all multi_cast */ 2484 if (ret) { 2485 hinic_delete_mc_addr_list(nic_dev); 2486 goto allmulti; 2487 } 2488 2489 rte_ether_addr_copy(&mc_addr_set[i], &nic_dev->mc_list[i]); 2490 } 2491 2492 return 0; 2493 2494 allmulti: 2495 hinic_dev_allmulticast_enable(dev); 2496 2497 return 0; 2498 } 2499 2500 /** 2501 * DPDK callback to get flow operations 2502 * 2503 * @param dev 2504 * Pointer to Ethernet device structure. 2505 * @param ops 2506 * Pointer to operation-specific structure. 2507 * 2508 * @return 2509 * 0 on success, negative error value otherwise. 
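 *
 * The same hinic_flow_ops table is exposed for both PF and VF (see the
 * .flow_ops_get entries in hinic_pmd_ops and hinic_pmd_vf_ops below); it
 * backs the generic rte_flow API for this PMD.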
2510 */ 2511 static int hinic_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused, 2512 const struct rte_flow_ops **ops) 2513 { 2514 *ops = &hinic_flow_ops; 2515 return 0; 2516 } 2517 2518 static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev) 2519 { 2520 struct nic_pause_config pause_config = {0}; 2521 int err; 2522 2523 pause_config.auto_neg = 0; 2524 pause_config.rx_pause = HINIC_DEFAUT_PAUSE_CONFIG; 2525 pause_config.tx_pause = HINIC_DEFAUT_PAUSE_CONFIG; 2526 2527 err = hinic_set_pause_config(nic_dev->hwdev, pause_config); 2528 if (err) 2529 return err; 2530 2531 nic_dev->pause_set = true; 2532 nic_dev->nic_pause.auto_neg = pause_config.auto_neg; 2533 nic_dev->nic_pause.rx_pause = pause_config.rx_pause; 2534 nic_dev->nic_pause.tx_pause = pause_config.tx_pause; 2535 2536 return 0; 2537 } 2538 2539 static int hinic_set_default_dcb_feature(struct hinic_nic_dev *nic_dev) 2540 { 2541 u8 up_tc[HINIC_DCB_UP_MAX] = {0}; 2542 u8 up_pgid[HINIC_DCB_UP_MAX] = {0}; 2543 u8 up_bw[HINIC_DCB_UP_MAX] = {0}; 2544 u8 pg_bw[HINIC_DCB_UP_MAX] = {0}; 2545 u8 up_strict[HINIC_DCB_UP_MAX] = {0}; 2546 int i = 0; 2547 2548 pg_bw[0] = 100; 2549 for (i = 0; i < HINIC_DCB_UP_MAX; i++) 2550 up_bw[i] = 100; 2551 2552 return hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw, 2553 up_pgid, up_bw, up_strict); 2554 } 2555 2556 static int hinic_pf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id) 2557 { 2558 u8 default_cos = 0; 2559 u8 valid_cos_bitmap; 2560 u8 i; 2561 2562 valid_cos_bitmap = hwdev->cfg_mgmt->svc_cap.valid_cos_bitmap; 2563 if (!valid_cos_bitmap) { 2564 PMD_DRV_LOG(ERR, "PF has none cos to support\n"); 2565 return -EFAULT; 2566 } 2567 2568 for (i = 0; i < NR_MAX_COS; i++) { 2569 if (valid_cos_bitmap & BIT(i)) 2570 default_cos = i; /* Find max cos id as default cos */ 2571 } 2572 2573 *cos_id = default_cos; 2574 2575 return 0; 2576 } 2577 2578 static int hinic_init_default_cos(struct hinic_nic_dev *nic_dev) 2579 { 2580 u8 cos_id = 0; 2581 int err; 2582 2583 if (!HINIC_IS_VF(nic_dev->hwdev)) { 2584 err = hinic_pf_get_default_cos(nic_dev->hwdev, &cos_id); 2585 if (err) { 2586 PMD_DRV_LOG(ERR, "Get PF default cos failed, err: %d", 2587 err); 2588 return HINIC_ERROR; 2589 } 2590 } else { 2591 err = hinic_vf_get_default_cos(nic_dev->hwdev, &cos_id); 2592 if (err) { 2593 PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d", 2594 err); 2595 return HINIC_ERROR; 2596 } 2597 } 2598 2599 nic_dev->default_cos = cos_id; 2600 2601 PMD_DRV_LOG(INFO, "Default cos %d", nic_dev->default_cos); 2602 2603 return 0; 2604 } 2605 2606 static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev) 2607 { 2608 int err; 2609 2610 err = hinic_init_default_cos(nic_dev); 2611 if (err) 2612 return err; 2613 2614 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) 2615 return 0; 2616 2617 /* Restore DCB configure to default status */ 2618 err = hinic_set_default_dcb_feature(nic_dev); 2619 if (err) 2620 return err; 2621 2622 /* Set pause enable, and up will disable pfc. 
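 * (i.e. enabling link-level pause here is expected to turn off any
 * per-priority PFC configuration).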
*/ 2623 err = hinic_set_default_pause_feature(nic_dev); 2624 if (err) 2625 return err; 2626 2627 err = hinic_reset_port_link_cfg(nic_dev->hwdev); 2628 if (err) 2629 return err; 2630 2631 err = hinic_set_link_status_follow(nic_dev->hwdev, 2632 HINIC_LINK_FOLLOW_PORT); 2633 if (err == HINIC_MGMT_CMD_UNSUPPORTED) 2634 PMD_DRV_LOG(WARNING, "Don't support to set link status follow phy port status"); 2635 else if (err) 2636 return err; 2637 2638 return hinic_set_anti_attack(nic_dev->hwdev, true); 2639 } 2640 2641 static int32_t hinic_card_workmode_check(struct hinic_nic_dev *nic_dev) 2642 { 2643 struct hinic_board_info info = { 0 }; 2644 int rc; 2645 2646 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) 2647 return 0; 2648 2649 rc = hinic_get_board_info(nic_dev->hwdev, &info); 2650 if (rc) 2651 return rc; 2652 2653 return (info.service_mode == HINIC_SERVICE_MODE_NIC ? HINIC_OK : 2654 HINIC_ERROR); 2655 } 2656 2657 static int hinic_copy_mempool_init(struct hinic_nic_dev *nic_dev) 2658 { 2659 nic_dev->cpy_mpool = rte_mempool_lookup(nic_dev->proc_dev_name); 2660 if (nic_dev->cpy_mpool == NULL) { 2661 nic_dev->cpy_mpool = 2662 rte_pktmbuf_pool_create(nic_dev->proc_dev_name, 2663 HINIC_COPY_MEMPOOL_DEPTH, 2664 0, 0, 2665 HINIC_COPY_MBUF_SIZE, 2666 rte_socket_id()); 2667 if (!nic_dev->cpy_mpool) { 2668 PMD_DRV_LOG(ERR, "Create copy mempool failed, errno: %d, dev_name: %s", 2669 rte_errno, nic_dev->proc_dev_name); 2670 return -ENOMEM; 2671 } 2672 } 2673 2674 return 0; 2675 } 2676 2677 static void hinic_copy_mempool_uninit(struct hinic_nic_dev *nic_dev) 2678 { 2679 if (nic_dev->cpy_mpool != NULL) 2680 rte_mempool_free(nic_dev->cpy_mpool); 2681 } 2682 2683 static int hinic_init_sw_rxtxqs(struct hinic_nic_dev *nic_dev) 2684 { 2685 u32 txq_size; 2686 u32 rxq_size; 2687 2688 /* allocate software txq array */ 2689 txq_size = nic_dev->nic_cap.max_sqs * sizeof(*nic_dev->txqs); 2690 nic_dev->txqs = kzalloc_aligned(txq_size, GFP_KERNEL); 2691 if (!nic_dev->txqs) { 2692 PMD_DRV_LOG(ERR, "Allocate txqs failed"); 2693 return -ENOMEM; 2694 } 2695 2696 /* allocate software rxq array */ 2697 rxq_size = nic_dev->nic_cap.max_rqs * sizeof(*nic_dev->rxqs); 2698 nic_dev->rxqs = kzalloc_aligned(rxq_size, GFP_KERNEL); 2699 if (!nic_dev->rxqs) { 2700 /* free txqs */ 2701 kfree(nic_dev->txqs); 2702 nic_dev->txqs = NULL; 2703 2704 PMD_DRV_LOG(ERR, "Allocate rxqs failed"); 2705 return -ENOMEM; 2706 } 2707 2708 return HINIC_OK; 2709 } 2710 2711 static void hinic_deinit_sw_rxtxqs(struct hinic_nic_dev *nic_dev) 2712 { 2713 kfree(nic_dev->txqs); 2714 nic_dev->txqs = NULL; 2715 2716 kfree(nic_dev->rxqs); 2717 nic_dev->rxqs = NULL; 2718 } 2719 2720 static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev) 2721 { 2722 struct hinic_nic_dev *nic_dev = 2723 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 2724 int rc; 2725 2726 nic_dev->hwdev = rte_zmalloc("hinic_hwdev", sizeof(*nic_dev->hwdev), 2727 RTE_CACHE_LINE_SIZE); 2728 if (!nic_dev->hwdev) { 2729 PMD_DRV_LOG(ERR, "Allocate hinic hwdev memory failed, dev_name: %s", 2730 eth_dev->data->name); 2731 return -ENOMEM; 2732 } 2733 nic_dev->hwdev->pcidev_hdl = RTE_ETH_DEV_TO_PCI(eth_dev); 2734 2735 /* init osdep*/ 2736 rc = hinic_osdep_init(nic_dev->hwdev); 2737 if (rc) { 2738 PMD_DRV_LOG(ERR, "Initialize os_dep failed, dev_name: %s", 2739 eth_dev->data->name); 2740 goto init_osdep_fail; 2741 } 2742 2743 /* init_hwif */ 2744 rc = hinic_hwif_res_init(nic_dev->hwdev); 2745 if (rc) { 2746 PMD_DRV_LOG(ERR, "Initialize hwif failed, dev_name: %s", 2747 eth_dev->data->name); 2748 goto init_hwif_fail; 
2749 } 2750 2751 /* init_cfg_mgmt */ 2752 rc = init_cfg_mgmt(nic_dev->hwdev); 2753 if (rc) { 2754 PMD_DRV_LOG(ERR, "Initialize cfg_mgmt failed, dev_name: %s", 2755 eth_dev->data->name); 2756 goto init_cfgmgnt_fail; 2757 } 2758 2759 /* init_aeqs */ 2760 rc = hinic_comm_aeqs_init(nic_dev->hwdev); 2761 if (rc) { 2762 PMD_DRV_LOG(ERR, "Initialize aeqs failed, dev_name: %s", 2763 eth_dev->data->name); 2764 goto init_aeqs_fail; 2765 } 2766 2767 /* init_pf_to_mgnt */ 2768 rc = hinic_comm_pf_to_mgmt_init(nic_dev->hwdev); 2769 if (rc) { 2770 PMD_DRV_LOG(ERR, "Initialize pf_to_mgmt failed, dev_name: %s", 2771 eth_dev->data->name); 2772 goto init_pf_to_mgmt_fail; 2773 } 2774 2775 /* init mailbox */ 2776 rc = hinic_comm_func_to_func_init(nic_dev->hwdev); 2777 if (rc) { 2778 PMD_DRV_LOG(ERR, "Initialize func_to_func failed, dev_name: %s", 2779 eth_dev->data->name); 2780 goto init_func_to_func_fail; 2781 } 2782 2783 rc = hinic_card_workmode_check(nic_dev); 2784 if (rc) { 2785 PMD_DRV_LOG(ERR, "Check card workmode failed, dev_name: %s", 2786 eth_dev->data->name); 2787 goto workmode_check_fail; 2788 } 2789 2790 /* do l2nic reset to make chip clear */ 2791 rc = hinic_l2nic_reset(nic_dev->hwdev); 2792 if (rc) { 2793 PMD_DRV_LOG(ERR, "Do l2nic reset failed, dev_name: %s", 2794 eth_dev->data->name); 2795 goto l2nic_reset_fail; 2796 } 2797 2798 /* init dma and aeq msix attribute table */ 2799 (void)hinic_init_attr_table(nic_dev->hwdev); 2800 2801 /* init_cmdqs */ 2802 rc = hinic_comm_cmdqs_init(nic_dev->hwdev); 2803 if (rc) { 2804 PMD_DRV_LOG(ERR, "Initialize cmdq failed, dev_name: %s", 2805 eth_dev->data->name); 2806 goto init_cmdq_fail; 2807 } 2808 2809 /* set hardware state active */ 2810 rc = hinic_activate_hwdev_state(nic_dev->hwdev); 2811 if (rc) { 2812 PMD_DRV_LOG(ERR, "Initialize resources state failed, dev_name: %s", 2813 eth_dev->data->name); 2814 goto init_resources_state_fail; 2815 } 2816 2817 /* init_capability */ 2818 rc = hinic_init_capability(nic_dev->hwdev); 2819 if (rc) { 2820 PMD_DRV_LOG(ERR, "Initialize capability failed, dev_name: %s", 2821 eth_dev->data->name); 2822 goto init_cap_fail; 2823 } 2824 2825 /* get nic capability */ 2826 if (!hinic_support_nic(nic_dev->hwdev, &nic_dev->nic_cap)) { 2827 PMD_DRV_LOG(ERR, "Hw doesn't support nic, dev_name: %s", 2828 eth_dev->data->name); 2829 rc = -EINVAL; 2830 goto nic_check_fail; 2831 } 2832 2833 /* init root cla and function table */ 2834 rc = hinic_init_nicio(nic_dev->hwdev); 2835 if (rc) { 2836 PMD_DRV_LOG(ERR, "Initialize nic_io failed, dev_name: %s", 2837 eth_dev->data->name); 2838 goto init_nicio_fail; 2839 } 2840 2841 /* init_software_txrxq */ 2842 rc = hinic_init_sw_rxtxqs(nic_dev); 2843 if (rc) { 2844 PMD_DRV_LOG(ERR, "Initialize sw_rxtxqs failed, dev_name: %s", 2845 eth_dev->data->name); 2846 goto init_sw_rxtxqs_fail; 2847 } 2848 2849 rc = hinic_copy_mempool_init(nic_dev); 2850 if (rc) { 2851 PMD_DRV_LOG(ERR, "Create copy mempool failed, dev_name: %s", 2852 eth_dev->data->name); 2853 goto init_mpool_fail; 2854 } 2855 2856 /* set hardware feature to default status */ 2857 rc = hinic_set_default_hw_feature(nic_dev); 2858 if (rc) { 2859 PMD_DRV_LOG(ERR, "Initialize hardware default features failed, dev_name: %s", 2860 eth_dev->data->name); 2861 goto set_default_hw_feature_fail; 2862 } 2863 2864 return 0; 2865 2866 set_default_hw_feature_fail: 2867 hinic_copy_mempool_uninit(nic_dev); 2868 2869 init_mpool_fail: 2870 hinic_deinit_sw_rxtxqs(nic_dev); 2871 2872 init_sw_rxtxqs_fail: 2873 hinic_deinit_nicio(nic_dev->hwdev); 2874 2875 
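/*
 * The error unwind continues below in the reverse order of initialization;
 * labels that carry no statement of their own (e.g. nic_check_fail,
 * init_cap_fail, l2nic_reset_fail) simply fall through to the next
 * teardown step.
 */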
nic_check_fail: 2876 init_nicio_fail: 2877 init_cap_fail: 2878 hinic_deactivate_hwdev_state(nic_dev->hwdev); 2879 2880 init_resources_state_fail: 2881 hinic_comm_cmdqs_free(nic_dev->hwdev); 2882 2883 init_cmdq_fail: 2884 l2nic_reset_fail: 2885 workmode_check_fail: 2886 hinic_comm_func_to_func_free(nic_dev->hwdev); 2887 2888 init_func_to_func_fail: 2889 hinic_comm_pf_to_mgmt_free(nic_dev->hwdev); 2890 2891 init_pf_to_mgmt_fail: 2892 hinic_comm_aeqs_free(nic_dev->hwdev); 2893 2894 init_aeqs_fail: 2895 free_cfg_mgmt(nic_dev->hwdev); 2896 2897 init_cfgmgnt_fail: 2898 hinic_hwif_res_free(nic_dev->hwdev); 2899 2900 init_hwif_fail: 2901 hinic_osdep_deinit(nic_dev->hwdev); 2902 2903 init_osdep_fail: 2904 rte_free(nic_dev->hwdev); 2905 nic_dev->hwdev = NULL; 2906 2907 return rc; 2908 } 2909 2910 static void hinic_nic_dev_destroy(struct rte_eth_dev *eth_dev) 2911 { 2912 struct hinic_nic_dev *nic_dev = 2913 HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 2914 2915 (void)hinic_set_link_status_follow(nic_dev->hwdev, 2916 HINIC_LINK_FOLLOW_DEFAULT); 2917 hinic_copy_mempool_uninit(nic_dev); 2918 hinic_deinit_sw_rxtxqs(nic_dev); 2919 hinic_deinit_nicio(nic_dev->hwdev); 2920 hinic_deactivate_hwdev_state(nic_dev->hwdev); 2921 hinic_comm_cmdqs_free(nic_dev->hwdev); 2922 hinic_comm_func_to_func_free(nic_dev->hwdev); 2923 hinic_comm_pf_to_mgmt_free(nic_dev->hwdev); 2924 hinic_comm_aeqs_free(nic_dev->hwdev); 2925 free_cfg_mgmt(nic_dev->hwdev); 2926 hinic_hwif_res_free(nic_dev->hwdev); 2927 hinic_osdep_deinit(nic_dev->hwdev); 2928 rte_free(nic_dev->hwdev); 2929 nic_dev->hwdev = NULL; 2930 } 2931 2932 /** 2933 * DPDK callback to close the device. 2934 * 2935 * @param dev 2936 * Pointer to Ethernet device structure. 2937 */ 2938 static int hinic_dev_close(struct rte_eth_dev *dev) 2939 { 2940 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); 2941 int ret; 2942 2943 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2944 return 0; 2945 2946 if (rte_bit_relaxed_test_and_set32(HINIC_DEV_CLOSE, 2947 &nic_dev->dev_status)) { 2948 PMD_DRV_LOG(WARNING, "Device %s already closed", 2949 dev->data->name); 2950 return 0; 2951 } 2952 2953 /* stop device first */ 2954 ret = hinic_dev_stop(dev); 2955 2956 /* rx_cqe, rx_info */ 2957 hinic_free_all_rx_resources(dev); 2958 2959 /* tx_info */ 2960 hinic_free_all_tx_resources(dev); 2961 2962 /* free wq, pi_dma_addr */ 2963 hinic_free_all_rq(nic_dev); 2964 2965 /* free wq, db_addr */ 2966 hinic_free_all_sq(nic_dev); 2967 2968 /* deinit mac vlan tbl */ 2969 hinic_deinit_mac_addr(dev); 2970 hinic_remove_all_vlanid(dev); 2971 2972 /* disable hardware and uio interrupt */ 2973 hinic_disable_interrupt(dev); 2974 2975 /* destroy rx mode mutex */ 2976 hinic_mutex_destroy(&nic_dev->rx_mode_mutex); 2977 2978 /* deinit nic hardware device */ 2979 hinic_nic_dev_destroy(dev); 2980 2981 return ret; 2982 } 2983 2984 static const struct eth_dev_ops hinic_pmd_ops = { 2985 .dev_configure = hinic_dev_configure, 2986 .dev_infos_get = hinic_dev_infos_get, 2987 .fw_version_get = hinic_fw_version_get, 2988 .rx_queue_setup = hinic_rx_queue_setup, 2989 .tx_queue_setup = hinic_tx_queue_setup, 2990 .dev_start = hinic_dev_start, 2991 .dev_set_link_up = hinic_dev_set_link_up, 2992 .dev_set_link_down = hinic_dev_set_link_down, 2993 .link_update = hinic_link_update, 2994 .rx_queue_release = hinic_rx_queue_release, 2995 .tx_queue_release = hinic_tx_queue_release, 2996 .dev_stop = hinic_dev_stop, 2997 .dev_close = hinic_dev_close, 2998 .mtu_set = hinic_dev_set_mtu, 2999 .vlan_filter_set = 
hinic_vlan_filter_set, 3000 .vlan_offload_set = hinic_vlan_offload_set, 3001 .allmulticast_enable = hinic_dev_allmulticast_enable, 3002 .allmulticast_disable = hinic_dev_allmulticast_disable, 3003 .promiscuous_enable = hinic_dev_promiscuous_enable, 3004 .promiscuous_disable = hinic_dev_promiscuous_disable, 3005 .flow_ctrl_get = hinic_flow_ctrl_get, 3006 .flow_ctrl_set = hinic_flow_ctrl_set, 3007 .rss_hash_update = hinic_rss_hash_update, 3008 .rss_hash_conf_get = hinic_rss_conf_get, 3009 .reta_update = hinic_rss_indirtbl_update, 3010 .reta_query = hinic_rss_indirtbl_query, 3011 .stats_get = hinic_dev_stats_get, 3012 .stats_reset = hinic_dev_stats_reset, 3013 .xstats_get = hinic_dev_xstats_get, 3014 .xstats_reset = hinic_dev_xstats_reset, 3015 .xstats_get_names = hinic_dev_xstats_get_names, 3016 .rxq_info_get = hinic_rxq_info_get, 3017 .txq_info_get = hinic_txq_info_get, 3018 .mac_addr_set = hinic_set_mac_addr, 3019 .mac_addr_remove = hinic_mac_addr_remove, 3020 .mac_addr_add = hinic_mac_addr_add, 3021 .set_mc_addr_list = hinic_set_mc_addr_list, 3022 .flow_ops_get = hinic_dev_flow_ops_get, 3023 }; 3024 3025 static const struct eth_dev_ops hinic_pmd_vf_ops = { 3026 .dev_configure = hinic_dev_configure, 3027 .dev_infos_get = hinic_dev_infos_get, 3028 .fw_version_get = hinic_fw_version_get, 3029 .rx_queue_setup = hinic_rx_queue_setup, 3030 .tx_queue_setup = hinic_tx_queue_setup, 3031 .dev_start = hinic_dev_start, 3032 .link_update = hinic_link_update, 3033 .rx_queue_release = hinic_rx_queue_release, 3034 .tx_queue_release = hinic_tx_queue_release, 3035 .dev_stop = hinic_dev_stop, 3036 .dev_close = hinic_dev_close, 3037 .mtu_set = hinic_dev_set_mtu, 3038 .vlan_filter_set = hinic_vlan_filter_set, 3039 .vlan_offload_set = hinic_vlan_offload_set, 3040 .allmulticast_enable = hinic_dev_allmulticast_enable, 3041 .allmulticast_disable = hinic_dev_allmulticast_disable, 3042 .rss_hash_update = hinic_rss_hash_update, 3043 .rss_hash_conf_get = hinic_rss_conf_get, 3044 .reta_update = hinic_rss_indirtbl_update, 3045 .reta_query = hinic_rss_indirtbl_query, 3046 .stats_get = hinic_dev_stats_get, 3047 .stats_reset = hinic_dev_stats_reset, 3048 .xstats_get = hinic_dev_xstats_get, 3049 .xstats_reset = hinic_dev_xstats_reset, 3050 .xstats_get_names = hinic_dev_xstats_get_names, 3051 .rxq_info_get = hinic_rxq_info_get, 3052 .txq_info_get = hinic_txq_info_get, 3053 .mac_addr_set = hinic_set_mac_addr, 3054 .mac_addr_remove = hinic_mac_addr_remove, 3055 .mac_addr_add = hinic_mac_addr_add, 3056 .set_mc_addr_list = hinic_set_mc_addr_list, 3057 .flow_ops_get = hinic_dev_flow_ops_get, 3058 }; 3059 3060 static const struct eth_dev_ops hinic_dev_sec_ops = { 3061 .dev_infos_get = hinic_dev_infos_get, 3062 }; 3063 3064 static int hinic_func_init(struct rte_eth_dev *eth_dev) 3065 { 3066 struct rte_pci_device *pci_dev; 3067 struct rte_ether_addr *eth_addr; 3068 struct hinic_nic_dev *nic_dev; 3069 struct hinic_filter_info *filter_info; 3070 struct hinic_tcam_info *tcam_info; 3071 u32 mac_size; 3072 int rc; 3073 3074 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3075 3076 /* EAL is SECONDARY and eth_dev is already created */ 3077 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3078 eth_dev->dev_ops = &hinic_dev_sec_ops; 3079 PMD_DRV_LOG(INFO, "Initialize %s in secondary process", 3080 eth_dev->data->name); 3081 3082 return 0; 3083 } 3084 3085 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 3086 3087 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); 3088 memset(nic_dev, 0, sizeof(*nic_dev)); 3089 3090 
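	/*
	 * Build the per-process device name from the PCI address
	 * (e.g. "hinic-0000:81:00.0"); hinic_copy_mempool_init() reuses this
	 * string as the name of the copy mempool, so it must be unique per
	 * device.
	 */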
snprintf(nic_dev->proc_dev_name, 3091 sizeof(nic_dev->proc_dev_name), 3092 "hinic-%.4x:%.2x:%.2x.%x", 3093 pci_dev->addr.domain, pci_dev->addr.bus, 3094 pci_dev->addr.devid, pci_dev->addr.function); 3095 3096 /* alloc mac_addrs */ 3097 mac_size = HINIC_MAX_UC_MAC_ADDRS * sizeof(struct rte_ether_addr); 3098 eth_addr = rte_zmalloc("hinic_mac", mac_size, 0); 3099 if (!eth_addr) { 3100 PMD_DRV_LOG(ERR, "Allocate ethernet addresses' memory failed, dev_name: %s", 3101 eth_dev->data->name); 3102 rc = -ENOMEM; 3103 goto eth_addr_fail; 3104 } 3105 eth_dev->data->mac_addrs = eth_addr; 3106 3107 mac_size = HINIC_MAX_MC_MAC_ADDRS * sizeof(struct rte_ether_addr); 3108 nic_dev->mc_list = rte_zmalloc("hinic_mc", mac_size, 0); 3109 if (!nic_dev->mc_list) { 3110 PMD_DRV_LOG(ERR, "Allocate mcast address' memory failed, dev_name: %s", 3111 eth_dev->data->name); 3112 rc = -ENOMEM; 3113 goto mc_addr_fail; 3114 } 3115 3116 /* create hardware nic_device */ 3117 rc = hinic_nic_dev_create(eth_dev); 3118 if (rc) { 3119 PMD_DRV_LOG(ERR, "Create nic device failed, dev_name: %s", 3120 eth_dev->data->name); 3121 goto create_nic_dev_fail; 3122 } 3123 3124 if (HINIC_IS_VF(nic_dev->hwdev)) 3125 eth_dev->dev_ops = &hinic_pmd_vf_ops; 3126 else 3127 eth_dev->dev_ops = &hinic_pmd_ops; 3128 3129 rc = hinic_init_mac_addr(eth_dev); 3130 if (rc) { 3131 PMD_DRV_LOG(ERR, "Initialize mac table failed, dev_name: %s", 3132 eth_dev->data->name); 3133 goto init_mac_fail; 3134 } 3135 3136 /* register callback func to eal lib */ 3137 rc = rte_intr_callback_register(&pci_dev->intr_handle, 3138 hinic_dev_interrupt_handler, 3139 (void *)eth_dev); 3140 if (rc) { 3141 PMD_DRV_LOG(ERR, "Register rte interrupt callback failed, dev_name: %s", 3142 eth_dev->data->name); 3143 goto reg_intr_cb_fail; 3144 } 3145 3146 /* enable uio/vfio intr/eventfd mapping */ 3147 rc = rte_intr_enable(&pci_dev->intr_handle); 3148 if (rc) { 3149 PMD_DRV_LOG(ERR, "Enable rte interrupt failed, dev_name: %s", 3150 eth_dev->data->name); 3151 goto enable_intr_fail; 3152 } 3153 rte_bit_relaxed_set32(HINIC_DEV_INTR_EN, &nic_dev->dev_status); 3154 3155 hinic_mutex_init(&nic_dev->rx_mode_mutex, NULL); 3156 3157 /* initialize filter info */ 3158 filter_info = &nic_dev->filter; 3159 tcam_info = &nic_dev->tcam; 3160 memset(filter_info, 0, sizeof(struct hinic_filter_info)); 3161 memset(tcam_info, 0, sizeof(struct hinic_tcam_info)); 3162 /* initialize 5tuple filter list */ 3163 TAILQ_INIT(&filter_info->fivetuple_list); 3164 TAILQ_INIT(&tcam_info->tcam_list); 3165 TAILQ_INIT(&nic_dev->filter_ntuple_list); 3166 TAILQ_INIT(&nic_dev->filter_ethertype_list); 3167 TAILQ_INIT(&nic_dev->filter_fdir_rule_list); 3168 TAILQ_INIT(&nic_dev->hinic_flow_list); 3169 3170 rte_bit_relaxed_set32(HINIC_DEV_INIT, &nic_dev->dev_status); 3171 PMD_DRV_LOG(INFO, "Initialize %s in primary successfully", 3172 eth_dev->data->name); 3173 3174 return 0; 3175 3176 enable_intr_fail: 3177 (void)rte_intr_callback_unregister(&pci_dev->intr_handle, 3178 hinic_dev_interrupt_handler, 3179 (void *)eth_dev); 3180 3181 reg_intr_cb_fail: 3182 hinic_deinit_mac_addr(eth_dev); 3183 3184 init_mac_fail: 3185 eth_dev->dev_ops = NULL; 3186 hinic_nic_dev_destroy(eth_dev); 3187 3188 create_nic_dev_fail: 3189 rte_free(nic_dev->mc_list); 3190 nic_dev->mc_list = NULL; 3191 3192 mc_addr_fail: 3193 rte_free(eth_addr); 3194 eth_dev->data->mac_addrs = NULL; 3195 3196 eth_addr_fail: 3197 PMD_DRV_LOG(ERR, "Initialize %s in primary failed", 3198 eth_dev->data->name); 3199 return rc; 3200 } 3201 3202 static int hinic_dev_init(struct 
rte_eth_dev *eth_dev) 3203 { 3204 struct rte_pci_device *pci_dev; 3205 3206 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3207 3208 PMD_DRV_LOG(INFO, "Initializing pf hinic-%.4x:%.2x:%.2x.%x in %s process", 3209 pci_dev->addr.domain, pci_dev->addr.bus, 3210 pci_dev->addr.devid, pci_dev->addr.function, 3211 (rte_eal_process_type() == RTE_PROC_PRIMARY) ? 3212 "primary" : "secondary"); 3213 3214 /* rte_eth_dev rx_burst and tx_burst */ 3215 eth_dev->rx_pkt_burst = hinic_recv_pkts; 3216 eth_dev->tx_pkt_burst = hinic_xmit_pkts; 3217 3218 return hinic_func_init(eth_dev); 3219 } 3220 3221 static int hinic_dev_uninit(struct rte_eth_dev *dev) 3222 { 3223 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3224 return 0; 3225 3226 hinic_dev_close(dev); 3227 3228 return HINIC_OK; 3229 } 3230 3231 static struct rte_pci_id pci_id_hinic_map[] = { 3232 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_PRD) }, 3233 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_25GE) }, 3234 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_100GE) }, 3235 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF) }, 3236 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF_HV) }, 3237 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_DUAL_25GE) }, 3238 { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_100GE) }, 3239 {.vendor_id = 0}, 3240 }; 3241 3242 static int hinic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3243 struct rte_pci_device *pci_dev) 3244 { 3245 return rte_eth_dev_pci_generic_probe(pci_dev, 3246 sizeof(struct hinic_nic_dev), hinic_dev_init); 3247 } 3248 3249 static int hinic_pci_remove(struct rte_pci_device *pci_dev) 3250 { 3251 return rte_eth_dev_pci_generic_remove(pci_dev, hinic_dev_uninit); 3252 } 3253 3254 static struct rte_pci_driver rte_hinic_pmd = { 3255 .id_table = pci_id_hinic_map, 3256 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 3257 .probe = hinic_pci_probe, 3258 .remove = hinic_pci_remove, 3259 }; 3260 3261 RTE_PMD_REGISTER_PCI(net_hinic, rte_hinic_pmd); 3262 RTE_PMD_REGISTER_PCI_TABLE(net_hinic, pci_id_hinic_map); 3263 RTE_LOG_REGISTER_DEFAULT(hinic_logtype, INFO); 3264
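/*
 * RTE_PMD_REGISTER_PCI() registers the driver with the PCI bus under the
 * name "net_hinic", RTE_PMD_REGISTER_PCI_TABLE() exports pci_id_hinic_map
 * so external tools can match supported devices, and
 * RTE_LOG_REGISTER_DEFAULT() creates the driver's dynamic log type with a
 * default level of INFO.
 *
 * Usage sketch (assumption, not part of this file): once a supported
 * adapter is bound to a UIO/VFIO kernel driver, the PMD is probed
 * automatically during the EAL PCI scan, e.g.:
 *   dpdk-devbind.py --bind=vfio-pci 0000:81:00.0
 *   dpdk-testpmd -a 0000:81:00.0 -- -i
 * where 0000:81:00.0 is a placeholder PCI address.
 */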