1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018-2019 Hisilicon Limited. 3 */ 4 5 #include <rte_alarm.h> 6 #include <rte_bus_pci.h> 7 #include <ethdev_pci.h> 8 #include <rte_pci.h> 9 #include <rte_kvargs.h> 10 11 #include "hns3_ethdev.h" 12 #include "hns3_logs.h" 13 #include "hns3_rxtx.h" 14 #include "hns3_intr.h" 15 #include "hns3_regs.h" 16 #include "hns3_dcb.h" 17 #include "hns3_mp.h" 18 19 #define HNS3_DEFAULT_PORT_CONF_BURST_SIZE 32 20 #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1 21 22 #define HNS3_SERVICE_INTERVAL 1000000 /* us */ 23 #define HNS3_SERVICE_QUICK_INTERVAL 10 24 #define HNS3_INVALID_PVID 0xFFFF 25 26 #define HNS3_FILTER_TYPE_VF 0 27 #define HNS3_FILTER_TYPE_PORT 1 28 #define HNS3_FILTER_FE_EGRESS_V1_B BIT(0) 29 #define HNS3_FILTER_FE_NIC_INGRESS_B BIT(0) 30 #define HNS3_FILTER_FE_NIC_EGRESS_B BIT(1) 31 #define HNS3_FILTER_FE_ROCE_INGRESS_B BIT(2) 32 #define HNS3_FILTER_FE_ROCE_EGRESS_B BIT(3) 33 #define HNS3_FILTER_FE_EGRESS (HNS3_FILTER_FE_NIC_EGRESS_B \ 34 | HNS3_FILTER_FE_ROCE_EGRESS_B) 35 #define HNS3_FILTER_FE_INGRESS (HNS3_FILTER_FE_NIC_INGRESS_B \ 36 | HNS3_FILTER_FE_ROCE_INGRESS_B) 37 38 /* Reset related Registers */ 39 #define HNS3_GLOBAL_RESET_BIT 0 40 #define HNS3_CORE_RESET_BIT 1 41 #define HNS3_IMP_RESET_BIT 2 42 #define HNS3_FUN_RST_ING_B 0 43 44 #define HNS3_VECTOR0_IMP_RESET_INT_B 1 45 #define HNS3_VECTOR0_IMP_CMDQ_ERR_B 4U 46 #define HNS3_VECTOR0_IMP_RD_POISON_B 5U 47 #define HNS3_VECTOR0_ALL_MSIX_ERR_B 6U 48 49 #define HNS3_RESET_WAIT_MS 100 50 #define HNS3_RESET_WAIT_CNT 200 51 52 /* FEC mode order defined in HNS3 hardware */ 53 #define HNS3_HW_FEC_MODE_NOFEC 0 54 #define HNS3_HW_FEC_MODE_BASER 1 55 #define HNS3_HW_FEC_MODE_RS 2 56 57 enum hns3_evt_cause { 58 HNS3_VECTOR0_EVENT_RST, 59 HNS3_VECTOR0_EVENT_MBX, 60 HNS3_VECTOR0_EVENT_ERR, 61 HNS3_VECTOR0_EVENT_OTHER, 62 }; 63 64 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = { 65 { ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 66 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 67 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) }, 68 69 { ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 70 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 71 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) | 72 RTE_ETH_FEC_MODE_CAPA_MASK(RS) }, 73 74 { ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 75 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 76 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) }, 77 78 { ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 79 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 80 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) | 81 RTE_ETH_FEC_MODE_CAPA_MASK(RS) }, 82 83 { ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 84 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 85 RTE_ETH_FEC_MODE_CAPA_MASK(RS) }, 86 87 { ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 88 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 89 RTE_ETH_FEC_MODE_CAPA_MASK(RS) } 90 }; 91 92 static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns, 93 uint64_t *levels); 94 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 95 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, 96 int on); 97 static int hns3_update_link_info(struct rte_eth_dev *eth_dev); 98 static bool hns3_update_link_status(struct hns3_hw *hw); 99 100 static int hns3_add_mc_addr(struct hns3_hw *hw, 101 struct rte_ether_addr *mac_addr); 102 static int hns3_remove_mc_addr(struct hns3_hw *hw, 103 struct rte_ether_addr *mac_addr); 104 static int hns3_restore_fec(struct hns3_hw *hw); 105 static int hns3_query_dev_fec_info(struct hns3_hw *hw); 106 static int 
hns3_do_stop(struct hns3_adapter *hns); 107 108 void hns3_ether_format_addr(char *buf, uint16_t size, 109 const struct rte_ether_addr *ether_addr) 110 { 111 snprintf(buf, size, "%02X:**:**:**:%02X:%02X", 112 ether_addr->addr_bytes[0], 113 ether_addr->addr_bytes[4], 114 ether_addr->addr_bytes[5]); 115 } 116 117 static void 118 hns3_pf_disable_irq0(struct hns3_hw *hw) 119 { 120 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0); 121 } 122 123 static void 124 hns3_pf_enable_irq0(struct hns3_hw *hw) 125 { 126 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1); 127 } 128 129 static enum hns3_evt_cause 130 hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay, 131 uint32_t *vec_val) 132 { 133 struct hns3_hw *hw = &hns->hw; 134 135 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 136 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); 137 *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B); 138 if (!is_delay) { 139 hw->reset.stats.imp_cnt++; 140 hns3_warn(hw, "IMP reset detected, clear reset status"); 141 } else { 142 hns3_schedule_delayed_reset(hns); 143 hns3_warn(hw, "IMP reset detected, don't clear reset status"); 144 } 145 146 return HNS3_VECTOR0_EVENT_RST; 147 } 148 149 static enum hns3_evt_cause 150 hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay, 151 uint32_t *vec_val) 152 { 153 struct hns3_hw *hw = &hns->hw; 154 155 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 156 hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending); 157 *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); 158 if (!is_delay) { 159 hw->reset.stats.global_cnt++; 160 hns3_warn(hw, "Global reset detected, clear reset status"); 161 } else { 162 hns3_schedule_delayed_reset(hns); 163 hns3_warn(hw, 164 "Global reset detected, don't clear reset status"); 165 } 166 167 return HNS3_VECTOR0_EVENT_RST; 168 } 169 170 static enum hns3_evt_cause 171 hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) 172 { 173 struct hns3_hw *hw = &hns->hw; 174 uint32_t vector0_int_stats; 175 uint32_t cmdq_src_val; 176 uint32_t hw_err_src_reg; 177 uint32_t val; 178 enum hns3_evt_cause ret; 179 bool is_delay; 180 181 /* fetch the events from their corresponding regs */ 182 vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); 183 cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); 184 hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); 185 186 is_delay = clearval == NULL ? true : false; 187 /* 188 * Assumption: If by any chance reset and mailbox events are reported 189 * together then we will only process reset event and defer the 190 * processing of the mailbox events. Since, we would have not cleared 191 * RX CMDQ event this time we would receive again another interrupt 192 * from H/W just for the mailbox. 
193 */ 194 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */ 195 ret = hns3_proc_imp_reset_event(hns, is_delay, &val); 196 goto out; 197 } 198 199 /* Global reset */ 200 if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) { 201 ret = hns3_proc_global_reset_event(hns, is_delay, &val); 202 goto out; 203 } 204 205 /* check for vector0 msix event source */ 206 if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK || 207 hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) { 208 val = vector0_int_stats | hw_err_src_reg; 209 ret = HNS3_VECTOR0_EVENT_ERR; 210 goto out; 211 } 212 213 /* check for vector0 mailbox(=CMDQ RX) event source */ 214 if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) { 215 cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B); 216 val = cmdq_src_val; 217 ret = HNS3_VECTOR0_EVENT_MBX; 218 goto out; 219 } 220 221 val = vector0_int_stats; 222 ret = HNS3_VECTOR0_EVENT_OTHER; 223 out: 224 225 if (clearval) 226 *clearval = val; 227 return ret; 228 } 229 230 static void 231 hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr) 232 { 233 if (event_type == HNS3_VECTOR0_EVENT_RST) 234 hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr); 235 else if (event_type == HNS3_VECTOR0_EVENT_MBX) 236 hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr); 237 } 238 239 static void 240 hns3_clear_all_event_cause(struct hns3_hw *hw) 241 { 242 uint32_t vector0_int_stats; 243 vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); 244 245 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) 246 hns3_warn(hw, "Probe during IMP reset interrupt"); 247 248 if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) 249 hns3_warn(hw, "Probe during Global reset interrupt"); 250 251 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST, 252 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | 253 BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | 254 BIT(HNS3_VECTOR0_CORERESET_INT_B)); 255 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0); 256 } 257 258 static void 259 hns3_handle_mac_tnl(struct hns3_hw *hw) 260 { 261 struct hns3_cmd_desc desc; 262 uint32_t status; 263 int ret; 264 265 /* query and clear mac tnl interruptions */ 266 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true); 267 ret = hns3_cmd_send(hw, &desc, 1); 268 if (ret) { 269 hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret); 270 return; 271 } 272 273 status = rte_le_to_cpu_32(desc.data[0]); 274 if (status) { 275 hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status); 276 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT, 277 false); 278 desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR); 279 ret = hns3_cmd_send(hw, &desc, 1); 280 if (ret) 281 hns3_err(hw, "failed to clear mac tnl int, ret = %d.", 282 ret); 283 } 284 } 285 286 static void 287 hns3_interrupt_handler(void *param) 288 { 289 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 290 struct hns3_adapter *hns = dev->data->dev_private; 291 struct hns3_hw *hw = &hns->hw; 292 enum hns3_evt_cause event_cause; 293 uint32_t clearval = 0; 294 uint32_t vector0_int; 295 uint32_t ras_int; 296 uint32_t cmdq_int; 297 298 /* Disable interrupt */ 299 hns3_pf_disable_irq0(hw); 300 301 event_cause = hns3_check_event_cause(hns, &clearval); 302 vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); 303 ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); 304 cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); 305 /* vector 0 interrupt is shared with reset and mailbox source events. 
*/ 306 if (event_cause == HNS3_VECTOR0_EVENT_ERR) { 307 hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x " 308 "ras_int_stat:0x%x cmdq_int_stat:0x%x", 309 vector0_int, ras_int, cmdq_int); 310 hns3_handle_msix_error(hns, &hw->reset.request); 311 hns3_handle_ras_error(hns, &hw->reset.request); 312 hns3_handle_mac_tnl(hw); 313 hns3_schedule_reset(hns); 314 } else if (event_cause == HNS3_VECTOR0_EVENT_RST) { 315 hns3_warn(hw, "received reset interrupt"); 316 hns3_schedule_reset(hns); 317 } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) { 318 hns3_dev_handle_mbx_msg(hw); 319 } else { 320 hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x " 321 "ras_int_stat:0x%x cmdq_int_stat:0x%x", 322 vector0_int, ras_int, cmdq_int); 323 } 324 325 hns3_clear_event_cause(hw, event_cause, clearval); 326 /* Enable interrupt if it is not cause by reset */ 327 hns3_pf_enable_irq0(hw); 328 } 329 330 static int 331 hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on) 332 { 333 #define HNS3_VLAN_ID_OFFSET_STEP 160 334 #define HNS3_VLAN_BYTE_SIZE 8 335 struct hns3_vlan_filter_pf_cfg_cmd *req; 336 struct hns3_hw *hw = &hns->hw; 337 uint8_t vlan_offset_byte_val; 338 struct hns3_cmd_desc desc; 339 uint8_t vlan_offset_byte; 340 uint8_t vlan_offset_base; 341 int ret; 342 343 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false); 344 345 vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP; 346 vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) / 347 HNS3_VLAN_BYTE_SIZE; 348 vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE); 349 350 req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data; 351 req->vlan_offset = vlan_offset_base; 352 req->vlan_cfg = on ? 0 : 1; 353 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 354 355 ret = hns3_cmd_send(hw, &desc, 1); 356 if (ret) 357 hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d", 358 vlan_id, ret); 359 360 return ret; 361 } 362 363 static void 364 hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id) 365 { 366 struct hns3_user_vlan_table *vlan_entry; 367 struct hns3_pf *pf = &hns->pf; 368 369 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 370 if (vlan_entry->vlan_id == vlan_id) { 371 if (vlan_entry->hd_tbl_status) 372 hns3_set_port_vlan_filter(hns, vlan_id, 0); 373 LIST_REMOVE(vlan_entry, next); 374 rte_free(vlan_entry); 375 break; 376 } 377 } 378 } 379 380 static void 381 hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id, 382 bool writen_to_tbl) 383 { 384 struct hns3_user_vlan_table *vlan_entry; 385 struct hns3_hw *hw = &hns->hw; 386 struct hns3_pf *pf = &hns->pf; 387 388 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 389 if (vlan_entry->vlan_id == vlan_id) 390 return; 391 } 392 393 vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0); 394 if (vlan_entry == NULL) { 395 hns3_err(hw, "Failed to malloc hns3 vlan table"); 396 return; 397 } 398 399 vlan_entry->hd_tbl_status = writen_to_tbl; 400 vlan_entry->vlan_id = vlan_id; 401 402 LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next); 403 } 404 405 static int 406 hns3_restore_vlan_table(struct hns3_adapter *hns) 407 { 408 struct hns3_user_vlan_table *vlan_entry; 409 struct hns3_hw *hw = &hns->hw; 410 struct hns3_pf *pf = &hns->pf; 411 uint16_t vlan_id; 412 int ret = 0; 413 414 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) 415 return hns3_vlan_pvid_configure(hns, 416 hw->port_base_vlan_cfg.pvid, 1); 417 418 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 419 if 
(vlan_entry->hd_tbl_status) { 420 vlan_id = vlan_entry->vlan_id; 421 ret = hns3_set_port_vlan_filter(hns, vlan_id, 1); 422 if (ret) 423 break; 424 } 425 } 426 427 return ret; 428 } 429 430 static int 431 hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) 432 { 433 struct hns3_hw *hw = &hns->hw; 434 bool writen_to_tbl = false; 435 int ret = 0; 436 437 /* 438 * When vlan filter is enabled, hardware regards packets without vlan 439 * as packets with vlan 0. So, to receive packets without vlan, vlan id 440 * 0 is not allowed to be removed by rte_eth_dev_vlan_filter. 441 */ 442 if (on == 0 && vlan_id == 0) 443 return 0; 444 445 /* 446 * When port base vlan enabled, we use port base vlan as the vlan 447 * filter condition. In this case, we don't update vlan filter table 448 * when user add new vlan or remove exist vlan, just update the 449 * vlan list. The vlan id in vlan list will be writen in vlan filter 450 * table until port base vlan disabled 451 */ 452 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { 453 ret = hns3_set_port_vlan_filter(hns, vlan_id, on); 454 writen_to_tbl = true; 455 } 456 457 if (ret == 0) { 458 if (on) 459 hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl); 460 else 461 hns3_rm_dev_vlan_table(hns, vlan_id); 462 } 463 return ret; 464 } 465 466 static int 467 hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 468 { 469 struct hns3_adapter *hns = dev->data->dev_private; 470 struct hns3_hw *hw = &hns->hw; 471 int ret; 472 473 rte_spinlock_lock(&hw->lock); 474 ret = hns3_vlan_filter_configure(hns, vlan_id, on); 475 rte_spinlock_unlock(&hw->lock); 476 return ret; 477 } 478 479 static int 480 hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type, 481 uint16_t tpid) 482 { 483 struct hns3_rx_vlan_type_cfg_cmd *rx_req; 484 struct hns3_tx_vlan_type_cfg_cmd *tx_req; 485 struct hns3_hw *hw = &hns->hw; 486 struct hns3_cmd_desc desc; 487 int ret; 488 489 if ((vlan_type != ETH_VLAN_TYPE_INNER && 490 vlan_type != ETH_VLAN_TYPE_OUTER)) { 491 hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type); 492 return -EINVAL; 493 } 494 495 if (tpid != RTE_ETHER_TYPE_VLAN) { 496 hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type); 497 return -EINVAL; 498 } 499 500 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false); 501 rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data; 502 503 if (vlan_type == ETH_VLAN_TYPE_OUTER) { 504 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid); 505 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid); 506 } else if (vlan_type == ETH_VLAN_TYPE_INNER) { 507 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid); 508 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid); 509 rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid); 510 rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid); 511 } 512 513 ret = hns3_cmd_send(hw, &desc, 1); 514 if (ret) { 515 hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d", 516 ret); 517 return ret; 518 } 519 520 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false); 521 522 tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data; 523 tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid); 524 tx_req->in_vlan_type = rte_cpu_to_le_16(tpid); 525 526 ret = hns3_cmd_send(hw, &desc, 1); 527 if (ret) 528 hns3_err(hw, "Send txvlan protocol type command fail, ret =%d", 529 ret); 530 return ret; 531 } 532 533 static int 534 hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 535 uint16_t tpid) 536 { 537 
struct hns3_adapter *hns = dev->data->dev_private; 538 struct hns3_hw *hw = &hns->hw; 539 int ret; 540 541 rte_spinlock_lock(&hw->lock); 542 ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid); 543 rte_spinlock_unlock(&hw->lock); 544 return ret; 545 } 546 547 static int 548 hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns, 549 struct hns3_rx_vtag_cfg *vcfg) 550 { 551 struct hns3_vport_vtag_rx_cfg_cmd *req; 552 struct hns3_hw *hw = &hns->hw; 553 struct hns3_cmd_desc desc; 554 uint16_t vport_id; 555 uint8_t bitmap; 556 int ret; 557 558 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false); 559 560 req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data; 561 hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B, 562 vcfg->strip_tag1_en ? 1 : 0); 563 hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B, 564 vcfg->strip_tag2_en ? 1 : 0); 565 hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B, 566 vcfg->vlan1_vlan_prionly ? 1 : 0); 567 hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B, 568 vcfg->vlan2_vlan_prionly ? 1 : 0); 569 570 /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */ 571 hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B, 572 vcfg->strip_tag1_discard_en ? 1 : 0); 573 hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B, 574 vcfg->strip_tag2_discard_en ? 1 : 0); 575 /* 576 * In current version VF is not supported when PF is driven by DPDK 577 * driver, just need to configure parameters for PF vport. 578 */ 579 vport_id = HNS3_PF_FUNC_ID; 580 req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD; 581 bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE); 582 req->vf_bitmap[req->vf_offset] = bitmap; 583 584 ret = hns3_cmd_send(hw, &desc, 1); 585 if (ret) 586 hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret); 587 return ret; 588 } 589 590 static void 591 hns3_update_rx_offload_cfg(struct hns3_adapter *hns, 592 struct hns3_rx_vtag_cfg *vcfg) 593 { 594 struct hns3_pf *pf = &hns->pf; 595 memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg)); 596 } 597 598 static void 599 hns3_update_tx_offload_cfg(struct hns3_adapter *hns, 600 struct hns3_tx_vtag_cfg *vcfg) 601 { 602 struct hns3_pf *pf = &hns->pf; 603 memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg)); 604 } 605 606 static int 607 hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable) 608 { 609 struct hns3_rx_vtag_cfg rxvlan_cfg; 610 struct hns3_hw *hw = &hns->hw; 611 int ret; 612 613 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { 614 rxvlan_cfg.strip_tag1_en = false; 615 rxvlan_cfg.strip_tag2_en = enable; 616 rxvlan_cfg.strip_tag2_discard_en = false; 617 } else { 618 rxvlan_cfg.strip_tag1_en = enable; 619 rxvlan_cfg.strip_tag2_en = true; 620 rxvlan_cfg.strip_tag2_discard_en = true; 621 } 622 623 rxvlan_cfg.strip_tag1_discard_en = false; 624 rxvlan_cfg.vlan1_vlan_prionly = false; 625 rxvlan_cfg.vlan2_vlan_prionly = false; 626 rxvlan_cfg.rx_vlan_offload_en = enable; 627 628 ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg); 629 if (ret) { 630 hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret); 631 return ret; 632 } 633 634 hns3_update_rx_offload_cfg(hns, &rxvlan_cfg); 635 636 return ret; 637 } 638 639 static int 640 hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type, 641 uint8_t fe_type, bool filter_en, uint8_t vf_id) 642 { 643 struct hns3_vlan_filter_ctrl_cmd *req; 644 struct hns3_cmd_desc desc; 645 int ret; 646 647 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false); 648 649 req 
= (struct hns3_vlan_filter_ctrl_cmd *)desc.data; 650 req->vlan_type = vlan_type; 651 req->vlan_fe = filter_en ? fe_type : 0; 652 req->vf_id = vf_id; 653 654 ret = hns3_cmd_send(hw, &desc, 1); 655 if (ret) 656 hns3_err(hw, "set vlan filter fail, ret =%d", ret); 657 658 return ret; 659 } 660 661 static int 662 hns3_vlan_filter_init(struct hns3_adapter *hns) 663 { 664 struct hns3_hw *hw = &hns->hw; 665 int ret; 666 667 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF, 668 HNS3_FILTER_FE_EGRESS, false, 669 HNS3_PF_FUNC_ID); 670 if (ret) { 671 hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret); 672 return ret; 673 } 674 675 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT, 676 HNS3_FILTER_FE_INGRESS, false, 677 HNS3_PF_FUNC_ID); 678 if (ret) 679 hns3_err(hw, "failed to init port vlan filter, ret = %d", ret); 680 681 return ret; 682 } 683 684 static int 685 hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable) 686 { 687 struct hns3_hw *hw = &hns->hw; 688 int ret; 689 690 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT, 691 HNS3_FILTER_FE_INGRESS, enable, 692 HNS3_PF_FUNC_ID); 693 if (ret) 694 hns3_err(hw, "failed to %s port vlan filter, ret = %d", 695 enable ? "enable" : "disable", ret); 696 697 return ret; 698 } 699 700 static int 701 hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask) 702 { 703 struct hns3_adapter *hns = dev->data->dev_private; 704 struct hns3_hw *hw = &hns->hw; 705 struct rte_eth_rxmode *rxmode; 706 unsigned int tmp_mask; 707 bool enable; 708 int ret = 0; 709 710 rte_spinlock_lock(&hw->lock); 711 rxmode = &dev->data->dev_conf.rxmode; 712 tmp_mask = (unsigned int)mask; 713 if (tmp_mask & ETH_VLAN_FILTER_MASK) { 714 /* ignore vlan filter configuration during promiscuous mode */ 715 if (!dev->data->promiscuous) { 716 /* Enable or disable VLAN filter */ 717 enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? 718 true : false; 719 720 ret = hns3_enable_vlan_filter(hns, enable); 721 if (ret) { 722 rte_spinlock_unlock(&hw->lock); 723 hns3_err(hw, "failed to %s rx filter, ret = %d", 724 enable ? "enable" : "disable", ret); 725 return ret; 726 } 727 } 728 } 729 730 if (tmp_mask & ETH_VLAN_STRIP_MASK) { 731 /* Enable or disable VLAN stripping */ 732 enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? 733 true : false; 734 735 ret = hns3_en_hw_strip_rxvtag(hns, enable); 736 if (ret) { 737 rte_spinlock_unlock(&hw->lock); 738 hns3_err(hw, "failed to %s rx strip, ret = %d", 739 enable ? "enable" : "disable", ret); 740 return ret; 741 } 742 } 743 744 rte_spinlock_unlock(&hw->lock); 745 746 return ret; 747 } 748 749 static int 750 hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns, 751 struct hns3_tx_vtag_cfg *vcfg) 752 { 753 struct hns3_vport_vtag_tx_cfg_cmd *req; 754 struct hns3_cmd_desc desc; 755 struct hns3_hw *hw = &hns->hw; 756 uint16_t vport_id; 757 uint8_t bitmap; 758 int ret; 759 760 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false); 761 762 req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data; 763 req->def_vlan_tag1 = vcfg->default_tag1; 764 req->def_vlan_tag2 = vcfg->default_tag2; 765 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B, 766 vcfg->accept_tag1 ? 1 : 0); 767 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B, 768 vcfg->accept_untag1 ? 1 : 0); 769 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B, 770 vcfg->accept_tag2 ? 1 : 0); 771 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B, 772 vcfg->accept_untag2 ? 
1 : 0); 773 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B, 774 vcfg->insert_tag1_en ? 1 : 0); 775 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B, 776 vcfg->insert_tag2_en ? 1 : 0); 777 hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0); 778 779 /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */ 780 hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B, 781 vcfg->tag_shift_mode_en ? 1 : 0); 782 783 /* 784 * In current version VF is not supported when PF is driven by DPDK 785 * driver, just need to configure parameters for PF vport. 786 */ 787 vport_id = HNS3_PF_FUNC_ID; 788 req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD; 789 bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE); 790 req->vf_bitmap[req->vf_offset] = bitmap; 791 792 ret = hns3_cmd_send(hw, &desc, 1); 793 if (ret) 794 hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret); 795 796 return ret; 797 } 798 799 static int 800 hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state, 801 uint16_t pvid) 802 { 803 struct hns3_hw *hw = &hns->hw; 804 struct hns3_tx_vtag_cfg txvlan_cfg; 805 int ret; 806 807 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) { 808 txvlan_cfg.accept_tag1 = true; 809 txvlan_cfg.insert_tag1_en = false; 810 txvlan_cfg.default_tag1 = 0; 811 } else { 812 txvlan_cfg.accept_tag1 = 813 hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE; 814 txvlan_cfg.insert_tag1_en = true; 815 txvlan_cfg.default_tag1 = pvid; 816 } 817 818 txvlan_cfg.accept_untag1 = true; 819 txvlan_cfg.accept_tag2 = true; 820 txvlan_cfg.accept_untag2 = true; 821 txvlan_cfg.insert_tag2_en = false; 822 txvlan_cfg.default_tag2 = 0; 823 txvlan_cfg.tag_shift_mode_en = true; 824 825 ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg); 826 if (ret) { 827 hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid, 828 ret); 829 return ret; 830 } 831 832 hns3_update_tx_offload_cfg(hns, &txvlan_cfg); 833 return ret; 834 } 835 836 837 static void 838 hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list) 839 { 840 struct hns3_user_vlan_table *vlan_entry; 841 struct hns3_pf *pf = &hns->pf; 842 843 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 844 if (vlan_entry->hd_tbl_status) { 845 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0); 846 vlan_entry->hd_tbl_status = false; 847 } 848 } 849 850 if (is_del_list) { 851 vlan_entry = LIST_FIRST(&pf->vlan_list); 852 while (vlan_entry) { 853 LIST_REMOVE(vlan_entry, next); 854 rte_free(vlan_entry); 855 vlan_entry = LIST_FIRST(&pf->vlan_list); 856 } 857 } 858 } 859 860 static void 861 hns3_add_all_vlan_table(struct hns3_adapter *hns) 862 { 863 struct hns3_user_vlan_table *vlan_entry; 864 struct hns3_pf *pf = &hns->pf; 865 866 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 867 if (!vlan_entry->hd_tbl_status) { 868 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1); 869 vlan_entry->hd_tbl_status = true; 870 } 871 } 872 } 873 874 static void 875 hns3_remove_all_vlan_table(struct hns3_adapter *hns) 876 { 877 struct hns3_hw *hw = &hns->hw; 878 int ret; 879 880 hns3_rm_all_vlan_table(hns, true); 881 if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) { 882 ret = hns3_set_port_vlan_filter(hns, 883 hw->port_base_vlan_cfg.pvid, 0); 884 if (ret) { 885 hns3_err(hw, "Failed to remove all vlan table, ret =%d", 886 ret); 887 return; 888 } 889 } 890 } 891 892 static int 893 hns3_update_vlan_filter_entries(struct hns3_adapter *hns, 894 uint16_t port_base_vlan_state, uint16_t new_pvid) 895 { 896 struct hns3_hw *hw = 
&hns->hw; 897 uint16_t old_pvid; 898 int ret; 899 900 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) { 901 old_pvid = hw->port_base_vlan_cfg.pvid; 902 if (old_pvid != HNS3_INVALID_PVID) { 903 ret = hns3_set_port_vlan_filter(hns, old_pvid, 0); 904 if (ret) { 905 hns3_err(hw, "failed to remove old pvid %u, " 906 "ret = %d", old_pvid, ret); 907 return ret; 908 } 909 } 910 911 hns3_rm_all_vlan_table(hns, false); 912 ret = hns3_set_port_vlan_filter(hns, new_pvid, 1); 913 if (ret) { 914 hns3_err(hw, "failed to add new pvid %u, ret = %d", 915 new_pvid, ret); 916 return ret; 917 } 918 } else { 919 ret = hns3_set_port_vlan_filter(hns, new_pvid, 0); 920 if (ret) { 921 hns3_err(hw, "failed to remove pvid %u, ret = %d", 922 new_pvid, ret); 923 return ret; 924 } 925 926 hns3_add_all_vlan_table(hns); 927 } 928 return 0; 929 } 930 931 static int 932 hns3_en_pvid_strip(struct hns3_adapter *hns, int on) 933 { 934 struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg; 935 struct hns3_rx_vtag_cfg rx_vlan_cfg; 936 bool rx_strip_en; 937 int ret; 938 939 rx_strip_en = old_cfg->rx_vlan_offload_en; 940 if (on) { 941 rx_vlan_cfg.strip_tag1_en = rx_strip_en; 942 rx_vlan_cfg.strip_tag2_en = true; 943 rx_vlan_cfg.strip_tag2_discard_en = true; 944 } else { 945 rx_vlan_cfg.strip_tag1_en = false; 946 rx_vlan_cfg.strip_tag2_en = rx_strip_en; 947 rx_vlan_cfg.strip_tag2_discard_en = false; 948 } 949 rx_vlan_cfg.strip_tag1_discard_en = false; 950 rx_vlan_cfg.vlan1_vlan_prionly = false; 951 rx_vlan_cfg.vlan2_vlan_prionly = false; 952 rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en; 953 954 ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg); 955 if (ret) 956 return ret; 957 958 hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg); 959 return ret; 960 } 961 962 static int 963 hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on) 964 { 965 struct hns3_hw *hw = &hns->hw; 966 uint16_t port_base_vlan_state; 967 int ret; 968 969 if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) { 970 if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) 971 hns3_warn(hw, "Invalid operation! As current pvid set " 972 "is %u, disable pvid %u is invalid", 973 hw->port_base_vlan_cfg.pvid, pvid); 974 return 0; 975 } 976 977 port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE : 978 HNS3_PORT_BASE_VLAN_DISABLE; 979 ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid); 980 if (ret) { 981 hns3_err(hw, "failed to config tx vlan for pvid, ret = %d", 982 ret); 983 return ret; 984 } 985 986 ret = hns3_en_pvid_strip(hns, on); 987 if (ret) { 988 hns3_err(hw, "failed to config rx vlan strip for pvid, " 989 "ret = %d", ret); 990 return ret; 991 } 992 993 if (pvid == HNS3_INVALID_PVID) 994 goto out; 995 ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid); 996 if (ret) { 997 hns3_err(hw, "failed to update vlan filter entries, ret = %d", 998 ret); 999 return ret; 1000 } 1001 1002 out: 1003 hw->port_base_vlan_cfg.state = port_base_vlan_state; 1004 hw->port_base_vlan_cfg.pvid = on ? 
pvid : HNS3_INVALID_PVID; 1005 return ret; 1006 } 1007 1008 static int 1009 hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) 1010 { 1011 struct hns3_adapter *hns = dev->data->dev_private; 1012 struct hns3_hw *hw = &hns->hw; 1013 bool pvid_en_state_change; 1014 uint16_t pvid_state; 1015 int ret; 1016 1017 if (pvid > RTE_ETHER_MAX_VLAN_ID) { 1018 hns3_err(hw, "Invalid vlan_id = %u > %d", pvid, 1019 RTE_ETHER_MAX_VLAN_ID); 1020 return -EINVAL; 1021 } 1022 1023 /* 1024 * If PVID configuration state change, should refresh the PVID 1025 * configuration state in struct hns3_tx_queue/hns3_rx_queue. 1026 */ 1027 pvid_state = hw->port_base_vlan_cfg.state; 1028 if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) || 1029 (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE)) 1030 pvid_en_state_change = false; 1031 else 1032 pvid_en_state_change = true; 1033 1034 rte_spinlock_lock(&hw->lock); 1035 ret = hns3_vlan_pvid_configure(hns, pvid, on); 1036 rte_spinlock_unlock(&hw->lock); 1037 if (ret) 1038 return ret; 1039 /* 1040 * Only in HNS3_SW_SHIFT_AND_MODE the PVID related operation in Tx/Rx 1041 * need be processed by PMD driver. 1042 */ 1043 if (pvid_en_state_change && 1044 hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) 1045 hns3_update_all_queues_pvid_proc_en(hw); 1046 1047 return 0; 1048 } 1049 1050 static int 1051 hns3_default_vlan_config(struct hns3_adapter *hns) 1052 { 1053 struct hns3_hw *hw = &hns->hw; 1054 int ret; 1055 1056 /* 1057 * When vlan filter is enabled, hardware regards packets without vlan 1058 * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan 1059 * table, packets without vlan won't be received. So, add vlan 0 as 1060 * the default vlan. 1061 */ 1062 ret = hns3_vlan_filter_configure(hns, 0, 1); 1063 if (ret) 1064 hns3_err(hw, "default vlan 0 config failed, ret =%d", ret); 1065 return ret; 1066 } 1067 1068 static int 1069 hns3_init_vlan_config(struct hns3_adapter *hns) 1070 { 1071 struct hns3_hw *hw = &hns->hw; 1072 int ret; 1073 1074 /* 1075 * This function can be called in the initialization and reset process, 1076 * when in reset process, it means that hardware had been reseted 1077 * successfully and we need to restore the hardware configuration to 1078 * ensure that the hardware configuration remains unchanged before and 1079 * after reset. 1080 */ 1081 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { 1082 hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE; 1083 hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID; 1084 } 1085 1086 ret = hns3_vlan_filter_init(hns); 1087 if (ret) { 1088 hns3_err(hw, "vlan init fail in pf, ret =%d", ret); 1089 return ret; 1090 } 1091 1092 ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER, 1093 RTE_ETHER_TYPE_VLAN); 1094 if (ret) { 1095 hns3_err(hw, "tpid set fail in pf, ret =%d", ret); 1096 return ret; 1097 } 1098 1099 /* 1100 * When in the reinit dev stage of the reset process, the following 1101 * vlan-related configurations may differ from those at initialization, 1102 * we will restore configurations to hardware in hns3_restore_vlan_table 1103 * and hns3_restore_vlan_conf later. 
1104 */ 1105 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { 1106 ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0); 1107 if (ret) { 1108 hns3_err(hw, "pvid set fail in pf, ret =%d", ret); 1109 return ret; 1110 } 1111 1112 ret = hns3_en_hw_strip_rxvtag(hns, false); 1113 if (ret) { 1114 hns3_err(hw, "rx strip configure fail in pf, ret =%d", 1115 ret); 1116 return ret; 1117 } 1118 } 1119 1120 return hns3_default_vlan_config(hns); 1121 } 1122 1123 static int 1124 hns3_restore_vlan_conf(struct hns3_adapter *hns) 1125 { 1126 struct hns3_pf *pf = &hns->pf; 1127 struct hns3_hw *hw = &hns->hw; 1128 uint64_t offloads; 1129 bool enable; 1130 int ret; 1131 1132 if (!hw->data->promiscuous) { 1133 /* restore vlan filter states */ 1134 offloads = hw->data->dev_conf.rxmode.offloads; 1135 enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false; 1136 ret = hns3_enable_vlan_filter(hns, enable); 1137 if (ret) { 1138 hns3_err(hw, "failed to restore vlan rx filter conf, " 1139 "ret = %d", ret); 1140 return ret; 1141 } 1142 } 1143 1144 ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg); 1145 if (ret) { 1146 hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret); 1147 return ret; 1148 } 1149 1150 ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg); 1151 if (ret) 1152 hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret); 1153 1154 return ret; 1155 } 1156 1157 static int 1158 hns3_dev_configure_vlan(struct rte_eth_dev *dev) 1159 { 1160 struct hns3_adapter *hns = dev->data->dev_private; 1161 struct rte_eth_dev_data *data = dev->data; 1162 struct rte_eth_txmode *txmode; 1163 struct hns3_hw *hw = &hns->hw; 1164 int mask; 1165 int ret; 1166 1167 txmode = &data->dev_conf.txmode; 1168 if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged) 1169 hns3_warn(hw, 1170 "hw_vlan_reject_tagged or hw_vlan_reject_untagged " 1171 "configuration is not supported! Ignore these two " 1172 "parameters: hw_vlan_reject_tagged(%u), " 1173 "hw_vlan_reject_untagged(%u)", 1174 txmode->hw_vlan_reject_tagged, 1175 txmode->hw_vlan_reject_untagged); 1176 1177 /* Apply vlan offload setting */ 1178 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK; 1179 ret = hns3_vlan_offload_set(dev, mask); 1180 if (ret) { 1181 hns3_err(hw, "dev config rx vlan offload failed, ret = %d", 1182 ret); 1183 return ret; 1184 } 1185 1186 /* 1187 * If pvid config is not set in rte_eth_conf, driver needn't to set 1188 * VLAN pvid related configuration to hardware. 
1189 */ 1190 if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0) 1191 return 0; 1192 1193 /* Apply pvid setting */ 1194 ret = hns3_vlan_pvid_set(dev, txmode->pvid, 1195 txmode->hw_vlan_insert_pvid); 1196 if (ret) 1197 hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d", 1198 txmode->pvid, ret); 1199 1200 return ret; 1201 } 1202 1203 static int 1204 hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min, 1205 unsigned int tso_mss_max) 1206 { 1207 struct hns3_cfg_tso_status_cmd *req; 1208 struct hns3_cmd_desc desc; 1209 uint16_t tso_mss; 1210 1211 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false); 1212 1213 req = (struct hns3_cfg_tso_status_cmd *)desc.data; 1214 1215 tso_mss = 0; 1216 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S, 1217 tso_mss_min); 1218 req->tso_mss_min = rte_cpu_to_le_16(tso_mss); 1219 1220 tso_mss = 0; 1221 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S, 1222 tso_mss_max); 1223 req->tso_mss_max = rte_cpu_to_le_16(tso_mss); 1224 1225 return hns3_cmd_send(hw, &desc, 1); 1226 } 1227 1228 static int 1229 hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size, 1230 uint16_t *allocated_size, bool is_alloc) 1231 { 1232 struct hns3_umv_spc_alc_cmd *req; 1233 struct hns3_cmd_desc desc; 1234 int ret; 1235 1236 req = (struct hns3_umv_spc_alc_cmd *)desc.data; 1237 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false); 1238 hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1); 1239 req->space_size = rte_cpu_to_le_32(space_size); 1240 1241 ret = hns3_cmd_send(hw, &desc, 1); 1242 if (ret) { 1243 PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d", 1244 is_alloc ? "allocate" : "free", ret); 1245 return ret; 1246 } 1247 1248 if (is_alloc && allocated_size) 1249 *allocated_size = rte_le_to_cpu_32(desc.data[1]); 1250 1251 return 0; 1252 } 1253 1254 static int 1255 hns3_init_umv_space(struct hns3_hw *hw) 1256 { 1257 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 1258 struct hns3_pf *pf = &hns->pf; 1259 uint16_t allocated_size = 0; 1260 int ret; 1261 1262 ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size, 1263 true); 1264 if (ret) 1265 return ret; 1266 1267 if (allocated_size < pf->wanted_umv_size) 1268 PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u", 1269 pf->wanted_umv_size, allocated_size); 1270 1271 pf->max_umv_size = (!!allocated_size) ? 
allocated_size : 1272 pf->wanted_umv_size; 1273 pf->used_umv_size = 0; 1274 return 0; 1275 } 1276 1277 static int 1278 hns3_uninit_umv_space(struct hns3_hw *hw) 1279 { 1280 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 1281 struct hns3_pf *pf = &hns->pf; 1282 int ret; 1283 1284 if (pf->max_umv_size == 0) 1285 return 0; 1286 1287 ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false); 1288 if (ret) 1289 return ret; 1290 1291 pf->max_umv_size = 0; 1292 1293 return 0; 1294 } 1295 1296 static bool 1297 hns3_is_umv_space_full(struct hns3_hw *hw) 1298 { 1299 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 1300 struct hns3_pf *pf = &hns->pf; 1301 bool is_full; 1302 1303 is_full = (pf->used_umv_size >= pf->max_umv_size); 1304 1305 return is_full; 1306 } 1307 1308 static void 1309 hns3_update_umv_space(struct hns3_hw *hw, bool is_free) 1310 { 1311 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 1312 struct hns3_pf *pf = &hns->pf; 1313 1314 if (is_free) { 1315 if (pf->used_umv_size > 0) 1316 pf->used_umv_size--; 1317 } else 1318 pf->used_umv_size++; 1319 } 1320 1321 static void 1322 hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req, 1323 const uint8_t *addr, bool is_mc) 1324 { 1325 const unsigned char *mac_addr = addr; 1326 uint32_t high_val = ((uint32_t)mac_addr[3] << 24) | 1327 ((uint32_t)mac_addr[2] << 16) | 1328 ((uint32_t)mac_addr[1] << 8) | 1329 (uint32_t)mac_addr[0]; 1330 uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4]; 1331 1332 hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1); 1333 if (is_mc) { 1334 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1335 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1); 1336 hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1); 1337 } 1338 1339 new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val); 1340 new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff); 1341 } 1342 1343 static int 1344 hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp, 1345 uint8_t resp_code, 1346 enum hns3_mac_vlan_tbl_opcode op) 1347 { 1348 if (cmdq_resp) { 1349 hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u", 1350 cmdq_resp); 1351 return -EIO; 1352 } 1353 1354 if (op == HNS3_MAC_VLAN_ADD) { 1355 if (resp_code == 0 || resp_code == 1) { 1356 return 0; 1357 } else if (resp_code == HNS3_ADD_UC_OVERFLOW) { 1358 hns3_err(hw, "add mac addr failed for uc_overflow"); 1359 return -ENOSPC; 1360 } else if (resp_code == HNS3_ADD_MC_OVERFLOW) { 1361 hns3_err(hw, "add mac addr failed for mc_overflow"); 1362 return -ENOSPC; 1363 } 1364 1365 hns3_err(hw, "add mac addr failed for undefined, code=%u", 1366 resp_code); 1367 return -EIO; 1368 } else if (op == HNS3_MAC_VLAN_REMOVE) { 1369 if (resp_code == 0) { 1370 return 0; 1371 } else if (resp_code == 1) { 1372 hns3_dbg(hw, "remove mac addr failed for miss"); 1373 return -ENOENT; 1374 } 1375 1376 hns3_err(hw, "remove mac addr failed for undefined, code=%u", 1377 resp_code); 1378 return -EIO; 1379 } else if (op == HNS3_MAC_VLAN_LKUP) { 1380 if (resp_code == 0) { 1381 return 0; 1382 } else if (resp_code == 1) { 1383 hns3_dbg(hw, "lookup mac addr failed for miss"); 1384 return -ENOENT; 1385 } 1386 1387 hns3_err(hw, "lookup mac addr failed for undefined, code=%u", 1388 resp_code); 1389 return -EIO; 1390 } 1391 1392 hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u", 1393 op); 1394 1395 return -EINVAL; 1396 } 1397 1398 static int 1399 hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw, 1400 
struct hns3_mac_vlan_tbl_entry_cmd *req, 1401 struct hns3_cmd_desc *desc, bool is_mc) 1402 { 1403 uint8_t resp_code; 1404 uint16_t retval; 1405 int ret; 1406 1407 hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true); 1408 if (is_mc) { 1409 desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 1410 memcpy(desc[0].data, req, 1411 sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); 1412 hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD, 1413 true); 1414 desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 1415 hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD, 1416 true); 1417 ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM); 1418 } else { 1419 memcpy(desc[0].data, req, 1420 sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); 1421 ret = hns3_cmd_send(hw, desc, 1); 1422 } 1423 if (ret) { 1424 hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.", 1425 ret); 1426 return ret; 1427 } 1428 resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff; 1429 retval = rte_le_to_cpu_16(desc[0].retval); 1430 1431 return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, 1432 HNS3_MAC_VLAN_LKUP); 1433 } 1434 1435 static int 1436 hns3_add_mac_vlan_tbl(struct hns3_hw *hw, 1437 struct hns3_mac_vlan_tbl_entry_cmd *req, 1438 struct hns3_cmd_desc *mc_desc) 1439 { 1440 uint8_t resp_code; 1441 uint16_t retval; 1442 int cfg_status; 1443 int ret; 1444 1445 if (mc_desc == NULL) { 1446 struct hns3_cmd_desc desc; 1447 1448 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false); 1449 memcpy(desc.data, req, 1450 sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); 1451 ret = hns3_cmd_send(hw, &desc, 1); 1452 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 1453 retval = rte_le_to_cpu_16(desc.retval); 1454 1455 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, 1456 HNS3_MAC_VLAN_ADD); 1457 } else { 1458 hns3_cmd_reuse_desc(&mc_desc[0], false); 1459 mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 1460 hns3_cmd_reuse_desc(&mc_desc[1], false); 1461 mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 1462 hns3_cmd_reuse_desc(&mc_desc[2], false); 1463 mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT); 1464 memcpy(mc_desc[0].data, req, 1465 sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); 1466 mc_desc[0].retval = 0; 1467 ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM); 1468 resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff; 1469 retval = rte_le_to_cpu_16(mc_desc[0].retval); 1470 1471 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, 1472 HNS3_MAC_VLAN_ADD); 1473 } 1474 1475 if (ret) { 1476 hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret); 1477 return ret; 1478 } 1479 1480 return cfg_status; 1481 } 1482 1483 static int 1484 hns3_remove_mac_vlan_tbl(struct hns3_hw *hw, 1485 struct hns3_mac_vlan_tbl_entry_cmd *req) 1486 { 1487 struct hns3_cmd_desc desc; 1488 uint8_t resp_code; 1489 uint16_t retval; 1490 int ret; 1491 1492 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false); 1493 1494 memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); 1495 1496 ret = hns3_cmd_send(hw, &desc, 1); 1497 if (ret) { 1498 hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret); 1499 return ret; 1500 } 1501 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 1502 retval = rte_le_to_cpu_16(desc.retval); 1503 1504 return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, 1505 HNS3_MAC_VLAN_REMOVE); 1506 } 1507 1508 static int 1509 hns3_add_uc_addr_common(struct hns3_hw *hw, struct 
rte_ether_addr *mac_addr) 1510 { 1511 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 1512 struct hns3_mac_vlan_tbl_entry_cmd req; 1513 struct hns3_pf *pf = &hns->pf; 1514 struct hns3_cmd_desc desc[3]; 1515 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1516 uint16_t egress_port = 0; 1517 uint8_t vf_id; 1518 int ret; 1519 1520 /* check if mac addr is valid */ 1521 if (!rte_is_valid_assigned_ether_addr(mac_addr)) { 1522 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1523 mac_addr); 1524 hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid", 1525 mac_str); 1526 return -EINVAL; 1527 } 1528 1529 memset(&req, 0, sizeof(req)); 1530 1531 /* 1532 * In current version VF is not supported when PF is driven by DPDK 1533 * driver, just need to configure parameters for PF vport. 1534 */ 1535 vf_id = HNS3_PF_FUNC_ID; 1536 hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M, 1537 HNS3_MAC_EPORT_VFID_S, vf_id); 1538 1539 req.egress_port = rte_cpu_to_le_16(egress_port); 1540 1541 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false); 1542 1543 /* 1544 * Lookup the mac address in the mac_vlan table, and add 1545 * it if the entry is inexistent. Repeated unicast entry 1546 * is not allowed in the mac vlan table. 1547 */ 1548 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false); 1549 if (ret == -ENOENT) { 1550 if (!hns3_is_umv_space_full(hw)) { 1551 ret = hns3_add_mac_vlan_tbl(hw, &req, NULL); 1552 if (!ret) 1553 hns3_update_umv_space(hw, false); 1554 return ret; 1555 } 1556 1557 hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size); 1558 1559 return -ENOSPC; 1560 } 1561 1562 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); 1563 1564 /* check if we just hit the duplicate */ 1565 if (ret == 0) { 1566 hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str); 1567 return 0; 1568 } 1569 1570 hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table", 1571 mac_str); 1572 1573 return ret; 1574 } 1575 1576 static int 1577 hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1578 { 1579 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1580 struct rte_ether_addr *addr; 1581 int ret; 1582 int i; 1583 1584 for (i = 0; i < hw->mc_addrs_num; i++) { 1585 addr = &hw->mc_addrs[i]; 1586 /* Check if there are duplicate addresses */ 1587 if (rte_is_same_ether_addr(addr, mac_addr)) { 1588 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1589 addr); 1590 hns3_err(hw, "failed to add mc mac addr, same addrs" 1591 "(%s) is added by the set_mc_mac_addr_list " 1592 "API", mac_str); 1593 return -EINVAL; 1594 } 1595 } 1596 1597 ret = hns3_add_mc_addr(hw, mac_addr); 1598 if (ret) { 1599 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1600 mac_addr); 1601 hns3_err(hw, "failed to add mc mac addr(%s), ret = %d", 1602 mac_str, ret); 1603 } 1604 return ret; 1605 } 1606 1607 static int 1608 hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1609 { 1610 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1611 int ret; 1612 1613 ret = hns3_remove_mc_addr(hw, mac_addr); 1614 if (ret) { 1615 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1616 mac_addr); 1617 hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d", 1618 mac_str, ret); 1619 } 1620 return ret; 1621 } 1622 1623 static int 1624 hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 1625 uint32_t idx, __rte_unused uint32_t pool) 1626 { 1627 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1628 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1629 
int ret; 1630 1631 rte_spinlock_lock(&hw->lock); 1632 1633 /* 1634 * In hns3 network engine adding UC and MC mac address with different 1635 * commands with firmware. We need to determine whether the input 1636 * address is a UC or a MC address to call different commands. 1637 * By the way, it is recommended calling the API function named 1638 * rte_eth_dev_set_mc_addr_list to set the MC mac address, because 1639 * using the rte_eth_dev_mac_addr_add API function to set MC mac address 1640 * may affect the specifications of UC mac addresses. 1641 */ 1642 if (rte_is_multicast_ether_addr(mac_addr)) 1643 ret = hns3_add_mc_addr_common(hw, mac_addr); 1644 else 1645 ret = hns3_add_uc_addr_common(hw, mac_addr); 1646 1647 if (ret) { 1648 rte_spinlock_unlock(&hw->lock); 1649 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1650 mac_addr); 1651 hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, 1652 ret); 1653 return ret; 1654 } 1655 1656 if (idx == 0) 1657 hw->mac.default_addr_setted = true; 1658 rte_spinlock_unlock(&hw->lock); 1659 1660 return ret; 1661 } 1662 1663 static int 1664 hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1665 { 1666 struct hns3_mac_vlan_tbl_entry_cmd req; 1667 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1668 int ret; 1669 1670 /* check if mac addr is valid */ 1671 if (!rte_is_valid_assigned_ether_addr(mac_addr)) { 1672 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1673 mac_addr); 1674 hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid", 1675 mac_str); 1676 return -EINVAL; 1677 } 1678 1679 memset(&req, 0, sizeof(req)); 1680 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1681 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false); 1682 ret = hns3_remove_mac_vlan_tbl(hw, &req); 1683 if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */ 1684 return 0; 1685 else if (ret == 0) 1686 hns3_update_umv_space(hw, true); 1687 1688 return ret; 1689 } 1690 1691 static void 1692 hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) 1693 { 1694 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1695 /* index will be checked by upper level rte interface */ 1696 struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx]; 1697 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1698 int ret; 1699 1700 rte_spinlock_lock(&hw->lock); 1701 1702 if (rte_is_multicast_ether_addr(mac_addr)) 1703 ret = hns3_remove_mc_addr_common(hw, mac_addr); 1704 else 1705 ret = hns3_remove_uc_addr_common(hw, mac_addr); 1706 rte_spinlock_unlock(&hw->lock); 1707 if (ret) { 1708 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1709 mac_addr); 1710 hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str, 1711 ret); 1712 } 1713 } 1714 1715 static int 1716 hns3_set_default_mac_addr(struct rte_eth_dev *dev, 1717 struct rte_ether_addr *mac_addr) 1718 { 1719 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1720 struct rte_ether_addr *oaddr; 1721 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1722 bool default_addr_setted; 1723 bool rm_succes = false; 1724 int ret, ret_val; 1725 1726 /* 1727 * It has been guaranteed that input parameter named mac_addr is valid 1728 * address in the rte layer of DPDK framework. 
1729 */ 1730 oaddr = (struct rte_ether_addr *)hw->mac.mac_addr; 1731 default_addr_setted = hw->mac.default_addr_setted; 1732 if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr)) 1733 return 0; 1734 1735 rte_spinlock_lock(&hw->lock); 1736 if (default_addr_setted) { 1737 ret = hns3_remove_uc_addr_common(hw, oaddr); 1738 if (ret) { 1739 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1740 oaddr); 1741 hns3_warn(hw, "Remove old uc mac address(%s) fail: %d", 1742 mac_str, ret); 1743 rm_succes = false; 1744 } else 1745 rm_succes = true; 1746 } 1747 1748 ret = hns3_add_uc_addr_common(hw, mac_addr); 1749 if (ret) { 1750 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1751 mac_addr); 1752 hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret); 1753 goto err_add_uc_addr; 1754 } 1755 1756 ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes); 1757 if (ret) { 1758 hns3_err(hw, "Failed to configure mac pause address: %d", ret); 1759 goto err_pause_addr_cfg; 1760 } 1761 1762 rte_ether_addr_copy(mac_addr, 1763 (struct rte_ether_addr *)hw->mac.mac_addr); 1764 hw->mac.default_addr_setted = true; 1765 rte_spinlock_unlock(&hw->lock); 1766 1767 return 0; 1768 1769 err_pause_addr_cfg: 1770 ret_val = hns3_remove_uc_addr_common(hw, mac_addr); 1771 if (ret_val) { 1772 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1773 mac_addr); 1774 hns3_warn(hw, 1775 "Failed to roll back to del setted mac addr(%s): %d", 1776 mac_str, ret_val); 1777 } 1778 1779 err_add_uc_addr: 1780 if (rm_succes) { 1781 ret_val = hns3_add_uc_addr_common(hw, oaddr); 1782 if (ret_val) { 1783 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1784 oaddr); 1785 hns3_warn(hw, 1786 "Failed to restore old uc mac addr(%s): %d", 1787 mac_str, ret_val); 1788 hw->mac.default_addr_setted = false; 1789 } 1790 } 1791 rte_spinlock_unlock(&hw->lock); 1792 1793 return ret; 1794 } 1795 1796 static int 1797 hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) 1798 { 1799 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1800 struct hns3_hw *hw = &hns->hw; 1801 struct rte_ether_addr *addr; 1802 int err = 0; 1803 int ret; 1804 int i; 1805 1806 for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) { 1807 addr = &hw->data->mac_addrs[i]; 1808 if (rte_is_zero_ether_addr(addr)) 1809 continue; 1810 if (rte_is_multicast_ether_addr(addr)) 1811 ret = del ? hns3_remove_mc_addr(hw, addr) : 1812 hns3_add_mc_addr(hw, addr); 1813 else 1814 ret = del ? hns3_remove_uc_addr_common(hw, addr) : 1815 hns3_add_uc_addr_common(hw, addr); 1816 1817 if (ret) { 1818 err = ret; 1819 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1820 addr); 1821 hns3_err(hw, "failed to %s mac addr(%s) index:%d " 1822 "ret = %d.", del ? 
"remove" : "restore", 1823 mac_str, i, ret); 1824 } 1825 } 1826 return err; 1827 } 1828 1829 static void 1830 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr) 1831 { 1832 #define HNS3_VF_NUM_IN_FIRST_DESC 192 1833 uint8_t word_num; 1834 uint8_t bit_num; 1835 1836 if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) { 1837 word_num = vfid / 32; 1838 bit_num = vfid % 32; 1839 if (clr) 1840 desc[1].data[word_num] &= 1841 rte_cpu_to_le_32(~(1UL << bit_num)); 1842 else 1843 desc[1].data[word_num] |= 1844 rte_cpu_to_le_32(1UL << bit_num); 1845 } else { 1846 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32; 1847 bit_num = vfid % 32; 1848 if (clr) 1849 desc[2].data[word_num] &= 1850 rte_cpu_to_le_32(~(1UL << bit_num)); 1851 else 1852 desc[2].data[word_num] |= 1853 rte_cpu_to_le_32(1UL << bit_num); 1854 } 1855 } 1856 1857 static int 1858 hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1859 { 1860 struct hns3_mac_vlan_tbl_entry_cmd req; 1861 struct hns3_cmd_desc desc[3]; 1862 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1863 uint8_t vf_id; 1864 int ret; 1865 1866 /* Check if mac addr is valid */ 1867 if (!rte_is_multicast_ether_addr(mac_addr)) { 1868 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1869 mac_addr); 1870 hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid", 1871 mac_str); 1872 return -EINVAL; 1873 } 1874 1875 memset(&req, 0, sizeof(req)); 1876 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1877 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1878 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1879 if (ret) { 1880 /* This mac addr do not exist, add new entry for it */ 1881 memset(desc[0].data, 0, sizeof(desc[0].data)); 1882 memset(desc[1].data, 0, sizeof(desc[0].data)); 1883 memset(desc[2].data, 0, sizeof(desc[0].data)); 1884 } 1885 1886 /* 1887 * In current version VF is not supported when PF is driven by DPDK 1888 * driver, just need to configure parameters for PF vport. 1889 */ 1890 vf_id = HNS3_PF_FUNC_ID; 1891 hns3_update_desc_vfid(desc, vf_id, false); 1892 ret = hns3_add_mac_vlan_tbl(hw, &req, desc); 1893 if (ret) { 1894 if (ret == -ENOSPC) 1895 hns3_err(hw, "mc mac vlan table is full"); 1896 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1897 mac_addr); 1898 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret); 1899 } 1900 1901 return ret; 1902 } 1903 1904 static int 1905 hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1906 { 1907 struct hns3_mac_vlan_tbl_entry_cmd req; 1908 struct hns3_cmd_desc desc[3]; 1909 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1910 uint8_t vf_id; 1911 int ret; 1912 1913 /* Check if mac addr is valid */ 1914 if (!rte_is_multicast_ether_addr(mac_addr)) { 1915 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1916 mac_addr); 1917 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid", 1918 mac_str); 1919 return -EINVAL; 1920 } 1921 1922 memset(&req, 0, sizeof(req)); 1923 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1924 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1925 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1926 if (ret == 0) { 1927 /* 1928 * This mac addr exist, remove this handle's VFID for it. 1929 * In current version VF is not supported when PF is driven by 1930 * DPDK driver, just need to configure parameters for PF vport. 
1931 */ 1932 vf_id = HNS3_PF_FUNC_ID; 1933 hns3_update_desc_vfid(desc, vf_id, true); 1934 1935 /* All the vfid is zero, so need to delete this entry */ 1936 ret = hns3_remove_mac_vlan_tbl(hw, &req); 1937 } else if (ret == -ENOENT) { 1938 /* This mac addr doesn't exist. */ 1939 return 0; 1940 } 1941 1942 if (ret) { 1943 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1944 mac_addr); 1945 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret); 1946 } 1947 1948 return ret; 1949 } 1950 1951 static int 1952 hns3_set_mc_addr_chk_param(struct hns3_hw *hw, 1953 struct rte_ether_addr *mc_addr_set, 1954 uint32_t nb_mc_addr) 1955 { 1956 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1957 struct rte_ether_addr *addr; 1958 uint32_t i; 1959 uint32_t j; 1960 1961 if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { 1962 hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " 1963 "invalid. valid range: 0~%d", 1964 nb_mc_addr, HNS3_MC_MACADDR_NUM); 1965 return -EINVAL; 1966 } 1967 1968 /* Check if input mac addresses are valid */ 1969 for (i = 0; i < nb_mc_addr; i++) { 1970 addr = &mc_addr_set[i]; 1971 if (!rte_is_multicast_ether_addr(addr)) { 1972 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1973 addr); 1974 hns3_err(hw, 1975 "failed to set mc mac addr, addr(%s) invalid.", 1976 mac_str); 1977 return -EINVAL; 1978 } 1979 1980 /* Check if there are duplicate addresses */ 1981 for (j = i + 1; j < nb_mc_addr; j++) { 1982 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 1983 hns3_ether_format_addr(mac_str, 1984 RTE_ETHER_ADDR_FMT_SIZE, 1985 addr); 1986 hns3_err(hw, "failed to set mc mac addr, " 1987 "addrs invalid. two same addrs(%s).", 1988 mac_str); 1989 return -EINVAL; 1990 } 1991 } 1992 1993 /* 1994 * Check if there are duplicate addresses between mac_addrs 1995 * and mc_addr_set 1996 */ 1997 for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) { 1998 if (rte_is_same_ether_addr(addr, 1999 &hw->data->mac_addrs[j])) { 2000 hns3_ether_format_addr(mac_str, 2001 RTE_ETHER_ADDR_FMT_SIZE, 2002 addr); 2003 hns3_err(hw, "failed to set mc mac addr, " 2004 "addrs invalid. 
addrs(%s) has already " 2005 "configured in mac_addr add API", 2006 mac_str); 2007 return -EINVAL; 2008 } 2009 } 2010 } 2011 2012 return 0; 2013 } 2014 2015 static void 2016 hns3_set_mc_addr_calc_addr(struct hns3_hw *hw, 2017 struct rte_ether_addr *mc_addr_set, 2018 int mc_addr_num, 2019 struct rte_ether_addr *reserved_addr_list, 2020 int *reserved_addr_num, 2021 struct rte_ether_addr *add_addr_list, 2022 int *add_addr_num, 2023 struct rte_ether_addr *rm_addr_list, 2024 int *rm_addr_num) 2025 { 2026 struct rte_ether_addr *addr; 2027 int current_addr_num; 2028 int reserved_num = 0; 2029 int add_num = 0; 2030 int rm_num = 0; 2031 int num; 2032 int i; 2033 int j; 2034 bool same_addr; 2035 2036 /* Calculate the mc mac address list that should be removed */ 2037 current_addr_num = hw->mc_addrs_num; 2038 for (i = 0; i < current_addr_num; i++) { 2039 addr = &hw->mc_addrs[i]; 2040 same_addr = false; 2041 for (j = 0; j < mc_addr_num; j++) { 2042 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 2043 same_addr = true; 2044 break; 2045 } 2046 } 2047 2048 if (!same_addr) { 2049 rte_ether_addr_copy(addr, &rm_addr_list[rm_num]); 2050 rm_num++; 2051 } else { 2052 rte_ether_addr_copy(addr, 2053 &reserved_addr_list[reserved_num]); 2054 reserved_num++; 2055 } 2056 } 2057 2058 /* Calculate the mc mac address list that should be added */ 2059 for (i = 0; i < mc_addr_num; i++) { 2060 addr = &mc_addr_set[i]; 2061 same_addr = false; 2062 for (j = 0; j < current_addr_num; j++) { 2063 if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) { 2064 same_addr = true; 2065 break; 2066 } 2067 } 2068 2069 if (!same_addr) { 2070 rte_ether_addr_copy(addr, &add_addr_list[add_num]); 2071 add_num++; 2072 } 2073 } 2074 2075 /* Reorder the mc mac address list maintained by driver */ 2076 for (i = 0; i < reserved_num; i++) 2077 rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]); 2078 2079 for (i = 0; i < rm_num; i++) { 2080 num = reserved_num + i; 2081 rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]); 2082 } 2083 2084 *reserved_addr_num = reserved_num; 2085 *add_addr_num = add_num; 2086 *rm_addr_num = rm_num; 2087 } 2088 2089 static int 2090 hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, 2091 struct rte_ether_addr *mc_addr_set, 2092 uint32_t nb_mc_addr) 2093 { 2094 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2095 struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM]; 2096 struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM]; 2097 struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM]; 2098 struct rte_ether_addr *addr; 2099 int reserved_addr_num; 2100 int add_addr_num; 2101 int rm_addr_num; 2102 int mc_addr_num; 2103 int num; 2104 int ret; 2105 int i; 2106 2107 /* Check if input parameters are valid */ 2108 ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); 2109 if (ret) 2110 return ret; 2111 2112 rte_spinlock_lock(&hw->lock); 2113 2114 /* 2115 * Calculate the mc mac address lists those should be removed and be 2116 * added, Reorder the mc mac address list maintained by driver. 
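* For example, if the driver currently holds {A, B, C} and the application
* passes {B, D}, then B goes to the reserved list, A and C go to the remove
* list and D goes to the add list; hw->mc_addrs is then reordered so that
* the reserved entries come first.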
2117 */ 2118 mc_addr_num = (int)nb_mc_addr; 2119 hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num, 2120 reserved_addr_list, &reserved_addr_num, 2121 add_addr_list, &add_addr_num, 2122 rm_addr_list, &rm_addr_num); 2123 2124 /* Remove mc mac addresses */ 2125 for (i = 0; i < rm_addr_num; i++) { 2126 num = rm_addr_num - i - 1; 2127 addr = &rm_addr_list[num]; 2128 ret = hns3_remove_mc_addr(hw, addr); 2129 if (ret) { 2130 rte_spinlock_unlock(&hw->lock); 2131 return ret; 2132 } 2133 hw->mc_addrs_num--; 2134 } 2135 2136 /* Add mc mac addresses */ 2137 for (i = 0; i < add_addr_num; i++) { 2138 addr = &add_addr_list[i]; 2139 ret = hns3_add_mc_addr(hw, addr); 2140 if (ret) { 2141 rte_spinlock_unlock(&hw->lock); 2142 return ret; 2143 } 2144 2145 num = reserved_addr_num + i; 2146 rte_ether_addr_copy(addr, &hw->mc_addrs[num]); 2147 hw->mc_addrs_num++; 2148 } 2149 rte_spinlock_unlock(&hw->lock); 2150 2151 return 0; 2152 } 2153 2154 static int 2155 hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) 2156 { 2157 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 2158 struct hns3_hw *hw = &hns->hw; 2159 struct rte_ether_addr *addr; 2160 int err = 0; 2161 int ret; 2162 int i; 2163 2164 for (i = 0; i < hw->mc_addrs_num; i++) { 2165 addr = &hw->mc_addrs[i]; 2166 if (!rte_is_multicast_ether_addr(addr)) 2167 continue; 2168 if (del) 2169 ret = hns3_remove_mc_addr(hw, addr); 2170 else 2171 ret = hns3_add_mc_addr(hw, addr); 2172 if (ret) { 2173 err = ret; 2174 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2175 addr); 2176 hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d", 2177 del ? "Remove" : "Restore", mac_str, ret); 2178 } 2179 } 2180 return err; 2181 } 2182 2183 static int 2184 hns3_check_mq_mode(struct rte_eth_dev *dev) 2185 { 2186 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 2187 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 2188 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2189 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2190 struct rte_eth_dcb_rx_conf *dcb_rx_conf; 2191 struct rte_eth_dcb_tx_conf *dcb_tx_conf; 2192 uint8_t num_tc; 2193 int max_tc = 0; 2194 int i; 2195 2196 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 2197 dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; 2198 2199 if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { 2200 hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB_RSS is not supported. " 2201 "rx_mq_mode = %d", rx_mq_mode); 2202 return -EINVAL; 2203 } 2204 2205 if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB || 2206 tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) { 2207 hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB and ETH_MQ_TX_VMDQ_DCB " 2208 "is not supported. 
rx_mq_mode = %d, tx_mq_mode = %d", 2209 rx_mq_mode, tx_mq_mode); 2210 return -EINVAL; 2211 } 2212 2213 if (rx_mq_mode == ETH_MQ_RX_DCB_RSS) { 2214 if (dcb_rx_conf->nb_tcs > pf->tc_max) { 2215 hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.", 2216 dcb_rx_conf->nb_tcs, pf->tc_max); 2217 return -EINVAL; 2218 } 2219 2220 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS || 2221 dcb_rx_conf->nb_tcs == HNS3_8_TCS)) { 2222 hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, " 2223 "nb_tcs(%d) != %d or %d in rx direction.", 2224 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS); 2225 return -EINVAL; 2226 } 2227 2228 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) { 2229 hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)", 2230 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs); 2231 return -EINVAL; 2232 } 2233 2234 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) { 2235 if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) { 2236 hns3_err(hw, "dcb_tc[%d] = %u in rx direction, " 2237 "is not equal to one in tx direction.", 2238 i, dcb_rx_conf->dcb_tc[i]); 2239 return -EINVAL; 2240 } 2241 if (dcb_rx_conf->dcb_tc[i] > max_tc) 2242 max_tc = dcb_rx_conf->dcb_tc[i]; 2243 } 2244 2245 num_tc = max_tc + 1; 2246 if (num_tc > dcb_rx_conf->nb_tcs) { 2247 hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)", 2248 num_tc, dcb_rx_conf->nb_tcs); 2249 return -EINVAL; 2250 } 2251 } 2252 2253 return 0; 2254 } 2255 2256 static int 2257 hns3_check_dcb_cfg(struct rte_eth_dev *dev) 2258 { 2259 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2260 2261 if (!hns3_dev_dcb_supported(hw)) { 2262 hns3_err(hw, "this port does not support dcb configurations."); 2263 return -EOPNOTSUPP; 2264 } 2265 2266 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) { 2267 hns3_err(hw, "MAC pause enabled, cannot config dcb info."); 2268 return -EOPNOTSUPP; 2269 } 2270 2271 /* Check multiple queue mode */ 2272 return hns3_check_mq_mode(dev); 2273 } 2274 2275 static int 2276 hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en, 2277 enum hns3_ring_type queue_type, uint16_t queue_id) 2278 { 2279 struct hns3_cmd_desc desc; 2280 struct hns3_ctrl_vector_chain_cmd *req = 2281 (struct hns3_ctrl_vector_chain_cmd *)desc.data; 2282 enum hns3_cmd_status status; 2283 enum hns3_opcode_type op; 2284 uint16_t tqp_type_and_id = 0; 2285 uint16_t type; 2286 uint16_t gl; 2287 2288 op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; 2289 hns3_cmd_setup_basic_desc(&desc, op, false); 2290 req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M, 2291 HNS3_TQP_INT_ID_L_S); 2292 req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M, 2293 HNS3_TQP_INT_ID_H_S); 2294 2295 if (queue_type == HNS3_RING_TYPE_RX) 2296 gl = HNS3_RING_GL_RX; 2297 else 2298 gl = HNS3_RING_GL_TX; 2299 2300 type = queue_type; 2301 2302 hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S, 2303 type); 2304 hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id); 2305 hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S, 2306 gl); 2307 req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); 2308 req->int_cause_num = 1; 2309 status = hns3_cmd_send(hw, &desc, 1); 2310 if (status) { 2311 hns3_err(hw, "%s TQP %u fail, vector_id is %u, status is %d.", 2312 en ? 
"Map" : "Unmap", queue_id, vector_id, status); 2313 return status; 2314 } 2315 2316 return 0; 2317 } 2318 2319 static int 2320 hns3_init_ring_with_vector(struct hns3_hw *hw) 2321 { 2322 uint16_t vec; 2323 int ret; 2324 int i; 2325 2326 /* 2327 * In hns3 network engine, vector 0 is always the misc interrupt of this 2328 * function, vector 1~N can be used respectively for the queues of the 2329 * function. Tx and Rx queues with the same number share the interrupt 2330 * vector. In the initialization clearing the all hardware mapping 2331 * relationship configurations between queues and interrupt vectors is 2332 * needed, so some error caused by the residual configurations, such as 2333 * the unexpected Tx interrupt, can be avoid. 2334 */ 2335 vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ 2336 if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE) 2337 vec = vec - 1; /* the last interrupt is reserved */ 2338 hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); 2339 for (i = 0; i < hw->intr_tqps_num; i++) { 2340 /* 2341 * Set gap limiter/rate limiter/quanity limiter algorithm 2342 * configuration for interrupt coalesce of queue's interrupt. 2343 */ 2344 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, 2345 HNS3_TQP_INTR_GL_DEFAULT); 2346 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, 2347 HNS3_TQP_INTR_GL_DEFAULT); 2348 hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); 2349 /* 2350 * QL(quantity limiter) is not used currently, just set 0 to 2351 * close it. 2352 */ 2353 hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); 2354 2355 ret = hns3_bind_ring_with_vector(hw, vec, false, 2356 HNS3_RING_TYPE_TX, i); 2357 if (ret) { 2358 PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with " 2359 "vector: %u, ret=%d", i, vec, ret); 2360 return ret; 2361 } 2362 2363 ret = hns3_bind_ring_with_vector(hw, vec, false, 2364 HNS3_RING_TYPE_RX, i); 2365 if (ret) { 2366 PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with " 2367 "vector: %u, ret=%d", i, vec, ret); 2368 return ret; 2369 } 2370 } 2371 2372 return 0; 2373 } 2374 2375 static int 2376 hns3_dev_configure(struct rte_eth_dev *dev) 2377 { 2378 struct hns3_adapter *hns = dev->data->dev_private; 2379 struct rte_eth_conf *conf = &dev->data->dev_conf; 2380 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; 2381 struct hns3_hw *hw = &hns->hw; 2382 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2383 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2384 struct rte_eth_rss_conf rss_conf; 2385 uint32_t max_rx_pkt_len; 2386 uint16_t mtu; 2387 bool gro_en; 2388 int ret; 2389 2390 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); 2391 2392 /* 2393 * Some versions of hardware network engine does not support 2394 * individually enable/disable/reset the Tx or Rx queue. These devices 2395 * must enable/disable/reset Tx and Rx queues at the same time. When the 2396 * numbers of Tx queues allocated by upper applications are not equal to 2397 * the numbers of Rx queues, driver needs to setup fake Tx or Rx queues 2398 * to adjust numbers of Tx/Rx queues. otherwise, network engine can not 2399 * work as usual. But these fake queues are imperceptible, and can not 2400 * be used by upper applications. 
2401 */ 2402 if (!hns3_dev_indep_txrx_supported(hw)) { 2403 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); 2404 if (ret) { 2405 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", 2406 ret); 2407 return ret; 2408 } 2409 } 2410 2411 hw->adapter_state = HNS3_NIC_CONFIGURING; 2412 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { 2413 hns3_err(hw, "setting link speed/duplex not supported"); 2414 ret = -EINVAL; 2415 goto cfg_err; 2416 } 2417 2418 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) { 2419 ret = hns3_check_dcb_cfg(dev); 2420 if (ret) 2421 goto cfg_err; 2422 } 2423 2424 /* When RSS is not configured, redirect the packet queue 0 */ 2425 if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) { 2426 conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 2427 rss_conf = conf->rx_adv_conf.rss_conf; 2428 hw->rss_dis_flag = false; 2429 ret = hns3_dev_rss_hash_update(dev, &rss_conf); 2430 if (ret) 2431 goto cfg_err; 2432 } 2433 2434 /* 2435 * If jumbo frames are enabled, MTU needs to be refreshed 2436 * according to the maximum RX packet length. 2437 */ 2438 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 2439 max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; 2440 if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || 2441 max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { 2442 hns3_err(hw, "maximum Rx packet length must be greater " 2443 "than %u and less than %u when jumbo frame enabled.", 2444 (uint16_t)HNS3_DEFAULT_FRAME_LEN, 2445 (uint16_t)HNS3_MAX_FRAME_LEN); 2446 ret = -EINVAL; 2447 goto cfg_err; 2448 } 2449 2450 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); 2451 ret = hns3_dev_mtu_set(dev, mtu); 2452 if (ret) 2453 goto cfg_err; 2454 dev->data->mtu = mtu; 2455 } 2456 2457 ret = hns3_dev_configure_vlan(dev); 2458 if (ret) 2459 goto cfg_err; 2460 2461 /* config hardware GRO */ 2462 gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? 
true : false; 2463 ret = hns3_config_gro(hw, gro_en); 2464 if (ret) 2465 goto cfg_err; 2466 2467 hns->rx_simple_allowed = true; 2468 hns->rx_vec_allowed = true; 2469 hns->tx_simple_allowed = true; 2470 hns->tx_vec_allowed = true; 2471 2472 hns3_init_rx_ptype_tble(dev); 2473 hw->adapter_state = HNS3_NIC_CONFIGURED; 2474 2475 return 0; 2476 2477 cfg_err: 2478 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); 2479 hw->adapter_state = HNS3_NIC_INITIALIZED; 2480 2481 return ret; 2482 } 2483 2484 static int 2485 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) 2486 { 2487 struct hns3_config_max_frm_size_cmd *req; 2488 struct hns3_cmd_desc desc; 2489 2490 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false); 2491 2492 req = (struct hns3_config_max_frm_size_cmd *)desc.data; 2493 req->max_frm_size = rte_cpu_to_le_16(new_mps); 2494 req->min_frm_size = RTE_ETHER_MIN_LEN; 2495 2496 return hns3_cmd_send(hw, &desc, 1); 2497 } 2498 2499 static int 2500 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) 2501 { 2502 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2503 uint16_t original_mps = hns->pf.mps; 2504 int err; 2505 int ret; 2506 2507 ret = hns3_set_mac_mtu(hw, mps); 2508 if (ret) { 2509 hns3_err(hw, "failed to set mtu, ret = %d", ret); 2510 return ret; 2511 } 2512 2513 hns->pf.mps = mps; 2514 ret = hns3_buffer_alloc(hw); 2515 if (ret) { 2516 hns3_err(hw, "failed to allocate buffer, ret = %d", ret); 2517 goto rollback; 2518 } 2519 2520 return 0; 2521 2522 rollback: 2523 err = hns3_set_mac_mtu(hw, original_mps); 2524 if (err) { 2525 hns3_err(hw, "fail to rollback MTU, err = %d", err); 2526 return ret; 2527 } 2528 hns->pf.mps = original_mps; 2529 2530 return ret; 2531 } 2532 2533 static int 2534 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 2535 { 2536 struct hns3_adapter *hns = dev->data->dev_private; 2537 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; 2538 struct hns3_hw *hw = &hns->hw; 2539 bool is_jumbo_frame; 2540 int ret; 2541 2542 if (dev->data->dev_started) { 2543 hns3_err(hw, "Failed to set mtu, port %u must be stopped " 2544 "before configuration", dev->data->port_id); 2545 return -EBUSY; 2546 } 2547 2548 rte_spinlock_lock(&hw->lock); 2549 is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false; 2550 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN); 2551 2552 /* 2553 * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely 2554 * assign to "uint16_t" type variable. 2555 */ 2556 ret = hns3_config_mtu(hw, (uint16_t)frame_size); 2557 if (ret) { 2558 rte_spinlock_unlock(&hw->lock); 2559 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d", 2560 dev->data->port_id, mtu, ret); 2561 return ret; 2562 } 2563 2564 if (is_jumbo_frame) 2565 dev->data->dev_conf.rxmode.offloads |= 2566 DEV_RX_OFFLOAD_JUMBO_FRAME; 2567 else 2568 dev->data->dev_conf.rxmode.offloads &= 2569 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 2570 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 2571 rte_spinlock_unlock(&hw->lock); 2572 2573 return 0; 2574 } 2575 2576 int 2577 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) 2578 { 2579 struct hns3_adapter *hns = eth_dev->data->dev_private; 2580 struct hns3_hw *hw = &hns->hw; 2581 uint16_t queue_num = hw->tqps_num; 2582 2583 /* 2584 * In interrupt mode, 'max_rx_queues' is set based on the number of 2585 * MSI-X interrupt resources of the hardware. 
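* For example, a port with 16 TQPs but only 8 vectors usable for queues
* reports max_rx_queues as hw->intr_tqps_num (8) when intr_conf.rxq is
* enabled, while max_tx_queues still reports the full TQP number.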
2586 */ 2587 if (hw->data->dev_conf.intr_conf.rxq == 1) 2588 queue_num = hw->intr_tqps_num; 2589 2590 info->max_rx_queues = queue_num; 2591 info->max_tx_queues = hw->tqps_num; 2592 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ 2593 info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; 2594 info->max_mac_addrs = HNS3_UC_MACADDR_NUM; 2595 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; 2596 info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; 2597 info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 2598 DEV_RX_OFFLOAD_TCP_CKSUM | 2599 DEV_RX_OFFLOAD_UDP_CKSUM | 2600 DEV_RX_OFFLOAD_SCTP_CKSUM | 2601 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 2602 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | 2603 DEV_RX_OFFLOAD_KEEP_CRC | 2604 DEV_RX_OFFLOAD_SCATTER | 2605 DEV_RX_OFFLOAD_VLAN_STRIP | 2606 DEV_RX_OFFLOAD_VLAN_FILTER | 2607 DEV_RX_OFFLOAD_JUMBO_FRAME | 2608 DEV_RX_OFFLOAD_RSS_HASH | 2609 DEV_RX_OFFLOAD_TCP_LRO); 2610 info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 2611 DEV_TX_OFFLOAD_IPV4_CKSUM | 2612 DEV_TX_OFFLOAD_TCP_CKSUM | 2613 DEV_TX_OFFLOAD_UDP_CKSUM | 2614 DEV_TX_OFFLOAD_SCTP_CKSUM | 2615 DEV_TX_OFFLOAD_MULTI_SEGS | 2616 DEV_TX_OFFLOAD_TCP_TSO | 2617 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 2618 DEV_TX_OFFLOAD_GRE_TNL_TSO | 2619 DEV_TX_OFFLOAD_GENEVE_TNL_TSO | 2620 DEV_TX_OFFLOAD_MBUF_FAST_FREE | 2621 hns3_txvlan_cap_get(hw)); 2622 2623 if (hns3_dev_outer_udp_cksum_supported(hw)) 2624 info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; 2625 2626 if (hns3_dev_indep_txrx_supported(hw)) 2627 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 2628 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 2629 2630 info->rx_desc_lim = (struct rte_eth_desc_lim) { 2631 .nb_max = HNS3_MAX_RING_DESC, 2632 .nb_min = HNS3_MIN_RING_DESC, 2633 .nb_align = HNS3_ALIGN_RING_DESC, 2634 }; 2635 2636 info->tx_desc_lim = (struct rte_eth_desc_lim) { 2637 .nb_max = HNS3_MAX_RING_DESC, 2638 .nb_min = HNS3_MIN_RING_DESC, 2639 .nb_align = HNS3_ALIGN_RING_DESC, 2640 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, 2641 .nb_mtu_seg_max = hw->max_non_tso_bd_num, 2642 }; 2643 2644 info->default_rxconf = (struct rte_eth_rxconf) { 2645 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, 2646 /* 2647 * If there are no available Rx buffer descriptors, incoming 2648 * packets are always dropped by hardware based on hns3 network 2649 * engine. 
2650 */ 2651 .rx_drop_en = 1, 2652 .offloads = 0, 2653 }; 2654 info->default_txconf = (struct rte_eth_txconf) { 2655 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, 2656 .offloads = 0, 2657 }; 2658 2659 info->vmdq_queue_num = 0; 2660 2661 info->reta_size = hw->rss_ind_tbl_size; 2662 info->hash_key_size = HNS3_RSS_KEY_SIZE; 2663 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; 2664 2665 info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2666 info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2667 info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2668 info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2669 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2670 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2671 2672 return 0; 2673 } 2674 2675 static int 2676 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, 2677 size_t fw_size) 2678 { 2679 struct hns3_adapter *hns = eth_dev->data->dev_private; 2680 struct hns3_hw *hw = &hns->hw; 2681 uint32_t version = hw->fw_version; 2682 int ret; 2683 2684 ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", 2685 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, 2686 HNS3_FW_VERSION_BYTE3_S), 2687 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, 2688 HNS3_FW_VERSION_BYTE2_S), 2689 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, 2690 HNS3_FW_VERSION_BYTE1_S), 2691 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, 2692 HNS3_FW_VERSION_BYTE0_S)); 2693 ret += 1; /* add the size of '\0' */ 2694 if (fw_size < (uint32_t)ret) 2695 return ret; 2696 else 2697 return 0; 2698 } 2699 2700 static int 2701 hns3_update_port_link_info(struct rte_eth_dev *eth_dev) 2702 { 2703 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2704 2705 (void)hns3_update_link_status(hw); 2706 2707 return hns3_update_link_info(eth_dev); 2708 } 2709 2710 static void 2711 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev, 2712 struct rte_eth_link *new_link) 2713 { 2714 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2715 struct hns3_mac *mac = &hw->mac; 2716 2717 switch (mac->link_speed) { 2718 case ETH_SPEED_NUM_10M: 2719 case ETH_SPEED_NUM_100M: 2720 case ETH_SPEED_NUM_1G: 2721 case ETH_SPEED_NUM_10G: 2722 case ETH_SPEED_NUM_25G: 2723 case ETH_SPEED_NUM_40G: 2724 case ETH_SPEED_NUM_50G: 2725 case ETH_SPEED_NUM_100G: 2726 case ETH_SPEED_NUM_200G: 2727 new_link->link_speed = mac->link_speed; 2728 break; 2729 default: 2730 if (mac->link_status) 2731 new_link->link_speed = ETH_SPEED_NUM_UNKNOWN; 2732 else 2733 new_link->link_speed = ETH_SPEED_NUM_NONE; 2734 break; 2735 } 2736 2737 new_link->link_duplex = mac->link_duplex; 2738 new_link->link_status = mac->link_status ? 
ETH_LINK_UP : ETH_LINK_DOWN; 2739 new_link->link_autoneg = 2740 !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); 2741 } 2742 2743 static int 2744 hns3_dev_link_update(struct rte_eth_dev *eth_dev, 2745 __rte_unused int wait_to_complete) 2746 { 2747 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2748 struct hns3_mac *mac = &hw->mac; 2749 struct rte_eth_link new_link; 2750 int ret; 2751 2752 ret = hns3_update_port_link_info(eth_dev); 2753 if (ret) { 2754 mac->link_status = ETH_LINK_DOWN; 2755 hns3_err(hw, "failed to get port link info, ret = %d.", ret); 2756 } 2757 2758 memset(&new_link, 0, sizeof(new_link)); 2759 hns3_setup_linkstatus(eth_dev, &new_link); 2760 2761 return rte_eth_linkstatus_set(eth_dev, &new_link); 2762 } 2763 2764 static int 2765 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status) 2766 { 2767 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2768 struct hns3_pf *pf = &hns->pf; 2769 2770 if (!(status->pf_state & HNS3_PF_STATE_DONE)) 2771 return -EINVAL; 2772 2773 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false; 2774 2775 return 0; 2776 } 2777 2778 static int 2779 hns3_query_function_status(struct hns3_hw *hw) 2780 { 2781 #define HNS3_QUERY_MAX_CNT 10 2782 #define HNS3_QUERY_SLEEP_MSCOEND 1 2783 struct hns3_func_status_cmd *req; 2784 struct hns3_cmd_desc desc; 2785 int timeout = 0; 2786 int ret; 2787 2788 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true); 2789 req = (struct hns3_func_status_cmd *)desc.data; 2790 2791 do { 2792 ret = hns3_cmd_send(hw, &desc, 1); 2793 if (ret) { 2794 PMD_INIT_LOG(ERR, "query function status failed %d", 2795 ret); 2796 return ret; 2797 } 2798 2799 /* Check pf reset is done */ 2800 if (req->pf_state) 2801 break; 2802 2803 rte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND); 2804 } while (timeout++ < HNS3_QUERY_MAX_CNT); 2805 2806 return hns3_parse_func_status(hw, req); 2807 } 2808 2809 static int 2810 hns3_get_pf_max_tqp_num(struct hns3_hw *hw) 2811 { 2812 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2813 struct hns3_pf *pf = &hns->pf; 2814 2815 if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) { 2816 /* 2817 * The total_tqps_num obtained from firmware is maximum tqp 2818 * numbers of this port, which should be used for PF and VFs. 2819 * There is no need for pf to have so many tqp numbers in 2820 * most cases. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2821 * coming from config file, is assigned to maximum queue number 2822 * for the PF of this port by user. So users can modify the 2823 * maximum queue number of PF according to their own application 2824 * scenarios, which is more flexible to use. In addition, many 2825 * memories can be saved due to allocating queue statistics 2826 * room according to the actual number of queues required. The 2827 * maximum queue number of PF for network engine with 2828 * revision_id greater than 0x30 is assigned by config file. 2829 */ 2830 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) { 2831 hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) " 2832 "must be greater than 0.", 2833 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF); 2834 return -EINVAL; 2835 } 2836 2837 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2838 hw->total_tqps_num); 2839 } else { 2840 /* 2841 * Due to the limitation on the number of PF interrupts 2842 * available, the maximum queue number assigned to PF on 2843 * the network engine with revision_id 0x21 is 64. 
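* In this fixed mode the PF queue number is therefore
* min(total_tqps_num, HNS3_MAX_TQP_NUM_HIP08_PF), regardless of how many
* TQPs the firmware reports for the port.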
2844 */ 2845 hw->tqps_num = RTE_MIN(hw->total_tqps_num, 2846 HNS3_MAX_TQP_NUM_HIP08_PF); 2847 } 2848 2849 return 0; 2850 } 2851 2852 static int 2853 hns3_query_pf_resource(struct hns3_hw *hw) 2854 { 2855 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2856 struct hns3_pf *pf = &hns->pf; 2857 struct hns3_pf_res_cmd *req; 2858 struct hns3_cmd_desc desc; 2859 int ret; 2860 2861 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); 2862 ret = hns3_cmd_send(hw, &desc, 1); 2863 if (ret) { 2864 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); 2865 return ret; 2866 } 2867 2868 req = (struct hns3_pf_res_cmd *)desc.data; 2869 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) + 2870 rte_le_to_cpu_16(req->ext_tqp_num); 2871 ret = hns3_get_pf_max_tqp_num(hw); 2872 if (ret) 2873 return ret; 2874 2875 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; 2876 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); 2877 2878 if (req->tx_buf_size) 2879 pf->tx_buf_size = 2880 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; 2881 else 2882 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; 2883 2884 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); 2885 2886 if (req->dv_buf_size) 2887 pf->dv_buf_size = 2888 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; 2889 else 2890 pf->dv_buf_size = HNS3_DEFAULT_DV; 2891 2892 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); 2893 2894 hw->num_msi = 2895 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), 2896 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); 2897 2898 return 0; 2899 } 2900 2901 static void 2902 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) 2903 { 2904 struct hns3_cfg_param_cmd *req; 2905 uint64_t mac_addr_tmp_high; 2906 uint8_t ext_rss_size_max; 2907 uint64_t mac_addr_tmp; 2908 uint32_t i; 2909 2910 req = (struct hns3_cfg_param_cmd *)desc[0].data; 2911 2912 /* get the configuration */ 2913 cfg->vmdq_vport_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 2914 HNS3_CFG_VMDQ_M, HNS3_CFG_VMDQ_S); 2915 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 2916 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); 2917 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 2918 HNS3_CFG_TQP_DESC_N_M, 2919 HNS3_CFG_TQP_DESC_N_S); 2920 2921 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2922 HNS3_CFG_PHY_ADDR_M, 2923 HNS3_CFG_PHY_ADDR_S); 2924 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2925 HNS3_CFG_MEDIA_TP_M, 2926 HNS3_CFG_MEDIA_TP_S); 2927 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2928 HNS3_CFG_RX_BUF_LEN_M, 2929 HNS3_CFG_RX_BUF_LEN_S); 2930 /* get mac address */ 2931 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); 2932 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2933 HNS3_CFG_MAC_ADDR_H_M, 2934 HNS3_CFG_MAC_ADDR_H_S); 2935 2936 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 2937 2938 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2939 HNS3_CFG_DEFAULT_SPEED_M, 2940 HNS3_CFG_DEFAULT_SPEED_S); 2941 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2942 HNS3_CFG_RSS_SIZE_M, 2943 HNS3_CFG_RSS_SIZE_S); 2944 2945 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 2946 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 2947 2948 req = (struct hns3_cfg_param_cmd *)desc[1].data; 2949 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); 2950 2951 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2952 HNS3_CFG_SPEED_ABILITY_M, 
2953 HNS3_CFG_SPEED_ABILITY_S); 2954 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2955 HNS3_CFG_UMV_TBL_SPACE_M, 2956 HNS3_CFG_UMV_TBL_SPACE_S); 2957 if (!cfg->umv_space) 2958 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF; 2959 2960 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]), 2961 HNS3_CFG_EXT_RSS_SIZE_M, 2962 HNS3_CFG_EXT_RSS_SIZE_S); 2963 2964 /* 2965 * Field ext_rss_size_max obtained from firmware will be more flexible 2966 * for future changes and expansions, which is an exponent of 2, instead 2967 * of reading out directly. If this field is not zero, hns3 PF PMD 2968 * driver uses it as rss_size_max under one TC. Device, whose revision 2969 * id is greater than or equal to PCI_REVISION_ID_HIP09_A, obtains the 2970 * maximum number of queues supported under a TC through this field. 2971 */ 2972 if (ext_rss_size_max) 2973 cfg->rss_size_max = 1U << ext_rss_size_max; 2974 } 2975 2976 /* hns3_get_board_cfg: query the static parameter from NCL_config file in flash 2977 * @hw: pointer to struct hns3_hw 2978 * @hcfg: the config structure to be getted 2979 */ 2980 static int 2981 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg) 2982 { 2983 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM]; 2984 struct hns3_cfg_param_cmd *req; 2985 uint32_t offset; 2986 uint32_t i; 2987 int ret; 2988 2989 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) { 2990 offset = 0; 2991 req = (struct hns3_cfg_param_cmd *)desc[i].data; 2992 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM, 2993 true); 2994 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S, 2995 i * HNS3_CFG_RD_LEN_BYTES); 2996 /* Len should be divided by 4 when send to hardware */ 2997 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S, 2998 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT); 2999 req->offset = rte_cpu_to_le_32(offset); 3000 } 3001 3002 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM); 3003 if (ret) { 3004 PMD_INIT_LOG(ERR, "get config failed %d.", ret); 3005 return ret; 3006 } 3007 3008 hns3_parse_cfg(hcfg, desc); 3009 3010 return 0; 3011 } 3012 3013 static int 3014 hns3_parse_speed(int speed_cmd, uint32_t *speed) 3015 { 3016 switch (speed_cmd) { 3017 case HNS3_CFG_SPEED_10M: 3018 *speed = ETH_SPEED_NUM_10M; 3019 break; 3020 case HNS3_CFG_SPEED_100M: 3021 *speed = ETH_SPEED_NUM_100M; 3022 break; 3023 case HNS3_CFG_SPEED_1G: 3024 *speed = ETH_SPEED_NUM_1G; 3025 break; 3026 case HNS3_CFG_SPEED_10G: 3027 *speed = ETH_SPEED_NUM_10G; 3028 break; 3029 case HNS3_CFG_SPEED_25G: 3030 *speed = ETH_SPEED_NUM_25G; 3031 break; 3032 case HNS3_CFG_SPEED_40G: 3033 *speed = ETH_SPEED_NUM_40G; 3034 break; 3035 case HNS3_CFG_SPEED_50G: 3036 *speed = ETH_SPEED_NUM_50G; 3037 break; 3038 case HNS3_CFG_SPEED_100G: 3039 *speed = ETH_SPEED_NUM_100G; 3040 break; 3041 case HNS3_CFG_SPEED_200G: 3042 *speed = ETH_SPEED_NUM_200G; 3043 break; 3044 default: 3045 return -EINVAL; 3046 } 3047 3048 return 0; 3049 } 3050 3051 static void 3052 hns3_set_default_dev_specifications(struct hns3_hw *hw) 3053 { 3054 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; 3055 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; 3056 hw->rss_key_size = HNS3_RSS_KEY_SIZE; 3057 hw->max_tm_rate = HNS3_ETHER_MAX_RATE; 3058 hw->intr.int_ql_max = HNS3_INTR_QL_NONE; 3059 } 3060 3061 static void 3062 hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) 3063 { 3064 struct hns3_dev_specs_0_cmd *req0; 3065 3066 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; 3067 3068 hw->max_non_tso_bd_num = 
req0->max_non_tso_bd_num; 3069 hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); 3070 hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); 3071 hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); 3072 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); 3073 } 3074 3075 static int 3076 hns3_check_dev_specifications(struct hns3_hw *hw) 3077 { 3078 if (hw->rss_ind_tbl_size == 0 || 3079 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { 3080 hns3_err(hw, "the size of hash lookup table configured (%u)" 3081 " exceeds the maximum(%u)", hw->rss_ind_tbl_size, 3082 HNS3_RSS_IND_TBL_SIZE_MAX); 3083 return -EINVAL; 3084 } 3085 3086 return 0; 3087 } 3088 3089 static int 3090 hns3_query_dev_specifications(struct hns3_hw *hw) 3091 { 3092 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; 3093 int ret; 3094 int i; 3095 3096 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3097 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, 3098 true); 3099 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3100 } 3101 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); 3102 3103 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); 3104 if (ret) 3105 return ret; 3106 3107 hns3_parse_dev_specifications(hw, desc); 3108 3109 return hns3_check_dev_specifications(hw); 3110 } 3111 3112 static int 3113 hns3_get_capability(struct hns3_hw *hw) 3114 { 3115 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3116 struct rte_pci_device *pci_dev; 3117 struct hns3_pf *pf = &hns->pf; 3118 struct rte_eth_dev *eth_dev; 3119 uint16_t device_id; 3120 uint8_t revision; 3121 int ret; 3122 3123 eth_dev = &rte_eth_devices[hw->data->port_id]; 3124 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3125 device_id = pci_dev->id.device_id; 3126 3127 if (device_id == HNS3_DEV_ID_25GE_RDMA || 3128 device_id == HNS3_DEV_ID_50GE_RDMA || 3129 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || 3130 device_id == HNS3_DEV_ID_200G_RDMA) 3131 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); 3132 3133 /* Get PCI revision id */ 3134 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, 3135 HNS3_PCI_REVISION_ID); 3136 if (ret != HNS3_PCI_REVISION_ID_LEN) { 3137 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", 3138 ret); 3139 return -EIO; 3140 } 3141 hw->revision = revision; 3142 3143 if (revision < PCI_REVISION_ID_HIP09_A) { 3144 hns3_set_default_dev_specifications(hw); 3145 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; 3146 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; 3147 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; 3148 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE; 3149 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1; 3150 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; 3151 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE; 3152 hw->rss_info.ipv6_sctp_offload_supported = false; 3153 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; 3154 return 0; 3155 } 3156 3157 ret = hns3_query_dev_specifications(hw); 3158 if (ret) { 3159 PMD_INIT_LOG(ERR, 3160 "failed to query dev specifications, ret = %d", 3161 ret); 3162 return ret; 3163 } 3164 3165 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 3166 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 3167 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 3168 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; 3169 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; 3170 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; 3171 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; 3172 
hw->rss_info.ipv6_sctp_offload_supported = true; 3173 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; 3174 3175 return 0; 3176 } 3177 3178 static int 3179 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) 3180 { 3181 int ret; 3182 3183 switch (media_type) { 3184 case HNS3_MEDIA_TYPE_COPPER: 3185 if (!hns3_dev_copper_supported(hw)) { 3186 PMD_INIT_LOG(ERR, 3187 "Media type is copper, not supported."); 3188 ret = -EOPNOTSUPP; 3189 } else { 3190 ret = 0; 3191 } 3192 break; 3193 case HNS3_MEDIA_TYPE_FIBER: 3194 ret = 0; 3195 break; 3196 case HNS3_MEDIA_TYPE_BACKPLANE: 3197 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported."); 3198 ret = -EOPNOTSUPP; 3199 break; 3200 default: 3201 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); 3202 ret = -EINVAL; 3203 break; 3204 } 3205 3206 return ret; 3207 } 3208 3209 static int 3210 hns3_get_board_configuration(struct hns3_hw *hw) 3211 { 3212 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3213 struct hns3_pf *pf = &hns->pf; 3214 struct hns3_cfg cfg; 3215 int ret; 3216 3217 ret = hns3_get_board_cfg(hw, &cfg); 3218 if (ret) { 3219 PMD_INIT_LOG(ERR, "get board config failed %d", ret); 3220 return ret; 3221 } 3222 3223 ret = hns3_check_media_type(hw, cfg.media_type); 3224 if (ret) 3225 return ret; 3226 3227 hw->mac.media_type = cfg.media_type; 3228 hw->rss_size_max = cfg.rss_size_max; 3229 hw->rss_dis_flag = false; 3230 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); 3231 hw->mac.phy_addr = cfg.phy_addr; 3232 hw->mac.default_addr_setted = false; 3233 hw->num_tx_desc = cfg.tqp_desc_num; 3234 hw->num_rx_desc = cfg.tqp_desc_num; 3235 hw->dcb_info.num_pg = 1; 3236 hw->dcb_info.hw_pfc_map = 0; 3237 3238 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); 3239 if (ret) { 3240 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d", 3241 cfg.default_speed, ret); 3242 return ret; 3243 } 3244 3245 pf->tc_max = cfg.tc_num; 3246 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { 3247 PMD_INIT_LOG(WARNING, 3248 "Get TC num(%u) from flash, set TC num to 1", 3249 pf->tc_max); 3250 pf->tc_max = 1; 3251 } 3252 3253 /* Dev does not support DCB */ 3254 if (!hns3_dev_dcb_supported(hw)) { 3255 pf->tc_max = 1; 3256 pf->pfc_max = 0; 3257 } else 3258 pf->pfc_max = pf->tc_max; 3259 3260 hw->dcb_info.num_tc = 1; 3261 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, 3262 hw->tqps_num / hw->dcb_info.num_tc); 3263 hns3_set_bit(hw->hw_tc_map, 0, 1); 3264 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; 3265 3266 pf->wanted_umv_size = cfg.umv_space; 3267 3268 return ret; 3269 } 3270 3271 static int 3272 hns3_get_configuration(struct hns3_hw *hw) 3273 { 3274 int ret; 3275 3276 ret = hns3_query_function_status(hw); 3277 if (ret) { 3278 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); 3279 return ret; 3280 } 3281 3282 /* Get device capability */ 3283 ret = hns3_get_capability(hw); 3284 if (ret) { 3285 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 3286 return ret; 3287 } 3288 3289 /* Get pf resource */ 3290 ret = hns3_query_pf_resource(hw); 3291 if (ret) { 3292 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); 3293 return ret; 3294 } 3295 3296 ret = hns3_get_board_configuration(hw); 3297 if (ret) { 3298 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); 3299 return ret; 3300 } 3301 3302 ret = hns3_query_dev_fec_info(hw); 3303 if (ret) 3304 PMD_INIT_LOG(ERR, 3305 "failed to query FEC information, ret = %d", ret); 3306 3307 return ret; 3308 } 3309 3310 static int 3311 
hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid, 3312 uint16_t tqp_vid, bool is_pf) 3313 { 3314 struct hns3_tqp_map_cmd *req; 3315 struct hns3_cmd_desc desc; 3316 int ret; 3317 3318 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); 3319 3320 req = (struct hns3_tqp_map_cmd *)desc.data; 3321 req->tqp_id = rte_cpu_to_le_16(tqp_pid); 3322 req->tqp_vf = func_id; 3323 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; 3324 if (!is_pf) 3325 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); 3326 req->tqp_vid = rte_cpu_to_le_16(tqp_vid); 3327 3328 ret = hns3_cmd_send(hw, &desc, 1); 3329 if (ret) 3330 PMD_INIT_LOG(ERR, "TQP map failed %d", ret); 3331 3332 return ret; 3333 } 3334 3335 static int 3336 hns3_map_tqp(struct hns3_hw *hw) 3337 { 3338 int ret; 3339 int i; 3340 3341 /* 3342 * In current version, VF is not supported when PF is driven by DPDK 3343 * driver, so we assign total tqps_num tqps allocated to this port 3344 * to PF. 3345 */ 3346 for (i = 0; i < hw->total_tqps_num; i++) { 3347 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true); 3348 if (ret) 3349 return ret; 3350 } 3351 3352 return 0; 3353 } 3354 3355 static int 3356 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 3357 { 3358 struct hns3_config_mac_speed_dup_cmd *req; 3359 struct hns3_cmd_desc desc; 3360 int ret; 3361 3362 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; 3363 3364 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); 3365 3366 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0); 3367 3368 switch (speed) { 3369 case ETH_SPEED_NUM_10M: 3370 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3371 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); 3372 break; 3373 case ETH_SPEED_NUM_100M: 3374 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3375 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); 3376 break; 3377 case ETH_SPEED_NUM_1G: 3378 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3379 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); 3380 break; 3381 case ETH_SPEED_NUM_10G: 3382 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3383 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); 3384 break; 3385 case ETH_SPEED_NUM_25G: 3386 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3387 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); 3388 break; 3389 case ETH_SPEED_NUM_40G: 3390 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3391 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); 3392 break; 3393 case ETH_SPEED_NUM_50G: 3394 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3395 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); 3396 break; 3397 case ETH_SPEED_NUM_100G: 3398 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3399 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); 3400 break; 3401 case ETH_SPEED_NUM_200G: 3402 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3403 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); 3404 break; 3405 default: 3406 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); 3407 return -EINVAL; 3408 } 3409 3410 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); 3411 3412 ret = hns3_cmd_send(hw, &desc, 1); 3413 if (ret) 3414 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); 3415 3416 return ret; 3417 } 3418 3419 static int 3420 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3421 { 3422 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3423 struct hns3_pf *pf = &hns->pf; 3424 struct hns3_priv_buf *priv; 3425 uint32_t i, total_size; 3426 3427 total_size = pf->pkt_buf_size; 3428 3429 /* alloc tx buffer for all enabled tc 
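(each TC set in hw_tc_map gets a fixed pf->tx_buf_size slice; the slices must fit within pf->pkt_buf_size, otherwise -ENOMEM is returned)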
*/ 3430 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3431 priv = &buf_alloc->priv_buf[i]; 3432 3433 if (hw->hw_tc_map & BIT(i)) { 3434 if (total_size < pf->tx_buf_size) 3435 return -ENOMEM; 3436 3437 priv->tx_buf_size = pf->tx_buf_size; 3438 } else 3439 priv->tx_buf_size = 0; 3440 3441 total_size -= priv->tx_buf_size; 3442 } 3443 3444 return 0; 3445 } 3446 3447 static int 3448 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3449 { 3450 /* TX buffer size is unit by 128 byte */ 3451 #define HNS3_BUF_SIZE_UNIT_SHIFT 7 3452 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) 3453 struct hns3_tx_buff_alloc_cmd *req; 3454 struct hns3_cmd_desc desc; 3455 uint32_t buf_size; 3456 uint32_t i; 3457 int ret; 3458 3459 req = (struct hns3_tx_buff_alloc_cmd *)desc.data; 3460 3461 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); 3462 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3463 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 3464 3465 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; 3466 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | 3467 HNS3_BUF_SIZE_UPDATE_EN_MSK); 3468 } 3469 3470 ret = hns3_cmd_send(hw, &desc, 1); 3471 if (ret) 3472 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); 3473 3474 return ret; 3475 } 3476 3477 static int 3478 hns3_get_tc_num(struct hns3_hw *hw) 3479 { 3480 int cnt = 0; 3481 uint8_t i; 3482 3483 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3484 if (hw->hw_tc_map & BIT(i)) 3485 cnt++; 3486 return cnt; 3487 } 3488 3489 static uint32_t 3490 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3491 { 3492 struct hns3_priv_buf *priv; 3493 uint32_t rx_priv = 0; 3494 int i; 3495 3496 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3497 priv = &buf_alloc->priv_buf[i]; 3498 if (priv->enable) 3499 rx_priv += priv->buf_size; 3500 } 3501 return rx_priv; 3502 } 3503 3504 static uint32_t 3505 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3506 { 3507 uint32_t total_tx_size = 0; 3508 uint32_t i; 3509 3510 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3511 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 3512 3513 return total_tx_size; 3514 } 3515 3516 /* Get the number of pfc enabled TCs, which have private buffer */ 3517 static int 3518 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3519 { 3520 struct hns3_priv_buf *priv; 3521 int cnt = 0; 3522 uint8_t i; 3523 3524 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3525 priv = &buf_alloc->priv_buf[i]; 3526 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3527 cnt++; 3528 } 3529 3530 return cnt; 3531 } 3532 3533 /* Get the number of pfc disabled TCs, which have private buffer */ 3534 static int 3535 hns3_get_no_pfc_priv_num(struct hns3_hw *hw, 3536 struct hns3_pkt_buf_alloc *buf_alloc) 3537 { 3538 struct hns3_priv_buf *priv; 3539 int cnt = 0; 3540 uint8_t i; 3541 3542 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3543 priv = &buf_alloc->priv_buf[i]; 3544 if (hw->hw_tc_map & BIT(i) && 3545 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3546 cnt++; 3547 } 3548 3549 return cnt; 3550 } 3551 3552 static bool 3553 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, 3554 uint32_t rx_all) 3555 { 3556 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 3557 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3558 struct hns3_pf *pf = &hns->pf; 3559 uint32_t shared_buf, aligned_mps; 3560 uint32_t rx_priv; 3561 uint8_t tc_num; 3562 uint8_t i; 3563 3564 tc_num = hns3_get_tc_num(hw); 3565 aligned_mps = roundup(pf->mps, 
HNS3_BUF_SIZE_UNIT); 3566 3567 if (hns3_dev_dcb_supported(hw)) 3568 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + 3569 pf->dv_buf_size; 3570 else 3571 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF 3572 + pf->dv_buf_size; 3573 3574 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 3575 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), 3576 HNS3_BUF_SIZE_UNIT); 3577 3578 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); 3579 if (rx_all < rx_priv + shared_std) 3580 return false; 3581 3582 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); 3583 buf_alloc->s_buf.buf_size = shared_buf; 3584 if (hns3_dev_dcb_supported(hw)) { 3585 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; 3586 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 3587 - roundup(aligned_mps / HNS3_BUF_DIV_BY, 3588 HNS3_BUF_SIZE_UNIT); 3589 } else { 3590 buf_alloc->s_buf.self.high = 3591 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3592 buf_alloc->s_buf.self.low = aligned_mps; 3593 } 3594 3595 if (hns3_dev_dcb_supported(hw)) { 3596 hi_thrd = shared_buf - pf->dv_buf_size; 3597 3598 if (tc_num <= NEED_RESERVE_TC_NUM) 3599 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT / 3600 BUF_MAX_PERCENT; 3601 3602 if (tc_num) 3603 hi_thrd = hi_thrd / tc_num; 3604 3605 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); 3606 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); 3607 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; 3608 } else { 3609 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3610 lo_thrd = aligned_mps; 3611 } 3612 3613 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3614 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 3615 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 3616 } 3617 3618 return true; 3619 } 3620 3621 static bool 3622 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, 3623 struct hns3_pkt_buf_alloc *buf_alloc) 3624 { 3625 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3626 struct hns3_pf *pf = &hns->pf; 3627 struct hns3_priv_buf *priv; 3628 uint32_t aligned_mps; 3629 uint32_t rx_all; 3630 uint8_t i; 3631 3632 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3633 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3634 3635 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3636 priv = &buf_alloc->priv_buf[i]; 3637 3638 priv->enable = 0; 3639 priv->wl.low = 0; 3640 priv->wl.high = 0; 3641 priv->buf_size = 0; 3642 3643 if (!(hw->hw_tc_map & BIT(i))) 3644 continue; 3645 3646 priv->enable = 1; 3647 if (hw->dcb_info.hw_pfc_map & BIT(i)) { 3648 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT; 3649 priv->wl.high = roundup(priv->wl.low + aligned_mps, 3650 HNS3_BUF_SIZE_UNIT); 3651 } else { 3652 priv->wl.low = 0; 3653 priv->wl.high = max ? 
(aligned_mps * HNS3_BUF_MUL_BY) : 3654 aligned_mps; 3655 } 3656 3657 priv->buf_size = priv->wl.high + pf->dv_buf_size; 3658 } 3659 3660 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3661 } 3662 3663 static bool 3664 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw, 3665 struct hns3_pkt_buf_alloc *buf_alloc) 3666 { 3667 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3668 struct hns3_pf *pf = &hns->pf; 3669 struct hns3_priv_buf *priv; 3670 int no_pfc_priv_num; 3671 uint32_t rx_all; 3672 uint8_t mask; 3673 int i; 3674 3675 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3676 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc); 3677 3678 /* let the last to be cleared first */ 3679 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3680 priv = &buf_alloc->priv_buf[i]; 3681 mask = BIT((uint8_t)i); 3682 3683 if (hw->hw_tc_map & mask && 3684 !(hw->dcb_info.hw_pfc_map & mask)) { 3685 /* Clear the no pfc TC private buffer */ 3686 priv->wl.low = 0; 3687 priv->wl.high = 0; 3688 priv->buf_size = 0; 3689 priv->enable = 0; 3690 no_pfc_priv_num--; 3691 } 3692 3693 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3694 no_pfc_priv_num == 0) 3695 break; 3696 } 3697 3698 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3699 } 3700 3701 static bool 3702 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, 3703 struct hns3_pkt_buf_alloc *buf_alloc) 3704 { 3705 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3706 struct hns3_pf *pf = &hns->pf; 3707 struct hns3_priv_buf *priv; 3708 uint32_t rx_all; 3709 int pfc_priv_num; 3710 uint8_t mask; 3711 int i; 3712 3713 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3714 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc); 3715 3716 /* let the last to be cleared first */ 3717 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3718 priv = &buf_alloc->priv_buf[i]; 3719 mask = BIT((uint8_t)i); 3720 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) { 3721 /* Reduce the number of pfc TC with private buffer */ 3722 priv->wl.low = 0; 3723 priv->enable = 0; 3724 priv->wl.high = 0; 3725 priv->buf_size = 0; 3726 pfc_priv_num--; 3727 } 3728 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3729 pfc_priv_num == 0) 3730 break; 3731 } 3732 3733 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3734 } 3735 3736 static bool 3737 hns3_only_alloc_priv_buff(struct hns3_hw *hw, 3738 struct hns3_pkt_buf_alloc *buf_alloc) 3739 { 3740 #define COMPENSATE_BUFFER 0x3C00 3741 #define COMPENSATE_HALF_MPS_NUM 5 3742 #define PRIV_WL_GAP 0x1800 3743 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3744 struct hns3_pf *pf = &hns->pf; 3745 uint32_t tc_num = hns3_get_tc_num(hw); 3746 uint32_t half_mps = pf->mps >> 1; 3747 struct hns3_priv_buf *priv; 3748 uint32_t min_rx_priv; 3749 uint32_t rx_priv; 3750 uint8_t i; 3751 3752 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3753 if (tc_num) 3754 rx_priv = rx_priv / tc_num; 3755 3756 if (tc_num <= NEED_RESERVE_TC_NUM) 3757 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 3758 3759 /* 3760 * Minimum value of private buffer in rx direction (min_rx_priv) is 3761 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private 3762 * buffer if rx_priv is greater than min_rx_priv. 
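* As an illustrative calculation: COMPENSATE_BUFFER is 0x3C00 (15360 bytes,
* the 15KB term) and COMPENSATE_HALF_MPS_NUM * half_mps equals 2.5 * MPS,
* e.g. 3840 bytes for an MPS of 1536, so min_rx_priv would be
* dv_buf_size + 15360 + 3840, rounded up to HNS3_BUF_SIZE_UNIT.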
3763 */ 3764 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER + 3765 COMPENSATE_HALF_MPS_NUM * half_mps; 3766 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT); 3767 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT); 3768 3769 if (rx_priv < min_rx_priv) 3770 return false; 3771 3772 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3773 priv = &buf_alloc->priv_buf[i]; 3774 priv->enable = 0; 3775 priv->wl.low = 0; 3776 priv->wl.high = 0; 3777 priv->buf_size = 0; 3778 3779 if (!(hw->hw_tc_map & BIT(i))) 3780 continue; 3781 3782 priv->enable = 1; 3783 priv->buf_size = rx_priv; 3784 priv->wl.high = rx_priv - pf->dv_buf_size; 3785 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 3786 } 3787 3788 buf_alloc->s_buf.buf_size = 0; 3789 3790 return true; 3791 } 3792 3793 /* 3794 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs 3795 * @hw: pointer to struct hns3_hw 3796 * @buf_alloc: pointer to buffer calculation data 3797 * @return: 0: calculate sucessful, negative: fail 3798 */ 3799 static int 3800 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3801 { 3802 /* When DCB is not supported, rx private buffer is not allocated. */ 3803 if (!hns3_dev_dcb_supported(hw)) { 3804 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3805 struct hns3_pf *pf = &hns->pf; 3806 uint32_t rx_all = pf->pkt_buf_size; 3807 3808 rx_all -= hns3_get_tx_buff_alloced(buf_alloc); 3809 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all)) 3810 return -ENOMEM; 3811 3812 return 0; 3813 } 3814 3815 /* 3816 * Try to allocate privated packet buffer for all TCs without share 3817 * buffer. 3818 */ 3819 if (hns3_only_alloc_priv_buff(hw, buf_alloc)) 3820 return 0; 3821 3822 /* 3823 * Try to allocate privated packet buffer for all TCs with share 3824 * buffer. 3825 */ 3826 if (hns3_rx_buf_calc_all(hw, true, buf_alloc)) 3827 return 0; 3828 3829 /* 3830 * For different application scenes, the enabled port number, TC number 3831 * and no_drop TC number are different. In order to obtain the better 3832 * performance, software could allocate the buffer size and configure 3833 * the waterline by tring to decrease the private buffer size according 3834 * to the order, namely, waterline of valided tc, pfc disabled tc, pfc 3835 * enabled tc. 
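* That is, hns3_rx_buf_calc_all(hw, false, ...) first shrinks the waterlines
* of the valid TCs, then hns3_drop_nopfc_buf_till_fit() gives up the private
* buffer of PFC-disabled TCs, and finally hns3_drop_pfc_buf_till_fit() gives
* up the private buffer of PFC-enabled TCs.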
3836 */ 3837 if (hns3_rx_buf_calc_all(hw, false, buf_alloc)) 3838 return 0; 3839 3840 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc)) 3841 return 0; 3842 3843 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc)) 3844 return 0; 3845 3846 return -ENOMEM; 3847 } 3848 3849 static int 3850 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3851 { 3852 struct hns3_rx_priv_buff_cmd *req; 3853 struct hns3_cmd_desc desc; 3854 uint32_t buf_size; 3855 int ret; 3856 int i; 3857 3858 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false); 3859 req = (struct hns3_rx_priv_buff_cmd *)desc.data; 3860 3861 /* Alloc private buffer TCs */ 3862 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3863 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i]; 3864 3865 req->buf_num[i] = 3866 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S); 3867 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B); 3868 } 3869 3870 buf_size = buf_alloc->s_buf.buf_size; 3871 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) | 3872 (1 << HNS3_TC0_PRI_BUF_EN_B)); 3873 3874 ret = hns3_cmd_send(hw, &desc, 1); 3875 if (ret) 3876 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret); 3877 3878 return ret; 3879 } 3880 3881 static int 3882 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3883 { 3884 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2 3885 struct hns3_rx_priv_wl_buf *req; 3886 struct hns3_priv_buf *priv; 3887 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM]; 3888 int i, j; 3889 int ret; 3890 3891 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) { 3892 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC, 3893 false); 3894 req = (struct hns3_rx_priv_wl_buf *)desc[i].data; 3895 3896 /* The first descriptor set the NEXT bit to 1 */ 3897 if (i == 0) 3898 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3899 else 3900 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3901 3902 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 3903 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j; 3904 3905 priv = &buf_alloc->priv_buf[idx]; 3906 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >> 3907 HNS3_BUF_UNIT_S); 3908 req->tc_wl[j].high |= 3909 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3910 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >> 3911 HNS3_BUF_UNIT_S); 3912 req->tc_wl[j].low |= 3913 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3914 } 3915 } 3916 3917 /* Send 2 descriptor at one time */ 3918 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM); 3919 if (ret) 3920 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d", 3921 ret); 3922 return ret; 3923 } 3924 3925 static int 3926 hns3_common_thrd_config(struct hns3_hw *hw, 3927 struct hns3_pkt_buf_alloc *buf_alloc) 3928 { 3929 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2 3930 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf; 3931 struct hns3_rx_com_thrd *req; 3932 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM]; 3933 struct hns3_tc_thrd *tc; 3934 int tc_idx; 3935 int i, j; 3936 int ret; 3937 3938 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) { 3939 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC, 3940 false); 3941 req = (struct hns3_rx_com_thrd *)&desc[i].data; 3942 3943 /* The first descriptor set the NEXT bit to 1 */ 3944 if (i == 0) 3945 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3946 else 3947 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3948 3949 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 3950 tc_idx 
= i * HNS3_TC_NUM_ONE_DESC + j; 3951 tc = &s_buf->tc_thrd[tc_idx]; 3952 3953 req->com_thrd[j].high = 3954 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); 3955 req->com_thrd[j].high |= 3956 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3957 req->com_thrd[j].low = 3958 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); 3959 req->com_thrd[j].low |= 3960 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3961 } 3962 } 3963 3964 /* Send 2 descriptors at one time */ 3965 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); 3966 if (ret) 3967 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); 3968 3969 return ret; 3970 } 3971 3972 static int 3973 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3974 { 3975 struct hns3_shared_buf *buf = &buf_alloc->s_buf; 3976 struct hns3_rx_com_wl *req; 3977 struct hns3_cmd_desc desc; 3978 int ret; 3979 3980 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); 3981 3982 req = (struct hns3_rx_com_wl *)desc.data; 3983 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); 3984 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3985 3986 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); 3987 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3988 3989 ret = hns3_cmd_send(hw, &desc, 1); 3990 if (ret) 3991 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); 3992 3993 return ret; 3994 } 3995 3996 int 3997 hns3_buffer_alloc(struct hns3_hw *hw) 3998 { 3999 struct hns3_pkt_buf_alloc pkt_buf; 4000 int ret; 4001 4002 memset(&pkt_buf, 0, sizeof(pkt_buf)); 4003 ret = hns3_tx_buffer_calc(hw, &pkt_buf); 4004 if (ret) { 4005 PMD_INIT_LOG(ERR, 4006 "could not calc tx buffer size for all TCs %d", 4007 ret); 4008 return ret; 4009 } 4010 4011 ret = hns3_tx_buffer_alloc(hw, &pkt_buf); 4012 if (ret) { 4013 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); 4014 return ret; 4015 } 4016 4017 ret = hns3_rx_buffer_calc(hw, &pkt_buf); 4018 if (ret) { 4019 PMD_INIT_LOG(ERR, 4020 "could not calc rx priv buffer size for all TCs %d", 4021 ret); 4022 return ret; 4023 } 4024 4025 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); 4026 if (ret) { 4027 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); 4028 return ret; 4029 } 4030 4031 if (hns3_dev_dcb_supported(hw)) { 4032 ret = hns3_rx_priv_wl_config(hw, &pkt_buf); 4033 if (ret) { 4034 PMD_INIT_LOG(ERR, 4035 "could not configure rx private waterline %d", 4036 ret); 4037 return ret; 4038 } 4039 4040 ret = hns3_common_thrd_config(hw, &pkt_buf); 4041 if (ret) { 4042 PMD_INIT_LOG(ERR, 4043 "could not configure common threshold %d", 4044 ret); 4045 return ret; 4046 } 4047 } 4048 4049 ret = hns3_common_wl_config(hw, &pkt_buf); 4050 if (ret) 4051 PMD_INIT_LOG(ERR, "could not configure common waterline %d", 4052 ret); 4053 4054 return ret; 4055 } 4056 4057 static int 4058 hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) 4059 { 4060 struct hns3_firmware_compat_cmd *req; 4061 struct hns3_cmd_desc desc; 4062 uint32_t compat = 0; 4063 4064 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false); 4065 req = (struct hns3_firmware_compat_cmd *)desc.data; 4066 4067 if (is_init) { 4068 hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1); 4069 hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0); 4070 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) 4071 hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1); 4072 } 4073 4074 req->compat = rte_cpu_to_le_32(compat); 4075 4076 return hns3_cmd_send(hw, &desc, 
1); 4077 } 4078 4079 static int 4080 hns3_mac_init(struct hns3_hw *hw) 4081 { 4082 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4083 struct hns3_mac *mac = &hw->mac; 4084 struct hns3_pf *pf = &hns->pf; 4085 int ret; 4086 4087 pf->support_sfp_query = true; 4088 mac->link_duplex = ETH_LINK_FULL_DUPLEX; 4089 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); 4090 if (ret) { 4091 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); 4092 return ret; 4093 } 4094 4095 mac->link_status = ETH_LINK_DOWN; 4096 4097 return hns3_config_mtu(hw, pf->mps); 4098 } 4099 4100 static int 4101 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) 4102 { 4103 #define HNS3_ETHERTYPE_SUCCESS_ADD 0 4104 #define HNS3_ETHERTYPE_ALREADY_ADD 1 4105 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 4106 #define HNS3_ETHERTYPE_KEY_CONFLICT 3 4107 int return_status; 4108 4109 if (cmdq_resp) { 4110 PMD_INIT_LOG(ERR, 4111 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", 4112 cmdq_resp); 4113 return -EIO; 4114 } 4115 4116 switch (resp_code) { 4117 case HNS3_ETHERTYPE_SUCCESS_ADD: 4118 case HNS3_ETHERTYPE_ALREADY_ADD: 4119 return_status = 0; 4120 break; 4121 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: 4122 PMD_INIT_LOG(ERR, 4123 "add mac ethertype failed for manager table overflow."); 4124 return_status = -EIO; 4125 break; 4126 case HNS3_ETHERTYPE_KEY_CONFLICT: 4127 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); 4128 return_status = -EIO; 4129 break; 4130 default: 4131 PMD_INIT_LOG(ERR, 4132 "add mac ethertype failed for undefined, code=%u.", 4133 resp_code); 4134 return_status = -EIO; 4135 break; 4136 } 4137 4138 return return_status; 4139 } 4140 4141 static int 4142 hns3_add_mgr_tbl(struct hns3_hw *hw, 4143 const struct hns3_mac_mgr_tbl_entry_cmd *req) 4144 { 4145 struct hns3_cmd_desc desc; 4146 uint8_t resp_code; 4147 uint16_t retval; 4148 int ret; 4149 4150 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); 4151 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); 4152 4153 ret = hns3_cmd_send(hw, &desc, 1); 4154 if (ret) { 4155 PMD_INIT_LOG(ERR, 4156 "add mac ethertype failed for cmd_send, ret =%d.", 4157 ret); 4158 return ret; 4159 } 4160 4161 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 4162 retval = rte_le_to_cpu_16(desc.retval); 4163 4164 return hns3_get_mac_ethertype_cmd_status(retval, resp_code); 4165 } 4166 4167 static void 4168 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, 4169 int *table_item_num) 4170 { 4171 struct hns3_mac_mgr_tbl_entry_cmd *tbl; 4172 4173 /* 4174 * In current version, we add one item in management table as below: 4175 * 0x0180C200000E -- LLDP MC address 4176 */ 4177 tbl = mgr_table; 4178 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; 4179 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); 4180 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); 4181 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); 4182 tbl->i_port_bitmap = 0x1; 4183 *table_item_num = 1; 4184 } 4185 4186 static int 4187 hns3_init_mgr_tbl(struct hns3_hw *hw) 4188 { 4189 #define HNS_MAC_MGR_TBL_MAX_SIZE 16 4190 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; 4191 int table_item_num; 4192 int ret; 4193 int i; 4194 4195 memset(mgr_table, 0, sizeof(mgr_table)); 4196 hns3_prepare_mgr_tbl(mgr_table, &table_item_num); 4197 for (i = 0; i < table_item_num; i++) { 4198 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); 4199 if (ret) { 4200 PMD_INIT_LOG(ERR, "add 
mac ethertype failed, ret =%d", 4201 ret); 4202 return ret; 4203 } 4204 } 4205 4206 return 0; 4207 } 4208 4209 static void 4210 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, 4211 bool en_mc, bool en_bc, int vport_id) 4212 { 4213 if (!param) 4214 return; 4215 4216 memset(param, 0, sizeof(struct hns3_promisc_param)); 4217 if (en_uc) 4218 param->enable = HNS3_PROMISC_EN_UC; 4219 if (en_mc) 4220 param->enable |= HNS3_PROMISC_EN_MC; 4221 if (en_bc) 4222 param->enable |= HNS3_PROMISC_EN_BC; 4223 param->vf_id = vport_id; 4224 } 4225 4226 static int 4227 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) 4228 { 4229 struct hns3_promisc_cfg_cmd *req; 4230 struct hns3_cmd_desc desc; 4231 int ret; 4232 4233 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false); 4234 4235 req = (struct hns3_promisc_cfg_cmd *)desc.data; 4236 req->vf_id = param->vf_id; 4237 req->flag = (param->enable << HNS3_PROMISC_EN_B) | 4238 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B; 4239 4240 ret = hns3_cmd_send(hw, &desc, 1); 4241 if (ret) 4242 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret); 4243 4244 return ret; 4245 } 4246 4247 static int 4248 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) 4249 { 4250 struct hns3_promisc_param param; 4251 bool en_bc_pmc = true; 4252 uint8_t vf_id; 4253 4254 /* 4255 * In current version VF is not supported when PF is driven by DPDK 4256 * driver, just need to configure parameters for PF vport. 4257 */ 4258 vf_id = HNS3_PF_FUNC_ID; 4259 4260 hns3_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); 4261 return hns3_cmd_set_promisc_mode(hw, ¶m); 4262 } 4263 4264 static int 4265 hns3_promisc_init(struct hns3_hw *hw) 4266 { 4267 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4268 struct hns3_pf *pf = &hns->pf; 4269 struct hns3_promisc_param param; 4270 uint16_t func_id; 4271 int ret; 4272 4273 ret = hns3_set_promisc_mode(hw, false, false); 4274 if (ret) { 4275 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret); 4276 return ret; 4277 } 4278 4279 /* 4280 * In current version VFs are not supported when PF is driven by DPDK 4281 * driver. After PF has been taken over by DPDK, the original VF will 4282 * be invalid. So, there is a possibility of entry residues. It should 4283 * clear VFs's promisc mode to avoid unnecessary bandwidth usage 4284 * during init. 4285 */ 4286 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) { 4287 hns3_promisc_param_init(¶m, false, false, false, func_id); 4288 ret = hns3_cmd_set_promisc_mode(hw, ¶m); 4289 if (ret) { 4290 PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode," 4291 " ret = %d", func_id, ret); 4292 return ret; 4293 } 4294 } 4295 4296 return 0; 4297 } 4298 4299 static void 4300 hns3_promisc_uninit(struct hns3_hw *hw) 4301 { 4302 struct hns3_promisc_param param; 4303 uint16_t func_id; 4304 int ret; 4305 4306 func_id = HNS3_PF_FUNC_ID; 4307 4308 /* 4309 * In current version VFs are not supported when PF is driven by 4310 * DPDK driver, and VFs' promisc mode status has been cleared during 4311 * init and their status will not change. So just clear PF's promisc 4312 * mode status during uninit. 
4313 */ 4314 hns3_promisc_param_init(¶m, false, false, false, func_id); 4315 ret = hns3_cmd_set_promisc_mode(hw, ¶m); 4316 if (ret) 4317 PMD_INIT_LOG(ERR, "failed to clear promisc status during" 4318 " uninit, ret = %d", ret); 4319 } 4320 4321 static int 4322 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) 4323 { 4324 bool allmulti = dev->data->all_multicast ? true : false; 4325 struct hns3_adapter *hns = dev->data->dev_private; 4326 struct hns3_hw *hw = &hns->hw; 4327 uint64_t offloads; 4328 int err; 4329 int ret; 4330 4331 rte_spinlock_lock(&hw->lock); 4332 ret = hns3_set_promisc_mode(hw, true, true); 4333 if (ret) { 4334 rte_spinlock_unlock(&hw->lock); 4335 hns3_err(hw, "failed to enable promiscuous mode, ret = %d", 4336 ret); 4337 return ret; 4338 } 4339 4340 /* 4341 * When promiscuous mode was enabled, disable the vlan filter to let 4342 * all packets coming in in the receiving direction. 4343 */ 4344 offloads = dev->data->dev_conf.rxmode.offloads; 4345 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4346 ret = hns3_enable_vlan_filter(hns, false); 4347 if (ret) { 4348 hns3_err(hw, "failed to enable promiscuous mode due to " 4349 "failure to disable vlan filter, ret = %d", 4350 ret); 4351 err = hns3_set_promisc_mode(hw, false, allmulti); 4352 if (err) 4353 hns3_err(hw, "failed to restore promiscuous " 4354 "status after disable vlan filter " 4355 "failed during enabling promiscuous " 4356 "mode, ret = %d", ret); 4357 } 4358 } 4359 4360 rte_spinlock_unlock(&hw->lock); 4361 4362 return ret; 4363 } 4364 4365 static int 4366 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) 4367 { 4368 bool allmulti = dev->data->all_multicast ? true : false; 4369 struct hns3_adapter *hns = dev->data->dev_private; 4370 struct hns3_hw *hw = &hns->hw; 4371 uint64_t offloads; 4372 int err; 4373 int ret; 4374 4375 /* If now in all_multicast mode, must remain in all_multicast mode. 
*/ 4376 rte_spinlock_lock(&hw->lock); 4377 ret = hns3_set_promisc_mode(hw, false, allmulti); 4378 if (ret) { 4379 rte_spinlock_unlock(&hw->lock); 4380 hns3_err(hw, "failed to disable promiscuous mode, ret = %d", 4381 ret); 4382 return ret; 4383 } 4384 /* when promiscuous mode was disabled, restore the vlan filter status */ 4385 offloads = dev->data->dev_conf.rxmode.offloads; 4386 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4387 ret = hns3_enable_vlan_filter(hns, true); 4388 if (ret) { 4389 hns3_err(hw, "failed to disable promiscuous mode due to" 4390 " failure to restore vlan filter, ret = %d", 4391 ret); 4392 err = hns3_set_promisc_mode(hw, true, true); 4393 if (err) 4394 hns3_err(hw, "failed to restore promiscuous " 4395 "status after enabling vlan filter " 4396 "failed during disabling promiscuous " 4397 "mode, ret = %d", ret); 4398 } 4399 } 4400 rte_spinlock_unlock(&hw->lock); 4401 4402 return ret; 4403 } 4404 4405 static int 4406 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) 4407 { 4408 struct hns3_adapter *hns = dev->data->dev_private; 4409 struct hns3_hw *hw = &hns->hw; 4410 int ret; 4411 4412 if (dev->data->promiscuous) 4413 return 0; 4414 4415 rte_spinlock_lock(&hw->lock); 4416 ret = hns3_set_promisc_mode(hw, false, true); 4417 rte_spinlock_unlock(&hw->lock); 4418 if (ret) 4419 hns3_err(hw, "failed to enable allmulticast mode, ret = %d", 4420 ret); 4421 4422 return ret; 4423 } 4424 4425 static int 4426 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) 4427 { 4428 struct hns3_adapter *hns = dev->data->dev_private; 4429 struct hns3_hw *hw = &hns->hw; 4430 int ret; 4431 4432 /* If now in promiscuous mode, must remain in all_multicast mode. */ 4433 if (dev->data->promiscuous) 4434 return 0; 4435 4436 rte_spinlock_lock(&hw->lock); 4437 ret = hns3_set_promisc_mode(hw, false, false); 4438 rte_spinlock_unlock(&hw->lock); 4439 if (ret) 4440 hns3_err(hw, "failed to disable allmulticast mode, ret = %d", 4441 ret); 4442 4443 return ret; 4444 } 4445 4446 static int 4447 hns3_dev_promisc_restore(struct hns3_adapter *hns) 4448 { 4449 struct hns3_hw *hw = &hns->hw; 4450 bool allmulti = hw->data->all_multicast ? 
true : false; 4451 int ret; 4452 4453 if (hw->data->promiscuous) { 4454 ret = hns3_set_promisc_mode(hw, true, true); 4455 if (ret) 4456 hns3_err(hw, "failed to restore promiscuous mode, " 4457 "ret = %d", ret); 4458 return ret; 4459 } 4460 4461 ret = hns3_set_promisc_mode(hw, false, allmulti); 4462 if (ret) 4463 hns3_err(hw, "failed to restore allmulticast mode, ret = %d", 4464 ret); 4465 return ret; 4466 } 4467 4468 static int 4469 hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed) 4470 { 4471 struct hns3_sfp_speed_cmd *resp; 4472 struct hns3_cmd_desc desc; 4473 int ret; 4474 4475 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SFP_GET_SPEED, true); 4476 resp = (struct hns3_sfp_speed_cmd *)desc.data; 4477 ret = hns3_cmd_send(hw, &desc, 1); 4478 if (ret == -EOPNOTSUPP) { 4479 hns3_err(hw, "IMP do not support get SFP speed %d", ret); 4480 return ret; 4481 } else if (ret) { 4482 hns3_err(hw, "get sfp speed failed %d", ret); 4483 return ret; 4484 } 4485 4486 *speed = resp->sfp_speed; 4487 4488 return 0; 4489 } 4490 4491 static uint8_t 4492 hns3_check_speed_dup(uint8_t duplex, uint32_t speed) 4493 { 4494 if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M)) 4495 duplex = ETH_LINK_FULL_DUPLEX; 4496 4497 return duplex; 4498 } 4499 4500 static int 4501 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 4502 { 4503 struct hns3_mac *mac = &hw->mac; 4504 int ret; 4505 4506 duplex = hns3_check_speed_dup(duplex, speed); 4507 if (mac->link_speed == speed && mac->link_duplex == duplex) 4508 return 0; 4509 4510 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex); 4511 if (ret) 4512 return ret; 4513 4514 ret = hns3_port_shaper_update(hw, speed); 4515 if (ret) 4516 return ret; 4517 4518 mac->link_speed = speed; 4519 mac->link_duplex = duplex; 4520 4521 return 0; 4522 } 4523 4524 static int 4525 hns3_update_fiber_link_info(struct hns3_hw *hw) 4526 { 4527 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 4528 uint32_t speed; 4529 int ret; 4530 4531 /* If IMP do not support get SFP/qSFP speed, return directly */ 4532 if (!pf->support_sfp_query) 4533 return 0; 4534 4535 ret = hns3_get_sfp_speed(hw, &speed); 4536 if (ret == -EOPNOTSUPP) { 4537 pf->support_sfp_query = false; 4538 return ret; 4539 } else if (ret) 4540 return ret; 4541 4542 if (speed == ETH_SPEED_NUM_NONE) 4543 return 0; /* do nothing if no SFP */ 4544 4545 /* Config full duplex for SFP */ 4546 return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX); 4547 } 4548 4549 static void 4550 hns3_parse_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac) 4551 { 4552 struct hns3_phy_params_bd0_cmd *req; 4553 4554 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 4555 mac->link_speed = rte_le_to_cpu_32(req->speed); 4556 mac->link_duplex = hns3_get_bit(req->duplex, 4557 HNS3_PHY_DUPLEX_CFG_B); 4558 mac->link_autoneg = hns3_get_bit(req->autoneg, 4559 HNS3_PHY_AUTONEG_CFG_B); 4560 mac->supported_capa = rte_le_to_cpu_32(req->supported); 4561 mac->advertising = rte_le_to_cpu_32(req->advertising); 4562 mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising); 4563 mac->support_autoneg = !!(mac->supported_capa & 4564 HNS3_PHY_LINK_MODE_AUTONEG_BIT); 4565 } 4566 4567 static int 4568 hns3_get_phy_params(struct hns3_hw *hw, struct hns3_mac *mac) 4569 { 4570 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 4571 uint16_t i; 4572 int ret; 4573 4574 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 4575 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 4576 true); 4577 desc[i].flag |= 
rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4578 } 4579 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true); 4580 4581 ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 4582 if (ret) { 4583 hns3_err(hw, "get phy parameters failed, ret = %d.", ret); 4584 return ret; 4585 } 4586 4587 hns3_parse_phy_params(desc, mac); 4588 4589 return 0; 4590 } 4591 4592 static int 4593 hns3_update_phy_link_info(struct hns3_hw *hw) 4594 { 4595 struct hns3_mac *mac = &hw->mac; 4596 struct hns3_mac mac_info; 4597 int ret; 4598 4599 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4600 ret = hns3_get_phy_params(hw, &mac_info); 4601 if (ret) 4602 return ret; 4603 4604 if (mac_info.link_speed != mac->link_speed) { 4605 ret = hns3_port_shaper_update(hw, mac_info.link_speed); 4606 if (ret) 4607 return ret; 4608 } 4609 4610 mac->link_speed = mac_info.link_speed; 4611 mac->link_duplex = mac_info.link_duplex; 4612 mac->link_autoneg = mac_info.link_autoneg; 4613 mac->supported_capa = mac_info.supported_capa; 4614 mac->advertising = mac_info.advertising; 4615 mac->lp_advertising = mac_info.lp_advertising; 4616 mac->support_autoneg = mac_info.support_autoneg; 4617 4618 return 0; 4619 } 4620 4621 static int 4622 hns3_update_link_info(struct rte_eth_dev *eth_dev) 4623 { 4624 struct hns3_adapter *hns = eth_dev->data->dev_private; 4625 struct hns3_hw *hw = &hns->hw; 4626 int ret = 0; 4627 4628 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) 4629 ret = hns3_update_phy_link_info(hw); 4630 else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) 4631 ret = hns3_update_fiber_link_info(hw); 4632 4633 return ret; 4634 } 4635 4636 static int 4637 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) 4638 { 4639 struct hns3_config_mac_mode_cmd *req; 4640 struct hns3_cmd_desc desc; 4641 uint32_t loop_en = 0; 4642 uint8_t val = 0; 4643 int ret; 4644 4645 req = (struct hns3_config_mac_mode_cmd *)desc.data; 4646 4647 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); 4648 if (enable) 4649 val = 1; 4650 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); 4651 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); 4652 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); 4653 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); 4654 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); 4655 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); 4656 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); 4657 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); 4658 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); 4659 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); 4660 4661 /* 4662 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC 4663 * when receiving frames. Otherwise, CRC will be stripped. 
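 *
 * For reference, an application opts in to keeping the CRC from its own
 * code roughly as follows (illustrative sketch, not part of this driver):
 *   port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 * in which case the branch below leaves HNS3_MAC_RX_FCS_STRIP_B cleared.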
4664 */ 4665 if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) 4666 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); 4667 else 4668 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); 4669 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); 4670 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); 4671 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); 4672 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); 4673 4674 ret = hns3_cmd_send(hw, &desc, 1); 4675 if (ret) 4676 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret); 4677 4678 return ret; 4679 } 4680 4681 static int 4682 hns3_get_mac_link_status(struct hns3_hw *hw) 4683 { 4684 struct hns3_link_status_cmd *req; 4685 struct hns3_cmd_desc desc; 4686 int link_status; 4687 int ret; 4688 4689 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); 4690 ret = hns3_cmd_send(hw, &desc, 1); 4691 if (ret) { 4692 hns3_err(hw, "get link status cmd failed %d", ret); 4693 return ETH_LINK_DOWN; 4694 } 4695 4696 req = (struct hns3_link_status_cmd *)desc.data; 4697 link_status = req->status & HNS3_LINK_STATUS_UP_M; 4698 4699 return !!link_status; 4700 } 4701 4702 static bool 4703 hns3_update_link_status(struct hns3_hw *hw) 4704 { 4705 int state; 4706 4707 state = hns3_get_mac_link_status(hw); 4708 if (state != hw->mac.link_status) { 4709 hw->mac.link_status = state; 4710 hns3_warn(hw, "Link status change to %s!", state ? "up" : "down"); 4711 hns3_config_mac_tnl_int(hw, 4712 state == ETH_LINK_UP ? true : false); 4713 return true; 4714 } 4715 4716 return false; 4717 } 4718 4719 /* 4720 * Currently, the PF driver gets the link status in two ways: 4721 * 1) Periodic polling in the intr thread context, where the driver calls 4722 * hns3_update_link_status to update the link status. 4723 * 2) Firmware reports an async interrupt; the driver processes the event in 4724 * the intr thread context and calls hns3_update_link_status to update the link status. 4725 * 4726 * If a link status change is detected, the driver needs to report LSE. One method 4727 * is to add the report-LSE logic in hns3_update_link_status. 4728 * 4729 * But the PF driver op (link_update) also calls hns3_update_link_status to 4730 * update the link status. 4731 * If we reported LSE in hns3_update_link_status, it could lead to a deadlock in a 4732 * bonding application. 4733 * 4734 * So add one new API which is used only in the intr thread context.
4735 */ 4736 void 4737 hns3_update_link_status_and_event(struct hns3_hw *hw) 4738 { 4739 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 4740 bool changed = hns3_update_link_status(hw); 4741 if (changed) 4742 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 4743 } 4744 4745 static void 4746 hns3_service_handler(void *param) 4747 { 4748 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 4749 struct hns3_adapter *hns = eth_dev->data->dev_private; 4750 struct hns3_hw *hw = &hns->hw; 4751 4752 if (!hns3_is_reset_pending(hns)) { 4753 hns3_update_link_status_and_event(hw); 4754 hns3_update_link_info(eth_dev); 4755 } else { 4756 hns3_warn(hw, "Cancel the query when reset is pending"); 4757 } 4758 4759 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); 4760 } 4761 4762 static int 4763 hns3_init_hardware(struct hns3_adapter *hns) 4764 { 4765 struct hns3_hw *hw = &hns->hw; 4766 int ret; 4767 4768 ret = hns3_map_tqp(hw); 4769 if (ret) { 4770 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); 4771 return ret; 4772 } 4773 4774 ret = hns3_init_umv_space(hw); 4775 if (ret) { 4776 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); 4777 return ret; 4778 } 4779 4780 ret = hns3_mac_init(hw); 4781 if (ret) { 4782 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); 4783 goto err_mac_init; 4784 } 4785 4786 ret = hns3_init_mgr_tbl(hw); 4787 if (ret) { 4788 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); 4789 goto err_mac_init; 4790 } 4791 4792 ret = hns3_promisc_init(hw); 4793 if (ret) { 4794 PMD_INIT_LOG(ERR, "Failed to init promisc: %d", 4795 ret); 4796 goto err_mac_init; 4797 } 4798 4799 ret = hns3_init_vlan_config(hns); 4800 if (ret) { 4801 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); 4802 goto err_mac_init; 4803 } 4804 4805 ret = hns3_dcb_init(hw); 4806 if (ret) { 4807 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); 4808 goto err_mac_init; 4809 } 4810 4811 ret = hns3_init_fd_config(hns); 4812 if (ret) { 4813 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); 4814 goto err_mac_init; 4815 } 4816 4817 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); 4818 if (ret) { 4819 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); 4820 goto err_mac_init; 4821 } 4822 4823 ret = hns3_config_gro(hw, false); 4824 if (ret) { 4825 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 4826 goto err_mac_init; 4827 } 4828 4829 /* 4830 * In the initialization clearing the all hardware mapping relationship 4831 * configurations between queues and interrupt vectors is needed, so 4832 * some error caused by the residual configurations, such as the 4833 * unexpected interrupt, can be avoid. 4834 */ 4835 ret = hns3_init_ring_with_vector(hw); 4836 if (ret) { 4837 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); 4838 goto err_mac_init; 4839 } 4840 4841 /* 4842 * Requiring firmware to enable some features, driver can 4843 * still work without it. 
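 *
 * The features requested here are the ones set up in
 * hns3_firmware_compat_config() above: link event reporting
 * (HNS3_LINK_EVENT_REPORT_EN_B) and, for copper ports, letting firmware
 * drive the PHY (HNS3_FIRMWARE_PHY_DRIVER_EN_B).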
4844 */ 4845 ret = hns3_firmware_compat_config(hw, true); 4846 if (ret) 4847 PMD_INIT_LOG(WARNING, "firmware compatible features not " 4848 "supported, ret = %d.", ret); 4849 4850 return 0; 4851 4852 err_mac_init: 4853 hns3_uninit_umv_space(hw); 4854 return ret; 4855 } 4856 4857 static int 4858 hns3_clear_hw(struct hns3_hw *hw) 4859 { 4860 struct hns3_cmd_desc desc; 4861 int ret; 4862 4863 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false); 4864 4865 ret = hns3_cmd_send(hw, &desc, 1); 4866 if (ret && ret != -EOPNOTSUPP) 4867 return ret; 4868 4869 return 0; 4870 } 4871 4872 static void 4873 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable) 4874 { 4875 uint32_t val; 4876 4877 /* 4878 * The new firmware supports reporting more hardware error types via 4879 * MSI-X mode. These errors are defined as RAS errors in hardware 4880 * and belong to a different type from the MSI-X errors processed 4881 * by the network driver. 4882 * 4883 * The network driver should enable the new error reporting during initialization. 4884 */ 4885 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 4886 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0); 4887 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); 4888 } 4889 4890 static int 4891 hns3_init_pf(struct rte_eth_dev *eth_dev) 4892 { 4893 struct rte_device *dev = eth_dev->device; 4894 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 4895 struct hns3_adapter *hns = eth_dev->data->dev_private; 4896 struct hns3_hw *hw = &hns->hw; 4897 int ret; 4898 4899 PMD_INIT_FUNC_TRACE(); 4900 4901 /* Get hardware io base address from pcie BAR2 IO space */ 4902 hw->io_base = pci_dev->mem_resource[2].addr; 4903 4904 /* Firmware command queue initialize */ 4905 ret = hns3_cmd_init_queue(hw); 4906 if (ret) { 4907 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); 4908 goto err_cmd_init_queue; 4909 } 4910 4911 hns3_clear_all_event_cause(hw); 4912 4913 /* Firmware command initialize */ 4914 ret = hns3_cmd_init(hw); 4915 if (ret) { 4916 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); 4917 goto err_cmd_init; 4918 } 4919 4920 /* 4921 * To ensure that the hardware environment is clean during 4922 * initialization, the driver actively clears the hardware environment 4923 * during initialization, including PF and corresponding VFs' vlan, mac, 4924 * flow table configurations, etc. 4925 */ 4926 ret = hns3_clear_hw(hw); 4927 if (ret) { 4928 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret); 4929 goto err_cmd_init; 4930 } 4931 4932 /* Clear the hardware statistics held in the imissed registers.
*/ 4933 ret = hns3_update_imissed_stats(hw, true); 4934 if (ret) { 4935 hns3_err(hw, "clear imissed stats failed, ret = %d", ret); 4936 goto err_cmd_init; 4937 } 4938 4939 hns3_config_all_msix_error(hw, true); 4940 4941 ret = rte_intr_callback_register(&pci_dev->intr_handle, 4942 hns3_interrupt_handler, 4943 eth_dev); 4944 if (ret) { 4945 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); 4946 goto err_intr_callback_register; 4947 } 4948 4949 /* Enable interrupt */ 4950 rte_intr_enable(&pci_dev->intr_handle); 4951 hns3_pf_enable_irq0(hw); 4952 4953 /* Get configuration */ 4954 ret = hns3_get_configuration(hw); 4955 if (ret) { 4956 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); 4957 goto err_get_config; 4958 } 4959 4960 ret = hns3_tqp_stats_init(hw); 4961 if (ret) 4962 goto err_get_config; 4963 4964 ret = hns3_init_hardware(hns); 4965 if (ret) { 4966 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret); 4967 goto err_init_hw; 4968 } 4969 4970 /* Initialize flow director filter list & hash */ 4971 ret = hns3_fdir_filter_init(hns); 4972 if (ret) { 4973 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret); 4974 goto err_fdir; 4975 } 4976 4977 hns3_rss_set_default_args(hw); 4978 4979 ret = hns3_enable_hw_error_intr(hns, true); 4980 if (ret) { 4981 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", 4982 ret); 4983 goto err_enable_intr; 4984 } 4985 4986 hns3_tm_conf_init(eth_dev); 4987 4988 return 0; 4989 4990 err_enable_intr: 4991 hns3_fdir_filter_uninit(hns); 4992 err_fdir: 4993 (void)hns3_firmware_compat_config(hw, false); 4994 hns3_uninit_umv_space(hw); 4995 err_init_hw: 4996 hns3_tqp_stats_uninit(hw); 4997 err_get_config: 4998 hns3_pf_disable_irq0(hw); 4999 rte_intr_disable(&pci_dev->intr_handle); 5000 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5001 eth_dev); 5002 err_intr_callback_register: 5003 err_cmd_init: 5004 hns3_cmd_uninit(hw); 5005 hns3_cmd_destroy_queue(hw); 5006 err_cmd_init_queue: 5007 hw->io_base = NULL; 5008 5009 return ret; 5010 } 5011 5012 static void 5013 hns3_uninit_pf(struct rte_eth_dev *eth_dev) 5014 { 5015 struct hns3_adapter *hns = eth_dev->data->dev_private; 5016 struct rte_device *dev = eth_dev->device; 5017 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5018 struct hns3_hw *hw = &hns->hw; 5019 5020 PMD_INIT_FUNC_TRACE(); 5021 5022 hns3_tm_conf_uninit(eth_dev); 5023 hns3_enable_hw_error_intr(hns, false); 5024 hns3_rss_uninit(hns); 5025 (void)hns3_config_gro(hw, false); 5026 hns3_promisc_uninit(hw); 5027 hns3_fdir_filter_uninit(hns); 5028 (void)hns3_firmware_compat_config(hw, false); 5029 hns3_uninit_umv_space(hw); 5030 hns3_tqp_stats_uninit(hw); 5031 hns3_config_mac_tnl_int(hw, false); 5032 hns3_pf_disable_irq0(hw); 5033 rte_intr_disable(&pci_dev->intr_handle); 5034 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5035 eth_dev); 5036 hns3_config_all_msix_error(hw, false); 5037 hns3_cmd_uninit(hw); 5038 hns3_cmd_destroy_queue(hw); 5039 hw->io_base = NULL; 5040 } 5041 5042 static int 5043 hns3_do_start(struct hns3_adapter *hns, bool reset_queue) 5044 { 5045 struct hns3_hw *hw = &hns->hw; 5046 int ret; 5047 5048 ret = hns3_dcb_cfg_update(hns); 5049 if (ret) 5050 return ret; 5051 5052 /* 5053 * The hns3_dcb_cfg_update may configure the TM module, so 5054 * hns3_tm_conf_update must be called later.
5055 */ 5056 ret = hns3_tm_conf_update(hw); 5057 if (ret) { 5058 PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret); 5059 return ret; 5060 } 5061 5062 hns3_enable_rxd_adv_layout(hw); 5063 5064 ret = hns3_init_queues(hns, reset_queue); 5065 if (ret) { 5066 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret); 5067 return ret; 5068 } 5069 5070 ret = hns3_cfg_mac_mode(hw, true); 5071 if (ret) { 5072 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret); 5073 goto err_config_mac_mode; 5074 } 5075 return 0; 5076 5077 err_config_mac_mode: 5078 hns3_dev_release_mbufs(hns); 5079 /* 5080 * Here is exception handling, hns3_reset_all_tqps will have the 5081 * corresponding error message if it is handled incorrectly, so it is 5082 * not necessary to check hns3_reset_all_tqps return value, here keep 5083 * ret as the error code causing the exception. 5084 */ 5085 (void)hns3_reset_all_tqps(hns); 5086 return ret; 5087 } 5088 5089 static int 5090 hns3_map_rx_interrupt(struct rte_eth_dev *dev) 5091 { 5092 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5093 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5094 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5095 uint16_t base = RTE_INTR_VEC_ZERO_OFFSET; 5096 uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5097 uint32_t intr_vector; 5098 uint16_t q_id; 5099 int ret; 5100 5101 /* 5102 * hns3 needs a separate interrupt to be used as event interrupt which 5103 * could not be shared with task queue pair, so KERNEL drivers need 5104 * support multiple interrupt vectors. 5105 */ 5106 if (dev->data->dev_conf.intr_conf.rxq == 0 || 5107 !rte_intr_cap_multiple(intr_handle)) 5108 return 0; 5109 5110 rte_intr_disable(intr_handle); 5111 intr_vector = hw->used_rx_queues; 5112 /* creates event fd for each intr vector when MSIX is used */ 5113 if (rte_intr_efd_enable(intr_handle, intr_vector)) 5114 return -EINVAL; 5115 5116 if (intr_handle->intr_vec == NULL) { 5117 intr_handle->intr_vec = 5118 rte_zmalloc("intr_vec", 5119 hw->used_rx_queues * sizeof(int), 0); 5120 if (intr_handle->intr_vec == NULL) { 5121 hns3_err(hw, "failed to allocate %u rx_queues intr_vec", 5122 hw->used_rx_queues); 5123 ret = -ENOMEM; 5124 goto alloc_intr_vec_error; 5125 } 5126 } 5127 5128 if (rte_intr_allow_others(intr_handle)) { 5129 vec = RTE_INTR_VEC_RXTX_OFFSET; 5130 base = RTE_INTR_VEC_RXTX_OFFSET; 5131 } 5132 5133 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5134 ret = hns3_bind_ring_with_vector(hw, vec, true, 5135 HNS3_RING_TYPE_RX, q_id); 5136 if (ret) 5137 goto bind_vector_error; 5138 intr_handle->intr_vec[q_id] = vec; 5139 /* 5140 * If there are not enough efds (e.g. not enough interrupt), 5141 * remaining queues will be bond to the last interrupt. 
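 *
 * For example (the figures are illustrative only): with 8 Rx queues but
 * only 4 event fds, queues 0..2 get vectors base, base + 1 and base + 2,
 * while queues 3..7 all share the last vector, base + 3.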
5142 */ 5143 if (vec < base + intr_handle->nb_efd - 1) 5144 vec++; 5145 } 5146 rte_intr_enable(intr_handle); 5147 return 0; 5148 5149 bind_vector_error: 5150 rte_free(intr_handle->intr_vec); 5151 intr_handle->intr_vec = NULL; 5152 alloc_intr_vec_error: 5153 rte_intr_efd_disable(intr_handle); 5154 return ret; 5155 } 5156 5157 static int 5158 hns3_restore_rx_interrupt(struct hns3_hw *hw) 5159 { 5160 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 5161 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5162 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5163 uint16_t q_id; 5164 int ret; 5165 5166 if (dev->data->dev_conf.intr_conf.rxq == 0) 5167 return 0; 5168 5169 if (rte_intr_dp_is_en(intr_handle)) { 5170 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5171 ret = hns3_bind_ring_with_vector(hw, 5172 intr_handle->intr_vec[q_id], true, 5173 HNS3_RING_TYPE_RX, q_id); 5174 if (ret) 5175 return ret; 5176 } 5177 } 5178 5179 return 0; 5180 } 5181 5182 static void 5183 hns3_restore_filter(struct rte_eth_dev *dev) 5184 { 5185 hns3_restore_rss_filter(dev); 5186 } 5187 5188 static int 5189 hns3_dev_start(struct rte_eth_dev *dev) 5190 { 5191 struct hns3_adapter *hns = dev->data->dev_private; 5192 struct hns3_hw *hw = &hns->hw; 5193 int ret; 5194 5195 PMD_INIT_FUNC_TRACE(); 5196 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) 5197 return -EBUSY; 5198 5199 rte_spinlock_lock(&hw->lock); 5200 hw->adapter_state = HNS3_NIC_STARTING; 5201 5202 ret = hns3_do_start(hns, true); 5203 if (ret) { 5204 hw->adapter_state = HNS3_NIC_CONFIGURED; 5205 rte_spinlock_unlock(&hw->lock); 5206 return ret; 5207 } 5208 ret = hns3_map_rx_interrupt(dev); 5209 if (ret) 5210 goto map_rx_inter_err; 5211 5212 /* 5213 * There are three register used to control the status of a TQP 5214 * (contains a pair of Tx queue and Rx queue) in the new version network 5215 * engine. One is used to control the enabling of Tx queue, the other is 5216 * used to control the enabling of Rx queue, and the last is the master 5217 * switch used to control the enabling of the tqp. The Tx register and 5218 * TQP register must be enabled at the same time to enable a Tx queue. 5219 * The same applies to the Rx queue. For the older network engine, this 5220 * function only refresh the enabled flag, and it is used to update the 5221 * status of queue in the dpdk framework. 5222 */ 5223 ret = hns3_start_all_txqs(dev); 5224 if (ret) 5225 goto map_rx_inter_err; 5226 5227 ret = hns3_start_all_rxqs(dev); 5228 if (ret) 5229 goto start_all_rxqs_fail; 5230 5231 hw->adapter_state = HNS3_NIC_STARTED; 5232 rte_spinlock_unlock(&hw->lock); 5233 5234 hns3_rx_scattered_calc(dev); 5235 hns3_set_rxtx_function(dev); 5236 hns3_mp_req_start_rxtx(dev); 5237 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); 5238 5239 hns3_restore_filter(dev); 5240 5241 /* Enable interrupt of all rx queues before enabling queues */ 5242 hns3_dev_all_rx_queue_intr_enable(hw, true); 5243 5244 /* 5245 * After finished the initialization, enable tqps to receive/transmit 5246 * packets and refresh all queue status. 
5247 */ 5248 hns3_start_tqps(hw); 5249 5250 hns3_tm_dev_start_proc(hw); 5251 5252 hns3_info(hw, "hns3 dev start successful!"); 5253 5254 return 0; 5255 5256 start_all_rxqs_fail: 5257 hns3_stop_all_txqs(dev); 5258 map_rx_inter_err: 5259 (void)hns3_do_stop(hns); 5260 hw->adapter_state = HNS3_NIC_CONFIGURED; 5261 rte_spinlock_unlock(&hw->lock); 5262 5263 return ret; 5264 } 5265 5266 static int 5267 hns3_do_stop(struct hns3_adapter *hns) 5268 { 5269 struct hns3_hw *hw = &hns->hw; 5270 int ret; 5271 5272 /* 5273 * The "hns3_do_stop" function will also be called by .stop_service to 5274 * prepare reset. At the time of global or IMP reset, the command cannot 5275 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be 5276 * accessed during the reset process. So the mbuf can not be released 5277 * during reset and is required to be released after the reset is 5278 * completed. 5279 */ 5280 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) 5281 hns3_dev_release_mbufs(hns); 5282 5283 ret = hns3_cfg_mac_mode(hw, false); 5284 if (ret) 5285 return ret; 5286 hw->mac.link_status = ETH_LINK_DOWN; 5287 5288 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { 5289 hns3_configure_all_mac_addr(hns, true); 5290 ret = hns3_reset_all_tqps(hns); 5291 if (ret) { 5292 hns3_err(hw, "failed to reset all queues ret = %d.", 5293 ret); 5294 return ret; 5295 } 5296 } 5297 hw->mac.default_addr_setted = false; 5298 return 0; 5299 } 5300 5301 static void 5302 hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) 5303 { 5304 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5305 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5306 struct hns3_adapter *hns = dev->data->dev_private; 5307 struct hns3_hw *hw = &hns->hw; 5308 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; 5309 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5310 uint16_t q_id; 5311 5312 if (dev->data->dev_conf.intr_conf.rxq == 0) 5313 return; 5314 5315 /* unmap the ring with vector */ 5316 if (rte_intr_allow_others(intr_handle)) { 5317 vec = RTE_INTR_VEC_RXTX_OFFSET; 5318 base = RTE_INTR_VEC_RXTX_OFFSET; 5319 } 5320 if (rte_intr_dp_is_en(intr_handle)) { 5321 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5322 (void)hns3_bind_ring_with_vector(hw, vec, false, 5323 HNS3_RING_TYPE_RX, 5324 q_id); 5325 if (vec < base + intr_handle->nb_efd - 1) 5326 vec++; 5327 } 5328 } 5329 /* Clean datapath event and queue/vec mapping */ 5330 rte_intr_efd_disable(intr_handle); 5331 if (intr_handle->intr_vec) { 5332 rte_free(intr_handle->intr_vec); 5333 intr_handle->intr_vec = NULL; 5334 } 5335 } 5336 5337 static int 5338 hns3_dev_stop(struct rte_eth_dev *dev) 5339 { 5340 struct hns3_adapter *hns = dev->data->dev_private; 5341 struct hns3_hw *hw = &hns->hw; 5342 5343 PMD_INIT_FUNC_TRACE(); 5344 dev->data->dev_started = 0; 5345 5346 hw->adapter_state = HNS3_NIC_STOPPING; 5347 hns3_set_rxtx_function(dev); 5348 rte_wmb(); 5349 /* Disable datapath on secondary process. */ 5350 hns3_mp_req_stop_rxtx(dev); 5351 /* Prevent crashes when queues are still in use. 
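 * The wait below scales with the queue count: rte_delay_ms(hw->tqps_num)
 * pauses roughly 1 ms per TQP (for instance about 64 ms on a port with 64
 * TQPs, an illustrative figure) so that datapath threads stopped via
 * hns3_mp_req_stop_rxtx() can drain any in-flight burst.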
*/ 5352 rte_delay_ms(hw->tqps_num); 5353 5354 rte_spinlock_lock(&hw->lock); 5355 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { 5356 hns3_tm_dev_stop_proc(hw); 5357 hns3_config_mac_tnl_int(hw, false); 5358 hns3_stop_tqps(hw); 5359 hns3_do_stop(hns); 5360 hns3_unmap_rx_interrupt(dev); 5361 hw->adapter_state = HNS3_NIC_CONFIGURED; 5362 } 5363 hns3_rx_scattered_reset(dev); 5364 rte_eal_alarm_cancel(hns3_service_handler, dev); 5365 rte_spinlock_unlock(&hw->lock); 5366 5367 return 0; 5368 } 5369 5370 static int 5371 hns3_dev_close(struct rte_eth_dev *eth_dev) 5372 { 5373 struct hns3_adapter *hns = eth_dev->data->dev_private; 5374 struct hns3_hw *hw = &hns->hw; 5375 int ret = 0; 5376 5377 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 5378 rte_free(eth_dev->process_private); 5379 eth_dev->process_private = NULL; 5380 return 0; 5381 } 5382 5383 if (hw->adapter_state == HNS3_NIC_STARTED) 5384 ret = hns3_dev_stop(eth_dev); 5385 5386 hw->adapter_state = HNS3_NIC_CLOSING; 5387 hns3_reset_abort(hns); 5388 hw->adapter_state = HNS3_NIC_CLOSED; 5389 5390 hns3_configure_all_mc_mac_addr(hns, true); 5391 hns3_remove_all_vlan_table(hns); 5392 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0); 5393 hns3_uninit_pf(eth_dev); 5394 hns3_free_all_queues(eth_dev); 5395 rte_free(hw->reset.wait_data); 5396 rte_free(eth_dev->process_private); 5397 eth_dev->process_private = NULL; 5398 hns3_mp_uninit_primary(); 5399 hns3_warn(hw, "Close port %u finished", hw->data->port_id); 5400 5401 return ret; 5402 } 5403 5404 static int 5405 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5406 { 5407 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5408 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5409 5410 fc_conf->pause_time = pf->pause_time; 5411 5412 /* return fc current mode */ 5413 switch (hw->current_mode) { 5414 case HNS3_FC_FULL: 5415 fc_conf->mode = RTE_FC_FULL; 5416 break; 5417 case HNS3_FC_TX_PAUSE: 5418 fc_conf->mode = RTE_FC_TX_PAUSE; 5419 break; 5420 case HNS3_FC_RX_PAUSE: 5421 fc_conf->mode = RTE_FC_RX_PAUSE; 5422 break; 5423 case HNS3_FC_NONE: 5424 default: 5425 fc_conf->mode = RTE_FC_NONE; 5426 break; 5427 } 5428 5429 return 0; 5430 } 5431 5432 static void 5433 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode) 5434 { 5435 switch (mode) { 5436 case RTE_FC_NONE: 5437 hw->requested_mode = HNS3_FC_NONE; 5438 break; 5439 case RTE_FC_RX_PAUSE: 5440 hw->requested_mode = HNS3_FC_RX_PAUSE; 5441 break; 5442 case RTE_FC_TX_PAUSE: 5443 hw->requested_mode = HNS3_FC_TX_PAUSE; 5444 break; 5445 case RTE_FC_FULL: 5446 hw->requested_mode = HNS3_FC_FULL; 5447 break; 5448 default: 5449 hw->requested_mode = HNS3_FC_NONE; 5450 hns3_warn(hw, "fc_mode(%u) exceeds member scope and is " 5451 "configured to RTE_FC_NONE", mode); 5452 break; 5453 } 5454 } 5455 5456 static int 5457 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5458 { 5459 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5460 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5461 int ret; 5462 5463 if (fc_conf->high_water || fc_conf->low_water || 5464 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) { 5465 hns3_err(hw, "Unsupported flow control settings specified, " 5466 "high_water(%u), low_water(%u), send_xon(%u) and " 5467 "mac_ctrl_frame_fwd(%u) must be set to '0'", 5468 fc_conf->high_water, fc_conf->low_water, 5469 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd); 5470 return -EINVAL; 
5471 } 5472 if (fc_conf->autoneg) { 5473 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 5474 return -EINVAL; 5475 } 5476 if (!fc_conf->pause_time) { 5477 hns3_err(hw, "Invalid pause time %u setting.", 5478 fc_conf->pause_time); 5479 return -EINVAL; 5480 } 5481 5482 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 5483 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) { 5484 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. " 5485 "current_fc_status = %d", hw->current_fc_status); 5486 return -EOPNOTSUPP; 5487 } 5488 5489 hns3_get_fc_mode(hw, fc_conf->mode); 5490 if (hw->requested_mode == hw->current_mode && 5491 pf->pause_time == fc_conf->pause_time) 5492 return 0; 5493 5494 rte_spinlock_lock(&hw->lock); 5495 ret = hns3_fc_enable(dev, fc_conf); 5496 rte_spinlock_unlock(&hw->lock); 5497 5498 return ret; 5499 } 5500 5501 static int 5502 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, 5503 struct rte_eth_pfc_conf *pfc_conf) 5504 { 5505 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5506 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5507 uint8_t priority; 5508 int ret; 5509 5510 if (!hns3_dev_dcb_supported(hw)) { 5511 hns3_err(hw, "This port does not support dcb configurations."); 5512 return -EOPNOTSUPP; 5513 } 5514 5515 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water || 5516 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) { 5517 hns3_err(hw, "Unsupported flow control settings specified, " 5518 "high_water(%u), low_water(%u), send_xon(%u) and " 5519 "mac_ctrl_frame_fwd(%u) must be set to '0'", 5520 pfc_conf->fc.high_water, pfc_conf->fc.low_water, 5521 pfc_conf->fc.send_xon, 5522 pfc_conf->fc.mac_ctrl_frame_fwd); 5523 return -EINVAL; 5524 } 5525 if (pfc_conf->fc.autoneg) { 5526 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 5527 return -EINVAL; 5528 } 5529 if (pfc_conf->fc.pause_time == 0) { 5530 hns3_err(hw, "Invalid pause time %u setting.", 5531 pfc_conf->fc.pause_time); 5532 return -EINVAL; 5533 } 5534 5535 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 5536 hw->current_fc_status == HNS3_FC_STATUS_PFC)) { 5537 hns3_err(hw, "MAC pause is enabled. Cannot set PFC." 
5538 "current_fc_status = %d", hw->current_fc_status); 5539 return -EOPNOTSUPP; 5540 } 5541 5542 priority = pfc_conf->priority; 5543 hns3_get_fc_mode(hw, pfc_conf->fc.mode); 5544 if (hw->dcb_info.pfc_en & BIT(priority) && 5545 hw->requested_mode == hw->current_mode && 5546 pfc_conf->fc.pause_time == pf->pause_time) 5547 return 0; 5548 5549 rte_spinlock_lock(&hw->lock); 5550 ret = hns3_dcb_pfc_enable(dev, pfc_conf); 5551 rte_spinlock_unlock(&hw->lock); 5552 5553 return ret; 5554 } 5555 5556 static int 5557 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) 5558 { 5559 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5560 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5561 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; 5562 int i; 5563 5564 rte_spinlock_lock(&hw->lock); 5565 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) 5566 dcb_info->nb_tcs = pf->local_max_tc; 5567 else 5568 dcb_info->nb_tcs = 1; 5569 5570 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) 5571 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; 5572 for (i = 0; i < dcb_info->nb_tcs; i++) 5573 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; 5574 5575 for (i = 0; i < hw->num_tc; i++) { 5576 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; 5577 dcb_info->tc_queue.tc_txq[0][i].base = 5578 hw->tc_queue[i].tqp_offset; 5579 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; 5580 dcb_info->tc_queue.tc_txq[0][i].nb_queue = 5581 hw->tc_queue[i].tqp_count; 5582 } 5583 rte_spinlock_unlock(&hw->lock); 5584 5585 return 0; 5586 } 5587 5588 static int 5589 hns3_reinit_dev(struct hns3_adapter *hns) 5590 { 5591 struct hns3_hw *hw = &hns->hw; 5592 int ret; 5593 5594 ret = hns3_cmd_init(hw); 5595 if (ret) { 5596 hns3_err(hw, "Failed to init cmd: %d", ret); 5597 return ret; 5598 } 5599 5600 ret = hns3_reset_all_tqps(hns); 5601 if (ret) { 5602 hns3_err(hw, "Failed to reset all queues: %d", ret); 5603 return ret; 5604 } 5605 5606 ret = hns3_init_hardware(hns); 5607 if (ret) { 5608 hns3_err(hw, "Failed to init hardware: %d", ret); 5609 return ret; 5610 } 5611 5612 ret = hns3_enable_hw_error_intr(hns, true); 5613 if (ret) { 5614 hns3_err(hw, "fail to enable hw error interrupts: %d", 5615 ret); 5616 return ret; 5617 } 5618 hns3_info(hw, "Reset done, driver initialization finished."); 5619 5620 return 0; 5621 } 5622 5623 static bool 5624 is_pf_reset_done(struct hns3_hw *hw) 5625 { 5626 uint32_t val, reg, reg_bit; 5627 5628 switch (hw->reset.level) { 5629 case HNS3_IMP_RESET: 5630 reg = HNS3_GLOBAL_RESET_REG; 5631 reg_bit = HNS3_IMP_RESET_BIT; 5632 break; 5633 case HNS3_GLOBAL_RESET: 5634 reg = HNS3_GLOBAL_RESET_REG; 5635 reg_bit = HNS3_GLOBAL_RESET_BIT; 5636 break; 5637 case HNS3_FUNC_RESET: 5638 reg = HNS3_FUN_RST_ING; 5639 reg_bit = HNS3_FUN_RST_ING_B; 5640 break; 5641 case HNS3_FLR_RESET: 5642 default: 5643 hns3_err(hw, "Wait for unsupported reset level: %d", 5644 hw->reset.level); 5645 return true; 5646 } 5647 val = hns3_read_dev(hw, reg); 5648 if (hns3_get_bit(val, reg_bit)) 5649 return false; 5650 else 5651 return true; 5652 } 5653 5654 bool 5655 hns3_is_reset_pending(struct hns3_adapter *hns) 5656 { 5657 struct hns3_hw *hw = &hns->hw; 5658 enum hns3_reset_level reset; 5659 5660 hns3_check_event_cause(hns, NULL); 5661 reset = hns3_get_reset_level(hns, &hw->reset.pending); 5662 if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { 5663 hns3_warn(hw, "High level reset %d is pending", reset); 5664 return true; 5665 } 5666 
reset = hns3_get_reset_level(hns, &hw->reset.request); 5667 if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { 5668 hns3_warn(hw, "High level reset %d is request", reset); 5669 return true; 5670 } 5671 return false; 5672 } 5673 5674 static int 5675 hns3_wait_hardware_ready(struct hns3_adapter *hns) 5676 { 5677 struct hns3_hw *hw = &hns->hw; 5678 struct hns3_wait_data *wait_data = hw->reset.wait_data; 5679 struct timeval tv; 5680 5681 if (wait_data->result == HNS3_WAIT_SUCCESS) 5682 return 0; 5683 else if (wait_data->result == HNS3_WAIT_TIMEOUT) { 5684 gettimeofday(&tv, NULL); 5685 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", 5686 tv.tv_sec, tv.tv_usec); 5687 return -ETIME; 5688 } else if (wait_data->result == HNS3_WAIT_REQUEST) 5689 return -EAGAIN; 5690 5691 wait_data->hns = hns; 5692 wait_data->check_completion = is_pf_reset_done; 5693 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT * 5694 HNS3_RESET_WAIT_MS + get_timeofday_ms(); 5695 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC; 5696 wait_data->count = HNS3_RESET_WAIT_CNT; 5697 wait_data->result = HNS3_WAIT_REQUEST; 5698 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); 5699 return -EAGAIN; 5700 } 5701 5702 static int 5703 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) 5704 { 5705 struct hns3_cmd_desc desc; 5706 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data; 5707 5708 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); 5709 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1); 5710 req->fun_reset_vfid = func_id; 5711 5712 return hns3_cmd_send(hw, &desc, 1); 5713 } 5714 5715 static int 5716 hns3_imp_reset_cmd(struct hns3_hw *hw) 5717 { 5718 struct hns3_cmd_desc desc; 5719 5720 hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); 5721 desc.data[0] = 0xeedd; 5722 5723 return hns3_cmd_send(hw, &desc, 1); 5724 } 5725 5726 static void 5727 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) 5728 { 5729 struct hns3_hw *hw = &hns->hw; 5730 struct timeval tv; 5731 uint32_t val; 5732 5733 gettimeofday(&tv, NULL); 5734 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) || 5735 hns3_read_dev(hw, HNS3_FUN_RST_ING)) { 5736 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld", 5737 tv.tv_sec, tv.tv_usec); 5738 return; 5739 } 5740 5741 switch (reset_level) { 5742 case HNS3_IMP_RESET: 5743 hns3_imp_reset_cmd(hw); 5744 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", 5745 tv.tv_sec, tv.tv_usec); 5746 break; 5747 case HNS3_GLOBAL_RESET: 5748 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG); 5749 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1); 5750 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val); 5751 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld", 5752 tv.tv_sec, tv.tv_usec); 5753 break; 5754 case HNS3_FUNC_RESET: 5755 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld", 5756 tv.tv_sec, tv.tv_usec); 5757 /* schedule again to check later */ 5758 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending); 5759 hns3_schedule_reset(hns); 5760 break; 5761 default: 5762 hns3_warn(hw, "Unsupported reset level: %d", reset_level); 5763 return; 5764 } 5765 hns3_atomic_clear_bit(reset_level, &hw->reset.request); 5766 } 5767 5768 static enum hns3_reset_level 5769 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) 5770 { 5771 struct hns3_hw *hw = &hns->hw; 5772 enum hns3_reset_level reset_level = HNS3_NONE_RESET; 5773 5774 /* Return the highest priority reset level amongst all */ 5775 if 
(hns3_atomic_test_bit(HNS3_IMP_RESET, levels)) 5776 reset_level = HNS3_IMP_RESET; 5777 else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels)) 5778 reset_level = HNS3_GLOBAL_RESET; 5779 else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels)) 5780 reset_level = HNS3_FUNC_RESET; 5781 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels)) 5782 reset_level = HNS3_FLR_RESET; 5783 5784 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level) 5785 return HNS3_NONE_RESET; 5786 5787 return reset_level; 5788 } 5789 5790 static void 5791 hns3_record_imp_error(struct hns3_adapter *hns) 5792 { 5793 struct hns3_hw *hw = &hns->hw; 5794 uint32_t reg_val; 5795 5796 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 5797 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) { 5798 hns3_warn(hw, "Detected IMP RD poison!"); 5799 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0); 5800 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 5801 } 5802 5803 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) { 5804 hns3_warn(hw, "Detected IMP CMDQ error!"); 5805 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0); 5806 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 5807 } 5808 } 5809 5810 static int 5811 hns3_prepare_reset(struct hns3_adapter *hns) 5812 { 5813 struct hns3_hw *hw = &hns->hw; 5814 uint32_t reg_val; 5815 int ret; 5816 5817 switch (hw->reset.level) { 5818 case HNS3_FUNC_RESET: 5819 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID); 5820 if (ret) 5821 return ret; 5822 5823 /* 5824 * After performing a PF reset, it is not necessary to handle 5825 * mailbox events or send any command to the firmware, because 5826 * any mailbox handling or command to the firmware is only 5827 * valid again after hns3_cmd_init is called. 5828 */ 5829 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 5830 hw->reset.stats.request_cnt++; 5831 break; 5832 case HNS3_IMP_RESET: 5833 hns3_record_imp_error(hns); 5834 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 5835 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val | 5836 BIT(HNS3_VECTOR0_IMP_RESET_INT_B)); 5837 break; 5838 default: 5839 break; 5840 } 5841 return 0; 5842 } 5843 5844 static int 5845 hns3_set_rst_done(struct hns3_hw *hw) 5846 { 5847 struct hns3_pf_rst_done_cmd *req; 5848 struct hns3_cmd_desc desc; 5849 5850 req = (struct hns3_pf_rst_done_cmd *)desc.data; 5851 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false); 5852 req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT; 5853 return hns3_cmd_send(hw, &desc, 1); 5854 } 5855 5856 static int 5857 hns3_stop_service(struct hns3_adapter *hns) 5858 { 5859 struct hns3_hw *hw = &hns->hw; 5860 struct rte_eth_dev *eth_dev; 5861 5862 eth_dev = &rte_eth_devices[hw->data->port_id]; 5863 if (hw->adapter_state == HNS3_NIC_STARTED) { 5864 rte_eal_alarm_cancel(hns3_service_handler, eth_dev); 5865 hns3_update_link_status_and_event(hw); 5866 } 5867 hw->mac.link_status = ETH_LINK_DOWN; 5868 5869 hns3_set_rxtx_function(eth_dev); 5870 rte_wmb(); 5871 /* Disable datapath on secondary process. */ 5872 hns3_mp_req_stop_rxtx(eth_dev); 5873 rte_delay_ms(hw->tqps_num); 5874 5875 rte_spinlock_lock(&hw->lock); 5876 if (hns->hw.adapter_state == HNS3_NIC_STARTED || 5877 hw->adapter_state == HNS3_NIC_STOPPING) { 5878 hns3_enable_all_queues(hw, false); 5879 hns3_do_stop(hns); 5880 hw->reset.mbuf_deferred_free = true; 5881 } else 5882 hw->reset.mbuf_deferred_free = false; 5883 5884 /* 5885 * It is cumbersome for hardware to pick-and-choose entries for deletion 5886 * from table space.
Hence, for function reset software intervention is 5887 * required to delete the entries 5888 */ 5889 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) 5890 hns3_configure_all_mc_mac_addr(hns, true); 5891 rte_spinlock_unlock(&hw->lock); 5892 5893 return 0; 5894 } 5895 5896 static int 5897 hns3_start_service(struct hns3_adapter *hns) 5898 { 5899 struct hns3_hw *hw = &hns->hw; 5900 struct rte_eth_dev *eth_dev; 5901 5902 if (hw->reset.level == HNS3_IMP_RESET || 5903 hw->reset.level == HNS3_GLOBAL_RESET) 5904 hns3_set_rst_done(hw); 5905 eth_dev = &rte_eth_devices[hw->data->port_id]; 5906 hns3_set_rxtx_function(eth_dev); 5907 hns3_mp_req_start_rxtx(eth_dev); 5908 if (hw->adapter_state == HNS3_NIC_STARTED) { 5909 /* 5910 * This API parent function already hold the hns3_hw.lock, the 5911 * hns3_service_handler may report lse, in bonding application 5912 * it will call driver's ops which may acquire the hns3_hw.lock 5913 * again, thus lead to deadlock. 5914 * We defer calls hns3_service_handler to avoid the deadlock. 5915 */ 5916 rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL, 5917 hns3_service_handler, eth_dev); 5918 5919 /* Enable interrupt of all rx queues before enabling queues */ 5920 hns3_dev_all_rx_queue_intr_enable(hw, true); 5921 /* 5922 * Enable state of each rxq and txq will be recovered after 5923 * reset, so we need to restore them before enable all tqps; 5924 */ 5925 hns3_restore_tqp_enable_state(hw); 5926 /* 5927 * When finished the initialization, enable queues to receive 5928 * and transmit packets. 5929 */ 5930 hns3_enable_all_queues(hw, true); 5931 } 5932 5933 return 0; 5934 } 5935 5936 static int 5937 hns3_restore_conf(struct hns3_adapter *hns) 5938 { 5939 struct hns3_hw *hw = &hns->hw; 5940 int ret; 5941 5942 ret = hns3_configure_all_mac_addr(hns, false); 5943 if (ret) 5944 return ret; 5945 5946 ret = hns3_configure_all_mc_mac_addr(hns, false); 5947 if (ret) 5948 goto err_mc_mac; 5949 5950 ret = hns3_dev_promisc_restore(hns); 5951 if (ret) 5952 goto err_promisc; 5953 5954 ret = hns3_restore_vlan_table(hns); 5955 if (ret) 5956 goto err_promisc; 5957 5958 ret = hns3_restore_vlan_conf(hns); 5959 if (ret) 5960 goto err_promisc; 5961 5962 ret = hns3_restore_all_fdir_filter(hns); 5963 if (ret) 5964 goto err_promisc; 5965 5966 ret = hns3_restore_rx_interrupt(hw); 5967 if (ret) 5968 goto err_promisc; 5969 5970 ret = hns3_restore_gro_conf(hw); 5971 if (ret) 5972 goto err_promisc; 5973 5974 ret = hns3_restore_fec(hw); 5975 if (ret) 5976 goto err_promisc; 5977 5978 if (hns->hw.adapter_state == HNS3_NIC_STARTED) { 5979 ret = hns3_do_start(hns, false); 5980 if (ret) 5981 goto err_promisc; 5982 hns3_info(hw, "hns3 dev restart successful!"); 5983 } else if (hw->adapter_state == HNS3_NIC_STOPPING) 5984 hw->adapter_state = HNS3_NIC_CONFIGURED; 5985 return 0; 5986 5987 err_promisc: 5988 hns3_configure_all_mc_mac_addr(hns, true); 5989 err_mc_mac: 5990 hns3_configure_all_mac_addr(hns, true); 5991 return ret; 5992 } 5993 5994 static void 5995 hns3_reset_service(void *param) 5996 { 5997 struct hns3_adapter *hns = (struct hns3_adapter *)param; 5998 struct hns3_hw *hw = &hns->hw; 5999 enum hns3_reset_level reset_level; 6000 struct timeval tv_delta; 6001 struct timeval tv_start; 6002 struct timeval tv; 6003 uint64_t msec; 6004 int ret; 6005 6006 /* 6007 * The interrupt is not triggered within the delay time. 6008 * The interrupt may have been lost. It is necessary to handle 6009 * the interrupt to recover from the error. 
6010 */ 6011 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 6012 SCHEDULE_DEFERRED) { 6013 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, 6014 __ATOMIC_RELAXED); 6015 hns3_err(hw, "Handling interrupts in delayed tasks"); 6016 hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]); 6017 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 6018 if (reset_level == HNS3_NONE_RESET) { 6019 hns3_err(hw, "No reset level is set, try IMP reset"); 6020 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); 6021 } 6022 } 6023 __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED); 6024 6025 /* 6026 * Check if there is any ongoing reset in the hardware. This status can 6027 * be checked from reset_pending. If there is one, we need to wait for 6028 * the hardware to complete the reset. 6029 * a. If we are able to figure out in reasonable time that the hardware 6030 * has been fully reset, then we can proceed with the driver and 6031 * client reset. 6032 * b. Otherwise, we can come back later to check this status, so 6033 * reschedule now. 6034 */ 6035 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 6036 if (reset_level != HNS3_NONE_RESET) { 6037 gettimeofday(&tv_start, NULL); 6038 ret = hns3_reset_process(hns, reset_level); 6039 gettimeofday(&tv, NULL); 6040 timersub(&tv, &tv_start, &tv_delta); 6041 msec = tv_delta.tv_sec * MSEC_PER_SEC + 6042 tv_delta.tv_usec / USEC_PER_MSEC; 6043 if (msec > HNS3_RESET_PROCESS_MS) 6044 hns3_err(hw, "%d handle long time delta %" PRIx64 6045 " ms time=%ld.%.6ld", 6046 hw->reset.level, msec, 6047 tv.tv_sec, tv.tv_usec); 6048 if (ret == -EAGAIN) 6049 return; 6050 } 6051 6052 /* Check if we got any *new* reset requests to be honored */ 6053 reset_level = hns3_get_reset_level(hns, &hw->reset.request); 6054 if (reset_level != HNS3_NONE_RESET) 6055 hns3_msix_process(hns, reset_level); 6056 } 6057 6058 static unsigned int 6059 hns3_get_speed_capa_num(uint16_t device_id) 6060 { 6061 unsigned int num; 6062 6063 switch (device_id) { 6064 case HNS3_DEV_ID_25GE: 6065 case HNS3_DEV_ID_25GE_RDMA: 6066 num = 2; 6067 break; 6068 case HNS3_DEV_ID_100G_RDMA_MACSEC: 6069 case HNS3_DEV_ID_200G_RDMA: 6070 num = 1; 6071 break; 6072 default: 6073 num = 0; 6074 break; 6075 } 6076 6077 return num; 6078 } 6079 6080 static int 6081 hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa, 6082 uint16_t device_id) 6083 { 6084 switch (device_id) { 6085 case HNS3_DEV_ID_25GE: 6086 /* fallthrough */ 6087 case HNS3_DEV_ID_25GE_RDMA: 6088 speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed; 6089 speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa; 6090 6091 /* In HNS3 devices, the 25G NIC is also compatible with the 10G rate */ 6092 speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed; 6093 speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa; 6094 break; 6095 case HNS3_DEV_ID_100G_RDMA_MACSEC: 6096 speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed; 6097 speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa; 6098 break; 6099 case HNS3_DEV_ID_200G_RDMA: 6100 speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed; 6101 speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa; 6102 break; 6103 default: 6104 return -ENOTSUP; 6105 } 6106 6107 return 0; 6108 } 6109 6110 static int 6111 hns3_fec_get_capability(struct rte_eth_dev *dev, 6112 struct rte_eth_fec_capa *speed_fec_capa, 6113 unsigned int num) 6114 { 6115 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6116 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6117 uint16_t device_id =
pci_dev->id.device_id; 6118 unsigned int capa_num; 6119 int ret; 6120 6121 capa_num = hns3_get_speed_capa_num(device_id); 6122 if (capa_num == 0) { 6123 hns3_err(hw, "device(0x%x) is not supported by hns3 PMD", 6124 device_id); 6125 return -ENOTSUP; 6126 } 6127 6128 if (speed_fec_capa == NULL || num < capa_num) 6129 return capa_num; 6130 6131 ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id); 6132 if (ret) 6133 return -ENOTSUP; 6134 6135 return capa_num; 6136 } 6137 6138 static int 6139 get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) 6140 { 6141 struct hns3_config_fec_cmd *req; 6142 struct hns3_cmd_desc desc; 6143 int ret; 6144 6145 /* 6146 * Reading CMD(HNS3_OPC_CONFIG_FEC_MODE) is not supported 6147 * on devices with a link speed 6148 * below 10 Gbps. 6149 */ 6150 if (hw->mac.link_speed < ETH_SPEED_NUM_10G) { 6151 *state = 0; 6152 return 0; 6153 } 6154 6155 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true); 6156 req = (struct hns3_config_fec_cmd *)desc.data; 6157 ret = hns3_cmd_send(hw, &desc, 1); 6158 if (ret) { 6159 hns3_err(hw, "get current fec auto state failed, ret = %d", 6160 ret); 6161 return ret; 6162 } 6163 6164 *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B); 6165 return 0; 6166 } 6167 6168 static int 6169 hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) 6170 { 6171 #define QUERY_ACTIVE_SPEED 1 6172 struct hns3_sfp_speed_cmd *resp; 6173 uint32_t tmp_fec_capa; 6174 uint8_t auto_state; 6175 struct hns3_cmd_desc desc; 6176 int ret; 6177 6178 /* 6179 * If the link is down and AUTO is enabled, AUTO is returned; otherwise, 6180 * the configured FEC mode is returned. 6181 * If the link is up, the current FEC mode is returned. 6182 */ 6183 if (hw->mac.link_status == ETH_LINK_DOWN) { 6184 ret = get_current_fec_auto_state(hw, &auto_state); 6185 if (ret) 6186 return ret; 6187 6188 if (auto_state == 0x1) { 6189 *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO); 6190 return 0; 6191 } 6192 } 6193 6194 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SFP_GET_SPEED, true); 6195 resp = (struct hns3_sfp_speed_cmd *)desc.data; 6196 resp->query_type = QUERY_ACTIVE_SPEED; 6197 6198 ret = hns3_cmd_send(hw, &desc, 1); 6199 if (ret == -EOPNOTSUPP) { 6200 hns3_err(hw, "IMP does not support getting FEC, ret = %d", ret); 6201 return ret; 6202 } else if (ret) { 6203 hns3_err(hw, "get FEC failed, ret = %d", ret); 6204 return ret; 6205 } 6206 6207 /* 6208 * FEC mode order defined in hns3 hardware is inconsistent with 6209 * that defined in the ethdev library. So the sequence needs 6210 * to be converted.
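 * For example, the hardware value HNS3_HW_FEC_MODE_BASER is reported to the
 * application as RTE_ETH_FEC_MODE_CAPA_MASK(BASER) in the switch below.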
6211 */ 6212 switch (resp->active_fec) { 6213 case HNS3_HW_FEC_MODE_NOFEC: 6214 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 6215 break; 6216 case HNS3_HW_FEC_MODE_BASER: 6217 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER); 6218 break; 6219 case HNS3_HW_FEC_MODE_RS: 6220 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS); 6221 break; 6222 default: 6223 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 6224 break; 6225 } 6226 6227 *fec_capa = tmp_fec_capa; 6228 return 0; 6229 } 6230 6231 static int 6232 hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa) 6233 { 6234 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6235 6236 return hns3_fec_get_internal(hw, fec_capa); 6237 } 6238 6239 static int 6240 hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) 6241 { 6242 struct hns3_config_fec_cmd *req; 6243 struct hns3_cmd_desc desc; 6244 int ret; 6245 6246 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false); 6247 6248 req = (struct hns3_config_fec_cmd *)desc.data; 6249 switch (mode) { 6250 case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC): 6251 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6252 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF); 6253 break; 6254 case RTE_ETH_FEC_MODE_CAPA_MASK(BASER): 6255 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6256 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER); 6257 break; 6258 case RTE_ETH_FEC_MODE_CAPA_MASK(RS): 6259 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6260 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS); 6261 break; 6262 case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO): 6263 hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1); 6264 break; 6265 default: 6266 return 0; 6267 } 6268 ret = hns3_cmd_send(hw, &desc, 1); 6269 if (ret) 6270 hns3_err(hw, "set fec mode failed, ret = %d", ret); 6271 6272 return ret; 6273 } 6274 6275 static uint32_t 6276 get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa) 6277 { 6278 struct hns3_mac *mac = &hw->mac; 6279 uint32_t cur_capa; 6280 6281 switch (mac->link_speed) { 6282 case ETH_SPEED_NUM_10G: 6283 cur_capa = fec_capa[1].capa; 6284 break; 6285 case ETH_SPEED_NUM_25G: 6286 case ETH_SPEED_NUM_100G: 6287 case ETH_SPEED_NUM_200G: 6288 cur_capa = fec_capa[0].capa; 6289 break; 6290 default: 6291 cur_capa = 0; 6292 break; 6293 } 6294 6295 return cur_capa; 6296 } 6297 6298 static bool 6299 is_fec_mode_one_bit_set(uint32_t mode) 6300 { 6301 int cnt = 0; 6302 uint8_t i; 6303 6304 for (i = 0; i < sizeof(mode); i++) 6305 if (mode >> i & 0x1) 6306 cnt++; 6307 6308 return cnt == 1 ? true : false; 6309 } 6310 6311 static int 6312 hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) 6313 { 6314 #define FEC_CAPA_NUM 2 6315 struct hns3_adapter *hns = dev->data->dev_private; 6316 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 6317 struct hns3_pf *pf = &hns->pf; 6318 6319 struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM]; 6320 uint32_t cur_capa; 6321 uint32_t num = FEC_CAPA_NUM; 6322 int ret; 6323 6324 ret = hns3_fec_get_capability(dev, fec_capa, num); 6325 if (ret < 0) 6326 return ret; 6327 6328 /* HNS3 PMD driver only support one bit set mode, e.g. 0x1, 0x4 */ 6329 if (!is_fec_mode_one_bit_set(mode)) 6330 hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD," 6331 "FEC mode should be only one bit set", mode); 6332 6333 /* 6334 * Check whether the configured mode is within the FEC capability. 6335 * If not, the configured mode will not be supported. 
6336 */ 6337 cur_capa = get_current_speed_fec_cap(hw, fec_capa); 6338 if (!(cur_capa & mode)) { 6339 hns3_err(hw, "unsupported FEC mode = 0x%x", mode); 6340 return -EINVAL; 6341 } 6342 6343 ret = hns3_set_fec_hw(hw, mode); 6344 if (ret) 6345 return ret; 6346 6347 pf->fec_mode = mode; 6348 return 0; 6349 } 6350 6351 static int 6352 hns3_restore_fec(struct hns3_hw *hw) 6353 { 6354 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 6355 struct hns3_pf *pf = &hns->pf; 6356 uint32_t mode = pf->fec_mode; 6357 int ret; 6358 6359 ret = hns3_set_fec_hw(hw, mode); 6360 if (ret) 6361 hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d", 6362 mode, ret); 6363 6364 return ret; 6365 } 6366 6367 static int 6368 hns3_query_dev_fec_info(struct hns3_hw *hw) 6369 { 6370 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 6371 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns); 6372 int ret; 6373 6374 ret = hns3_fec_get_internal(hw, &pf->fec_mode); 6375 if (ret) 6376 hns3_err(hw, "query device FEC info failed, ret = %d", ret); 6377 6378 return ret; 6379 } 6380 6381 static bool 6382 hns3_optical_module_existed(struct hns3_hw *hw) 6383 { 6384 struct hns3_cmd_desc desc; 6385 bool existed; 6386 int ret; 6387 6388 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true); 6389 ret = hns3_cmd_send(hw, &desc, 1); 6390 if (ret) { 6391 hns3_err(hw, 6392 "fail to get optical module exist state, ret = %d.\n", 6393 ret); 6394 return false; 6395 } 6396 existed = !!desc.data[0]; 6397 6398 return existed; 6399 } 6400 6401 static int 6402 hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, 6403 uint32_t len, uint8_t *data) 6404 { 6405 #define HNS3_SFP_INFO_CMD_NUM 6 6406 #define HNS3_SFP_INFO_MAX_LEN \ 6407 (HNS3_SFP_INFO_BD0_LEN + \ 6408 (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN) 6409 struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM]; 6410 struct hns3_sfp_info_bd0_cmd *sfp_info_bd0; 6411 uint16_t read_len; 6412 uint16_t copy_len; 6413 int ret; 6414 int i; 6415 6416 for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) { 6417 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM, 6418 true); 6419 if (i < HNS3_SFP_INFO_CMD_NUM - 1) 6420 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 6421 } 6422 6423 sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data; 6424 sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset); 6425 read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN); 6426 sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len); 6427 6428 ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); 6429 if (ret) { 6430 hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n", 6431 ret); 6432 return ret; 6433 } 6434 6435 /* The data format in BD0 is different with the others. 
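 * BD0 carries at most HNS3_SFP_INFO_BD0_LEN bytes of EEPROM data, while each
 * subsequent BD carries at most HNS3_SFP_INFO_BDX_LEN bytes.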
*/ 6436 copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN); 6437 memcpy(data, sfp_info_bd0->data, copy_len); 6438 read_len = copy_len; 6439 6440 for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) { 6441 if (read_len >= len) 6442 break; 6443 6444 copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN); 6445 memcpy(data + read_len, desc[i].data, copy_len); 6446 read_len += copy_len; 6447 } 6448 6449 return (int)read_len; 6450 } 6451 6452 static int 6453 hns3_get_module_eeprom(struct rte_eth_dev *dev, 6454 struct rte_dev_eeprom_info *info) 6455 { 6456 struct hns3_adapter *hns = dev->data->dev_private; 6457 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 6458 uint32_t offset = info->offset; 6459 uint32_t len = info->length; 6460 uint8_t *data = info->data; 6461 uint32_t read_len = 0; 6462 6463 if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER) 6464 return -ENOTSUP; 6465 6466 if (!hns3_optical_module_existed(hw)) { 6467 hns3_err(hw, "fail to read module EEPROM: no module is connected.\n"); 6468 return -EIO; 6469 } 6470 6471 while (read_len < len) { 6472 int ret; 6473 ret = hns3_get_module_eeprom_data(hw, offset + read_len, 6474 len - read_len, 6475 data + read_len); 6476 if (ret < 0) 6477 return -EIO; 6478 read_len += ret; 6479 } 6480 6481 return 0; 6482 } 6483 6484 static int 6485 hns3_get_module_info(struct rte_eth_dev *dev, 6486 struct rte_eth_dev_module_info *modinfo) 6487 { 6488 #define HNS3_SFF8024_ID_SFP 0x03 6489 #define HNS3_SFF8024_ID_QSFP_8438 0x0c 6490 #define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d 6491 #define HNS3_SFF8024_ID_QSFP28_8636 0x11 6492 #define HNS3_SFF_8636_V1_3 0x03 6493 struct hns3_adapter *hns = dev->data->dev_private; 6494 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 6495 struct rte_dev_eeprom_info info; 6496 struct hns3_sfp_type sfp_type; 6497 int ret; 6498 6499 memset(&sfp_type, 0, sizeof(sfp_type)); 6500 memset(&info, 0, sizeof(info)); 6501 info.data = (uint8_t *)&sfp_type; 6502 info.length = sizeof(sfp_type); 6503 ret = hns3_get_module_eeprom(dev, &info); 6504 if (ret) 6505 return ret; 6506 6507 switch (sfp_type.type) { 6508 case HNS3_SFF8024_ID_SFP: 6509 modinfo->type = RTE_ETH_MODULE_SFF_8472; 6510 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 6511 break; 6512 case HNS3_SFF8024_ID_QSFP_8438: 6513 modinfo->type = RTE_ETH_MODULE_SFF_8436; 6514 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 6515 break; 6516 case HNS3_SFF8024_ID_QSFP_8436_8636: 6517 if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) { 6518 modinfo->type = RTE_ETH_MODULE_SFF_8436; 6519 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 6520 } else { 6521 modinfo->type = RTE_ETH_MODULE_SFF_8636; 6522 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 6523 } 6524 break; 6525 case HNS3_SFF8024_ID_QSFP28_8636: 6526 modinfo->type = RTE_ETH_MODULE_SFF_8636; 6527 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 6528 break; 6529 default: 6530 hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n", 6531 sfp_type.type, sfp_type.ext_type); 6532 return -EINVAL; 6533 } 6534 6535 return 0; 6536 } 6537 6538 static int 6539 hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) 6540 { 6541 uint32_t hint = HNS3_IO_FUNC_HINT_NONE; 6542 6543 RTE_SET_USED(key); 6544 6545 if (strcmp(value, "vec") == 0) 6546 hint = HNS3_IO_FUNC_HINT_VEC; 6547 else if (strcmp(value, "sve") == 0) 6548 hint = HNS3_IO_FUNC_HINT_SVE; 6549 else if (strcmp(value, "simple") == 0) 6550 hint = HNS3_IO_FUNC_HINT_SIMPLE; 6551 else if (strcmp(value, "common") == 0) 6552 hint = HNS3_IO_FUNC_HINT_COMMON; 6553 
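/*
 * Unrecognized values are silently ignored: the hint stays
 * HNS3_IO_FUNC_HINT_NONE and the driver keeps its default Rx/Tx burst
 * function selection.
 */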
6554 /* If the hint is valid then update output parameters */ 6555 if (hint != HNS3_IO_FUNC_HINT_NONE) 6556 *(uint32_t *)extra_args = hint; 6557 6558 return 0; 6559 } 6560 6561 static const char * 6562 hns3_get_io_hint_func_name(uint32_t hint) 6563 { 6564 switch (hint) { 6565 case HNS3_IO_FUNC_HINT_VEC: 6566 return "vec"; 6567 case HNS3_IO_FUNC_HINT_SVE: 6568 return "sve"; 6569 case HNS3_IO_FUNC_HINT_SIMPLE: 6570 return "simple"; 6571 case HNS3_IO_FUNC_HINT_COMMON: 6572 return "common"; 6573 default: 6574 return "none"; 6575 } 6576 } 6577 6578 void 6579 hns3_parse_devargs(struct rte_eth_dev *dev) 6580 { 6581 struct hns3_adapter *hns = dev->data->dev_private; 6582 uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE; 6583 uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE; 6584 struct hns3_hw *hw = &hns->hw; 6585 struct rte_kvargs *kvlist; 6586 6587 if (dev->device->devargs == NULL) 6588 return; 6589 6590 kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL); 6591 if (!kvlist) 6592 return; 6593 6594 rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT, 6595 &hns3_parse_io_hint_func, &rx_func_hint); 6596 rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT, 6597 &hns3_parse_io_hint_func, &tx_func_hint); 6598 rte_kvargs_free(kvlist); 6599 6600 if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE) 6601 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT, 6602 hns3_get_io_hint_func_name(rx_func_hint)); 6603 hns->rx_func_hint = rx_func_hint; 6604 if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE) 6605 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT, 6606 hns3_get_io_hint_func_name(tx_func_hint)); 6607 hns->tx_func_hint = tx_func_hint; 6608 } 6609 6610 static const struct eth_dev_ops hns3_eth_dev_ops = { 6611 .dev_configure = hns3_dev_configure, 6612 .dev_start = hns3_dev_start, 6613 .dev_stop = hns3_dev_stop, 6614 .dev_close = hns3_dev_close, 6615 .promiscuous_enable = hns3_dev_promiscuous_enable, 6616 .promiscuous_disable = hns3_dev_promiscuous_disable, 6617 .allmulticast_enable = hns3_dev_allmulticast_enable, 6618 .allmulticast_disable = hns3_dev_allmulticast_disable, 6619 .mtu_set = hns3_dev_mtu_set, 6620 .stats_get = hns3_stats_get, 6621 .stats_reset = hns3_stats_reset, 6622 .xstats_get = hns3_dev_xstats_get, 6623 .xstats_get_names = hns3_dev_xstats_get_names, 6624 .xstats_reset = hns3_dev_xstats_reset, 6625 .xstats_get_by_id = hns3_dev_xstats_get_by_id, 6626 .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, 6627 .dev_infos_get = hns3_dev_infos_get, 6628 .fw_version_get = hns3_fw_version_get, 6629 .rx_queue_setup = hns3_rx_queue_setup, 6630 .tx_queue_setup = hns3_tx_queue_setup, 6631 .rx_queue_release = hns3_dev_rx_queue_release, 6632 .tx_queue_release = hns3_dev_tx_queue_release, 6633 .rx_queue_start = hns3_dev_rx_queue_start, 6634 .rx_queue_stop = hns3_dev_rx_queue_stop, 6635 .tx_queue_start = hns3_dev_tx_queue_start, 6636 .tx_queue_stop = hns3_dev_tx_queue_stop, 6637 .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, 6638 .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, 6639 .rxq_info_get = hns3_rxq_info_get, 6640 .txq_info_get = hns3_txq_info_get, 6641 .rx_burst_mode_get = hns3_rx_burst_mode_get, 6642 .tx_burst_mode_get = hns3_tx_burst_mode_get, 6643 .flow_ctrl_get = hns3_flow_ctrl_get, 6644 .flow_ctrl_set = hns3_flow_ctrl_set, 6645 .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set, 6646 .mac_addr_add = hns3_add_mac_addr, 6647 .mac_addr_remove = hns3_remove_mac_addr, 6648 .mac_addr_set = hns3_set_default_mac_addr, 6649 .set_mc_addr_list = hns3_set_mc_mac_addr_list, 6650 
.link_update = hns3_dev_link_update, 6651 .rss_hash_update = hns3_dev_rss_hash_update, 6652 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, 6653 .reta_update = hns3_dev_rss_reta_update, 6654 .reta_query = hns3_dev_rss_reta_query, 6655 .flow_ops_get = hns3_dev_flow_ops_get, 6656 .vlan_filter_set = hns3_vlan_filter_set, 6657 .vlan_tpid_set = hns3_vlan_tpid_set, 6658 .vlan_offload_set = hns3_vlan_offload_set, 6659 .vlan_pvid_set = hns3_vlan_pvid_set, 6660 .get_reg = hns3_get_regs, 6661 .get_module_info = hns3_get_module_info, 6662 .get_module_eeprom = hns3_get_module_eeprom, 6663 .get_dcb_info = hns3_get_dcb_info, 6664 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, 6665 .fec_get_capability = hns3_fec_get_capability, 6666 .fec_get = hns3_fec_get, 6667 .fec_set = hns3_fec_set, 6668 .tm_ops_get = hns3_tm_ops_get, 6669 .tx_done_cleanup = hns3_tx_done_cleanup, 6670 }; 6671 6672 static const struct hns3_reset_ops hns3_reset_ops = { 6673 .reset_service = hns3_reset_service, 6674 .stop_service = hns3_stop_service, 6675 .prepare_reset = hns3_prepare_reset, 6676 .wait_hardware_ready = hns3_wait_hardware_ready, 6677 .reinit_dev = hns3_reinit_dev, 6678 .restore_conf = hns3_restore_conf, 6679 .start_service = hns3_start_service, 6680 }; 6681 6682 static int 6683 hns3_dev_init(struct rte_eth_dev *eth_dev) 6684 { 6685 struct hns3_adapter *hns = eth_dev->data->dev_private; 6686 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 6687 struct rte_ether_addr *eth_addr; 6688 struct hns3_hw *hw = &hns->hw; 6689 int ret; 6690 6691 PMD_INIT_FUNC_TRACE(); 6692 6693 eth_dev->process_private = (struct hns3_process_private *) 6694 rte_zmalloc_socket("hns3_filter_list", 6695 sizeof(struct hns3_process_private), 6696 RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node); 6697 if (eth_dev->process_private == NULL) { 6698 PMD_INIT_LOG(ERR, "Failed to alloc memory for process private"); 6699 return -ENOMEM; 6700 } 6701 /* initialize flow filter lists */ 6702 hns3_filterlist_init(eth_dev); 6703 6704 hns3_set_rxtx_function(eth_dev); 6705 eth_dev->dev_ops = &hns3_eth_dev_ops; 6706 eth_dev->rx_queue_count = hns3_rx_queue_count; 6707 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 6708 ret = hns3_mp_init_secondary(); 6709 if (ret) { 6710 PMD_INIT_LOG(ERR, "Failed to init for secondary " 6711 "process, ret = %d", ret); 6712 goto err_mp_init_secondary; 6713 } 6714 6715 hw->secondary_cnt++; 6716 return 0; 6717 } 6718 6719 ret = hns3_mp_init_primary(); 6720 if (ret) { 6721 PMD_INIT_LOG(ERR, 6722 "Failed to init for primary process, ret = %d", 6723 ret); 6724 goto err_mp_init_primary; 6725 } 6726 6727 hw->adapter_state = HNS3_NIC_UNINITIALIZED; 6728 hns->is_vf = false; 6729 hw->data = eth_dev->data; 6730 hns3_parse_devargs(eth_dev); 6731 6732 /* 6733 * Set the default max packet size according to the default MTU 6734 * value in the DPDK framework.
6735 */ 6736 hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD; 6737 6738 ret = hns3_reset_init(hw); 6739 if (ret) 6740 goto err_init_reset; 6741 hw->reset.ops = &hns3_reset_ops; 6742 6743 ret = hns3_init_pf(eth_dev); 6744 if (ret) { 6745 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret); 6746 goto err_init_pf; 6747 } 6748 6749 /* Allocate memory for storing MAC addresses */ 6750 eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac", 6751 sizeof(struct rte_ether_addr) * 6752 HNS3_UC_MACADDR_NUM, 0); 6753 if (eth_dev->data->mac_addrs == NULL) { 6754 PMD_INIT_LOG(ERR, "Failed to allocate %zu bytes needed " 6755 "to store MAC addresses", 6756 sizeof(struct rte_ether_addr) * 6757 HNS3_UC_MACADDR_NUM); 6758 ret = -ENOMEM; 6759 goto err_rte_zmalloc; 6760 } 6761 6762 eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr; 6763 if (!rte_is_valid_assigned_ether_addr(eth_addr)) { 6764 rte_eth_random_addr(hw->mac.mac_addr); 6765 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 6766 (struct rte_ether_addr *)hw->mac.mac_addr); 6767 hns3_warn(hw, "default mac_addr from firmware is an invalid " 6768 "unicast address, using random MAC address %s", 6769 mac_str); 6770 } 6771 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, 6772 &eth_dev->data->mac_addrs[0]); 6773 6774 hw->adapter_state = HNS3_NIC_INITIALIZED; 6775 6776 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 6777 SCHEDULE_PENDING) { 6778 hns3_err(hw, "Reschedule reset service after dev_init"); 6779 hns3_schedule_reset(hns); 6780 } else { 6781 /* IMP will wait ready flag before reset */ 6782 hns3_notify_reset_ready(hw, false); 6783 } 6784 6785 hns3_info(hw, "hns3 dev initialization successful!"); 6786 return 0; 6787 6788 err_rte_zmalloc: 6789 hns3_uninit_pf(eth_dev); 6790 6791 err_init_pf: 6792 rte_free(hw->reset.wait_data); 6793 6794 err_init_reset: 6795 hns3_mp_uninit_primary(); 6796 6797 err_mp_init_primary: 6798 err_mp_init_secondary: 6799 eth_dev->dev_ops = NULL; 6800 eth_dev->rx_pkt_burst = NULL; 6801 eth_dev->rx_descriptor_status = NULL; 6802 eth_dev->tx_pkt_burst = NULL; 6803 eth_dev->tx_pkt_prepare = NULL; 6804 eth_dev->tx_descriptor_status = NULL; 6805 rte_free(eth_dev->process_private); 6806 eth_dev->process_private = NULL; 6807 return ret; 6808 } 6809 6810 static int 6811 hns3_dev_uninit(struct rte_eth_dev *eth_dev) 6812 { 6813 struct hns3_adapter *hns = eth_dev->data->dev_private; 6814 struct hns3_hw *hw = &hns->hw; 6815 6816 PMD_INIT_FUNC_TRACE(); 6817 6818 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 6819 rte_free(eth_dev->process_private); 6820 eth_dev->process_private = NULL; 6821 return 0; 6822 } 6823 6824 if (hw->adapter_state < HNS3_NIC_CLOSING) 6825 hns3_dev_close(eth_dev); 6826 6827 hw->adapter_state = HNS3_NIC_REMOVED; 6828 return 0; 6829 } 6830 6831 static int 6832 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 6833 struct rte_pci_device *pci_dev) 6834 { 6835 return rte_eth_dev_pci_generic_probe(pci_dev, 6836 sizeof(struct hns3_adapter), 6837 hns3_dev_init); 6838 } 6839 6840 static int 6841 eth_hns3_pci_remove(struct rte_pci_device *pci_dev) 6842 { 6843 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit); 6844 } 6845 6846 static const struct rte_pci_id pci_id_hns3_map[] = { 6847 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) }, 6848 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) }, 6849 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) }, 6850 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, 6851 {
RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, 6852 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, 6853 { .vendor_id = 0, }, /* sentinel */ 6854 }; 6855 6856 static struct rte_pci_driver rte_hns3_pmd = { 6857 .id_table = pci_id_hns3_map, 6858 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 6859 .probe = eth_hns3_pci_probe, 6860 .remove = eth_hns3_pci_remove, 6861 }; 6862 6863 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd); 6864 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map); 6865 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); 6866 RTE_PMD_REGISTER_PARAM_STRING(net_hns3, 6867 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " 6868 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "); 6869 RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE); 6870 RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE); 6871
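/*
 * Usage sketch: the devargs registered above select the Rx/Tx burst
 * implementations. Assuming HNS3_DEVARG_RX_FUNC_HINT/HNS3_DEVARG_TX_FUNC_HINT
 * expand to "rx_func_hint"/"tx_func_hint" and a hypothetical PCI address:
 *   dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common -- -i
 */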