/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <bus_pci_driver.h>
#include <ethdev_pci.h>

#include "hns3_common.h"
#include "hns3_dump.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"
#include "hns3_flow.h"
#include "hns3_ptp.h"
#include "hns3_ethdev.h"

#define HNS3_SERVICE_INTERVAL		1000000 /* us */
#define HNS3_SERVICE_QUICK_INTERVAL	10
#define HNS3_INVALID_PVID		0xFFFF

#define HNS3_FILTER_TYPE_VF		0
#define HNS3_FILTER_TYPE_PORT		1
#define HNS3_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HNS3_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HNS3_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HNS3_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HNS3_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HNS3_FILTER_FE_EGRESS		(HNS3_FILTER_FE_NIC_EGRESS_B \
					| HNS3_FILTER_FE_ROCE_EGRESS_B)
#define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
					| HNS3_FILTER_FE_ROCE_INGRESS_B)

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

#define HNS3_VECTOR0_IMP_RESET_INT_B	1
#define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
#define HNS3_VECTOR0_IMP_RD_POISON_B	5U
#define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U
#define HNS3_VECTOR0_TRIGGER_IMP_RESET_B	7U

#define HNS3_RESET_WAIT_MS	100
#define HNS3_RESET_WAIT_CNT	200

enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,
	HNS3_VECTOR0_EVENT_MBX,
	HNS3_VECTOR0_EVENT_ERR,
	HNS3_VECTOR0_EVENT_PTP,
	HNS3_VECTOR0_EVENT_OTHER,
};

struct hns3_intr_state {
	uint32_t vector0_state;
	uint32_t cmdq_state;
	uint32_t hw_err_state;
};

#define HNS3_SPEEDS_SUPP_FEC (RTE_ETH_LINK_SPEED_10G | \
			      RTE_ETH_LINK_SPEED_25G | \
			      RTE_ETH_LINK_SPEED_40G | \
			      RTE_ETH_LINK_SPEED_50G | \
			      RTE_ETH_LINK_SPEED_100G | \
			      RTE_ETH_LINK_SPEED_200G)

static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(RS) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(LLRS) }
};

static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
						  uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
				    int on);
static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
static bool hns3_update_link_status(struct hns3_hw *hw);

static int hns3_add_mc_mac_addr(struct hns3_hw *hw,
				struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
				   struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct hns3_hw *hw);
static int hns3_do_stop(struct hns3_adapter *hns);
static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);
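
/*
 * Note (editorial summary, inferred from the code below): misc interrupt
 * vector 0 multiplexes reset, mailbox, PTP (1588) and error events (see
 * enum hns3_evt_cause). The two helpers below mask and unmask the whole
 * vector by writing 0 or 1 to its control register.
 */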

static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3_evt_cause
hns3_proc_imp_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
	hw->reset.stats.imp_cnt++;
	hns3_warn(hw, "IMP reset detected, clear reset status");

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_proc_global_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
	hw->reset.stats.global_cnt++;
	hns3_warn(hw, "Global reset detected, clear reset status");

	return HNS3_VECTOR0_EVENT_RST;
}

static void
hns3_query_intr_state(struct hns3_hw *hw, struct hns3_intr_state *state)
{
	state->vector0_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	state->cmdq_state = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	state->hw_err_state = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
}
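
/*
 * Event causes are decoded below in strict priority order: IMP reset,
 * then global reset, 1588 (PTP), MSI-X/RAS errors, and finally the
 * mailbox (CMDQ RX). Only the reported cause is cleared, so a deferred
 * lower-priority event re-raises the interrupt afterwards.
 */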

static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_intr_state state;
	uint32_t val;
	enum hns3_evt_cause ret;

	hns3_query_intr_state(hw, &state);

	/*
	 * Assumption: if reset and mailbox events are reported together,
	 * only the reset event is processed and the mailbox event is
	 * deferred. Since the RX CMDQ event is not cleared this time,
	 * hardware raises another interrupt just for the mailbox.
	 */
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & state.vector0_state) { /* IMP */
		ret = hns3_proc_imp_reset_event(hns, &val);
		goto out;
	}

	/* Global reset */
	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & state.vector0_state) {
		ret = hns3_proc_global_reset_event(hns, &val);
		goto out;
	}

	/* Check for vector0 1588 event source */
	if (BIT(HNS3_VECTOR0_1588_INT_B) & state.vector0_state) {
		val = BIT(HNS3_VECTOR0_1588_INT_B);
		ret = HNS3_VECTOR0_EVENT_PTP;
		goto out;
	}

	/* check for vector0 msix event source */
	if (state.vector0_state & HNS3_VECTOR0_REG_MSIX_MASK ||
	    state.hw_err_state & HNS3_RAS_REG_NFE_MASK) {
		val = state.vector0_state | state.hw_err_state;
		ret = HNS3_VECTOR0_EVENT_ERR;
		goto out;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & state.cmdq_state) {
		state.cmdq_state &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		val = state.cmdq_state;
		ret = HNS3_VECTOR0_EVENT_MBX;
		goto out;
	}

	val = state.vector0_state;
	ret = HNS3_VECTOR0_EVENT_OTHER;

out:
	*clearval = val;
	return ret;
}

void
hns3_clear_reset_event(struct hns3_hw *hw)
{
	uint32_t clearval = 0;

	switch (hw->reset.level) {
	case HNS3_IMP_RESET:
		clearval = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
		break;
	case HNS3_GLOBAL_RESET:
		clearval = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
		break;
	default:
		break;
	}

	if (clearval == 0)
		return;

	hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, clearval);

	hns3_pf_enable_irq0(hw);
}

static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
	if (event_type == HNS3_VECTOR0_EVENT_RST ||
	    event_type == HNS3_VECTOR0_EVENT_PTP)
		hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
	else if (event_type == HNS3_VECTOR0_EVENT_MBX)
		hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
	uint32_t vector0_int_stats;

	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during IMP reset interrupt");

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during Global reset interrupt");

	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
			       BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
			       BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
			       BIT(HNS3_VECTOR0_CORERESET_INT_B));
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
			       BIT(HNS3_VECTOR0_1588_INT_B));
}

static void
hns3_handle_mac_tnl(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	uint32_t status;
	int ret;

	/* query and clear mac tnl interrupt */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
		return;
	}

	status = rte_le_to_cpu_32(desc.data[0]);
	if (status) {
		hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
					  false);
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret)
			hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
				 ret);
	}
}

static void
hns3_delay_before_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
#define IMPRESET_WAIT_MS_TIME	5

	if (event_type == HNS3_VECTOR0_EVENT_RST &&
	    regclr & BIT(HNS3_VECTOR0_IMPRESET_INT_B) &&
	    hw->revision >= PCI_REVISION_ID_HIP09_A) {
		rte_delay_ms(IMPRESET_WAIT_MS_TIME);
		hns3_dbg(hw, "wait for firmware watchdog initialization to complete.");
	}
}
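
/*
 * A reset interrupt is only worth handling if it requests a deeper
 * reset level than the one already pending; hns3_reset_event_valid()
 * filters out repeated or lower-level requests.
 */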
int, ret = %d.", 306 ret); 307 } 308 } 309 310 static void 311 hns3_delay_before_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr) 312 { 313 #define IMPRESET_WAIT_MS_TIME 5 314 315 if (event_type == HNS3_VECTOR0_EVENT_RST && 316 regclr & BIT(HNS3_VECTOR0_IMPRESET_INT_B) && 317 hw->revision >= PCI_REVISION_ID_HIP09_A) { 318 rte_delay_ms(IMPRESET_WAIT_MS_TIME); 319 hns3_dbg(hw, "wait firmware watchdog initialization completed."); 320 } 321 } 322 323 static bool 324 hns3_reset_event_valid(struct hns3_hw *hw) 325 { 326 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 327 enum hns3_reset_level new_req = HNS3_NONE_RESET; 328 enum hns3_reset_level last_req; 329 uint32_t vector0_int; 330 331 vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); 332 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int) 333 new_req = HNS3_IMP_RESET; 334 else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int) 335 new_req = HNS3_GLOBAL_RESET; 336 if (new_req == HNS3_NONE_RESET) 337 return true; 338 339 last_req = hns3_get_reset_level(hns, &hw->reset.pending); 340 if (last_req == HNS3_NONE_RESET) 341 return true; 342 343 if (new_req > last_req) 344 return true; 345 346 hns3_warn(hw, "last_req (%u) less than or equal to new_req (%u) ignore", 347 last_req, new_req); 348 return false; 349 } 350 351 static void 352 hns3_interrupt_handler(void *param) 353 { 354 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 355 struct hns3_adapter *hns = dev->data->dev_private; 356 struct hns3_hw *hw = &hns->hw; 357 enum hns3_evt_cause event_cause; 358 struct hns3_intr_state state; 359 uint32_t clearval = 0; 360 361 if (!hns3_reset_event_valid(hw)) 362 return; 363 364 /* Disable interrupt */ 365 hns3_pf_disable_irq0(hw); 366 367 event_cause = hns3_check_event_cause(hns, &clearval); 368 hns3_query_intr_state(hw, &state); 369 hns3_delay_before_clear_event_cause(hw, event_cause, clearval); 370 hns3_clear_event_cause(hw, event_cause, clearval); 371 /* vector 0 interrupt is shared with reset and mailbox source events. 

static int
hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3_VLAN_ID_OFFSET_STEP	160
#define HNS3_VLAN_BYTE_SIZE		8
	struct hns3_vlan_filter_pf_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	uint8_t vlan_offset_byte_val;
	struct hns3_cmd_desc desc;
	uint8_t vlan_offset_byte;
	uint8_t vlan_offset_base;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
			   HNS3_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);

	req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_base;
	req->vlan_cfg = on ? 0 : 1;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
			 vlan_id, ret);

	return ret;
}
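
/*
 * pf->vlan_list shadows the hardware VLAN filter table. Each entry's
 * hd_tbl_status records whether the VLAN is currently programmed in
 * hardware, so the table can be replayed after a reset or a
 * port-based VLAN (PVID) transition.
 */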

static void
hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id) {
			if (vlan_entry->hd_tbl_status)
				hns3_set_port_vlan_filter(hns, vlan_id, 0);
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			break;
		}
	}
}

static void
hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
			bool written_to_tbl)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id)
			return;
	}

	vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
	if (vlan_entry == NULL) {
		hns3_err(hw, "Failed to malloc hns3 vlan table");
		return;
	}

	vlan_entry->hd_tbl_status = written_to_tbl;
	vlan_entry->vlan_id = vlan_id;

	LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
}

static int
hns3_restore_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;
	uint16_t vlan_id;
	int ret = 0;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
		return hns3_vlan_pvid_configure(hns,
						hw->port_base_vlan_cfg.pvid, 1);

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			vlan_id = vlan_entry->vlan_id;
			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
			if (ret)
				break;
		}
	}

	return ret;
}

static int
hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
	struct hns3_hw *hw = &hns->hw;
	bool written_to_tbl = false;
	int ret = 0;

	/*
	 * When VLAN filtering is enabled, hardware regards packets without
	 * VLAN as packets with VLAN 0. So, to receive untagged packets,
	 * VLAN ID 0 must not be removed by rte_eth_dev_vlan_filter.
	 */
	if (on == 0 && vlan_id == 0)
		return 0;

	/*
	 * When port-based VLAN is enabled, it is used as the VLAN filter
	 * condition. In that case the VLAN filter table is not updated when
	 * the user adds or removes a VLAN; only the VLAN list is updated.
	 * The VLAN IDs in the list are written to the filter table once
	 * port-based VLAN is disabled.
	 */
	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
		written_to_tbl = true;
	}

	if (ret == 0) {
		if (on)
			hns3_add_dev_vlan_table(hns, vlan_id, written_to_tbl);
		else
			hns3_rm_dev_vlan_table(hns, vlan_id);
	}
	return ret;
}

static int
hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}
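
/*
 * Note: only the standard 802.1Q TPID is supported; both the outer and
 * inner protocol IDs are forced to RTE_ETHER_TYPE_VLAN (0x8100) below,
 * and any other TPID is rejected with -EINVAL.
 */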

static int
hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
			 uint16_t tpid)
{
	struct hns3_rx_vlan_type_cfg_cmd *rx_req;
	struct hns3_tx_vlan_type_cfg_cmd *tx_req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_VLAN) {
		hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
			 ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
	tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
			 ret);
	return ret;
}

static int
hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_rx_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
		     vcfg->strip_tag1_discard_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
		     vcfg->strip_tag2_discard_en ? 1 : 0);
	/*
	 * In the current version VF is not supported when the PF is driven
	 * by the DPDK driver, so only the PF vport needs to be configured.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
	return ret;
}
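
/*
 * Rx tag handling depends on the port-based VLAN (PVID) state: with
 * PVID disabled, the user's strip setting acts on tag2; with PVID
 * enabled, hardware reports the PVID as tag2, which is always stripped
 * and discarded, while tag1 (the user VLAN) follows the user's strip
 * setting. See also hns3_en_pvid_strip().
 */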

static int
hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
{
	struct hns3_rx_vtag_cfg rxvlan_cfg;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		rxvlan_cfg.strip_tag1_en = false;
		rxvlan_cfg.strip_tag2_en = enable;
		rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		rxvlan_cfg.strip_tag1_en = enable;
		rxvlan_cfg.strip_tag2_en = true;
		rxvlan_cfg.strip_tag2_discard_en = true;
	}

	rxvlan_cfg.strip_tag1_discard_en = false;
	rxvlan_cfg.vlan1_vlan_prionly = false;
	rxvlan_cfg.vlan2_vlan_prionly = false;
	rxvlan_cfg.rx_vlan_offload_en = enable;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
	if (ret) {
		hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
			 enable ? "enable" : "disable", ret);
		return ret;
	}

	memcpy(&hns->pf.vtag_config.rx_vcfg, &rxvlan_cfg,
	       sizeof(struct hns3_rx_vtag_cfg));

	return ret;
}

static int
hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
			  uint8_t fe_type, bool filter_en, uint8_t vf_id)
{
	struct hns3_vlan_filter_ctrl_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set vlan filter fail, ret =%d", ret);

	return ret;
}

static int
hns3_vlan_filter_init(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
					HNS3_FILTER_FE_EGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret) {
		hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);

	return ret;
}

static int
hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, enable,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to %s port vlan filter, ret = %d",
			 enable ? "enable" : "disable", ret);

	return ret;
}
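
/*
 * Only RTE_ETH_VLAN_FILTER_MASK and RTE_ETH_VLAN_STRIP_MASK are handled
 * below; RTE_ETH_VLAN_EXTEND_MASK is ignored by this implementation,
 * and filter configuration is skipped while the port is promiscuous.
 */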
"enable" : "disable", ret); 751 752 return ret; 753 } 754 755 static int 756 hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask) 757 { 758 struct hns3_adapter *hns = dev->data->dev_private; 759 struct hns3_hw *hw = &hns->hw; 760 struct rte_eth_rxmode *rxmode; 761 unsigned int tmp_mask; 762 bool enable; 763 int ret = 0; 764 765 rte_spinlock_lock(&hw->lock); 766 rxmode = &dev->data->dev_conf.rxmode; 767 tmp_mask = (unsigned int)mask; 768 if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) { 769 /* ignore vlan filter configuration during promiscuous mode */ 770 if (!dev->data->promiscuous) { 771 /* Enable or disable VLAN filter */ 772 enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? 773 true : false; 774 775 ret = hns3_enable_vlan_filter(hns, enable); 776 if (ret) { 777 rte_spinlock_unlock(&hw->lock); 778 hns3_err(hw, "failed to %s rx filter, ret = %d", 779 enable ? "enable" : "disable", ret); 780 return ret; 781 } 782 } 783 } 784 785 if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) { 786 /* Enable or disable VLAN stripping */ 787 enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? 788 true : false; 789 790 ret = hns3_en_hw_strip_rxvtag(hns, enable); 791 if (ret) { 792 rte_spinlock_unlock(&hw->lock); 793 hns3_err(hw, "failed to %s rx strip, ret = %d", 794 enable ? "enable" : "disable", ret); 795 return ret; 796 } 797 } 798 799 rte_spinlock_unlock(&hw->lock); 800 801 return ret; 802 } 803 804 static int 805 hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns, 806 struct hns3_tx_vtag_cfg *vcfg) 807 { 808 struct hns3_vport_vtag_tx_cfg_cmd *req; 809 struct hns3_cmd_desc desc; 810 struct hns3_hw *hw = &hns->hw; 811 uint16_t vport_id; 812 uint8_t bitmap; 813 int ret; 814 815 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false); 816 817 req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data; 818 req->def_vlan_tag1 = vcfg->default_tag1; 819 req->def_vlan_tag2 = vcfg->default_tag2; 820 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B, 821 vcfg->accept_tag1 ? 1 : 0); 822 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B, 823 vcfg->accept_untag1 ? 1 : 0); 824 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B, 825 vcfg->accept_tag2 ? 1 : 0); 826 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B, 827 vcfg->accept_untag2 ? 1 : 0); 828 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B, 829 vcfg->insert_tag1_en ? 1 : 0); 830 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B, 831 vcfg->insert_tag2_en ? 1 : 0); 832 hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0); 833 834 /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */ 835 hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B, 836 vcfg->tag_shift_mode_en ? 1 : 0); 837 838 /* 839 * In current version VF is not supported when PF is driven by DPDK 840 * driver, just need to configure parameters for PF vport. 

static int
hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
		     uint16_t pvid)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_vtag_cfg txvlan_cfg;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
		txvlan_cfg.accept_tag1 = true;
		txvlan_cfg.insert_tag1_en = false;
		txvlan_cfg.default_tag1 = 0;
	} else {
		txvlan_cfg.accept_tag1 =
			hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
		txvlan_cfg.insert_tag1_en = true;
		txvlan_cfg.default_tag1 = pvid;
	}

	txvlan_cfg.accept_untag1 = true;
	txvlan_cfg.accept_tag2 = true;
	txvlan_cfg.accept_untag2 = true;
	txvlan_cfg.insert_tag2_en = false;
	txvlan_cfg.default_tag2 = 0;
	txvlan_cfg.tag_shift_mode_en = true;

	ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
	if (ret) {
		hns3_err(hw, "pf vlan set pvid failed, pvid = %u, ret = %d",
			 pvid, ret);
		return ret;
	}

	memcpy(&hns->pf.vtag_config.tx_vcfg, &txvlan_cfg,
	       sizeof(struct hns3_tx_vtag_cfg));

	return ret;
}

static void
hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
			vlan_entry->hd_tbl_status = false;
		}
	}

	if (is_del_list) {
		vlan_entry = LIST_FIRST(&pf->vlan_list);
		while (vlan_entry) {
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			vlan_entry = LIST_FIRST(&pf->vlan_list);
		}
	}
}

static void
hns3_add_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (!vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
			vlan_entry->hd_tbl_status = true;
		}
	}
}

static void
hns3_remove_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	hns3_rm_all_vlan_table(hns, true);
	if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
		ret = hns3_set_port_vlan_filter(hns,
						hw->port_base_vlan_cfg.pvid, 0);
		if (ret) {
			hns3_err(hw, "Failed to remove all vlan table, ret =%d",
				 ret);
			return;
		}
	}
}

static int
hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
				uint16_t port_base_vlan_state, uint16_t new_pvid)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t old_pvid;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
		old_pvid = hw->port_base_vlan_cfg.pvid;
		if (old_pvid != HNS3_INVALID_PVID) {
			ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
			if (ret) {
				hns3_err(hw, "failed to remove old pvid %u, "
					 "ret = %d", old_pvid, ret);
				return ret;
			}
		}

		hns3_rm_all_vlan_table(hns, false);
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
		if (ret) {
			hns3_err(hw, "failed to add new pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}
	} else {
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
		if (ret) {
			hns3_err(hw, "failed to remove pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}

		hns3_add_all_vlan_table(hns);
	}
	return 0;
}
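
/*
 * hns3_en_pvid_strip() below reprograms Rx stripping for a PVID state
 * change while preserving the user's RTE_ETH_RX_OFFLOAD_VLAN_STRIP
 * choice, which is cached in pf->vtag_config.rx_vcfg.rx_vlan_offload_en.
 */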

static int
hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
{
	struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
	struct hns3_rx_vtag_cfg rx_vlan_cfg;
	bool rx_strip_en;
	int ret;

	rx_strip_en = old_cfg->rx_vlan_offload_en;
	if (on) {
		rx_vlan_cfg.strip_tag1_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_en = true;
		rx_vlan_cfg.strip_tag2_discard_en = true;
	} else {
		rx_vlan_cfg.strip_tag1_en = false;
		rx_vlan_cfg.strip_tag2_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_discard_en = false;
	}
	rx_vlan_cfg.strip_tag1_discard_en = false;
	rx_vlan_cfg.vlan1_vlan_prionly = false;
	rx_vlan_cfg.vlan2_vlan_prionly = false;
	rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
	if (ret)
		return ret;

	memcpy(&hns->pf.vtag_config.rx_vcfg, &rx_vlan_cfg,
	       sizeof(struct hns3_rx_vtag_cfg));

	return ret;
}

static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t port_base_vlan_state;
	int ret, err;

	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
		if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
			hns3_warn(hw, "Invalid operation! As current pvid set "
				  "is %u, disable pvid %u is invalid",
				  hw->port_base_vlan_cfg.pvid, pvid);
		return 0;
	}

	port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
				    HNS3_PORT_BASE_VLAN_DISABLE;
	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
			 ret);
		return ret;
	}

	ret = hns3_en_pvid_strip(hns, on);
	if (ret) {
		hns3_err(hw, "failed to config rx vlan strip for pvid, "
			 "ret = %d", ret);
		goto pvid_vlan_strip_fail;
	}

	if (pvid == HNS3_INVALID_PVID)
		goto out;
	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to update vlan filter entries, ret = %d",
			 ret);
		goto vlan_filter_set_fail;
	}

out:
	hw->port_base_vlan_cfg.state = port_base_vlan_state;
	hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
	return ret;

vlan_filter_set_fail:
	err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
				 HNS3_PORT_BASE_VLAN_ENABLE);
	if (err)
		hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
	err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
				   hw->port_base_vlan_cfg.pvid);
	if (err)
		hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

	return ret;
}

static int
hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool pvid_en_state_change;
	uint16_t pvid_state;
	int ret;

	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
		hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
			 RTE_ETHER_MAX_VLAN_ID);
		return -EINVAL;
	}

	/*
	 * If the PVID enable state changes, the PVID configuration state in
	 * struct hns3_tx_queue/hns3_rx_queue must be refreshed as well.
	 */
	pvid_state = hw->port_base_vlan_cfg.state;
	if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
	    (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
		pvid_en_state_change = false;
	else
		pvid_en_state_change = true;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_pvid_configure(hns, pvid, on);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		return ret;
	/*
	 * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE do the PVID-related Tx/Rx
	 * operations need to be handled by the PMD.
	 */
	if (pvid_en_state_change &&
	    hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
		hns3_update_all_queues_pvid_proc_en(hw);

	return 0;
}

static int
hns3_default_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * When VLAN filtering is enabled, hardware regards packets without
	 * VLAN as packets with VLAN 0. Therefore, if VLAN 0 is not in the
	 * VLAN table, packets without VLAN are not received. So add VLAN 0
	 * as the default VLAN.
	 */
	ret = hns3_vlan_filter_configure(hns, 0, 1);
	if (ret)
		hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
	return ret;
}
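
/*
 * VLAN state is split across reset recovery: hns3_init_vlan_config()
 * programs hardware defaults, while hns3_restore_vlan_table() and
 * hns3_restore_vlan_conf() later replay the user's configuration.
 */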

static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * This function can be called in both the initialization and the
	 * reset process. In the reset process it means that hardware has
	 * been reset successfully and the configuration must be restored so
	 * that it remains unchanged across the reset.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
	}

	ret = hns3_vlan_filter_init(hns);
	if (ret) {
		hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
		return ret;
	}

	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
				       RTE_ETHER_TYPE_VLAN);
	if (ret) {
		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
		return ret;
	}

	/*
	 * In the reinit-dev stage of the reset process, the following
	 * VLAN-related configuration may differ from the one at
	 * initialization; it is restored to hardware later, in
	 * hns3_restore_vlan_table() and hns3_restore_vlan_conf().
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
		if (ret) {
			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
			return ret;
		}

		ret = hns3_en_hw_strip_rxvtag(hns, false);
		if (ret) {
			hns3_err(hw, "rx strip configure fail in pf, ret =%d",
				 ret);
			return ret;
		}
	}

	return hns3_default_vlan_config(hns);
}

static int
hns3_restore_vlan_conf(struct hns3_adapter *hns)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	bool enable;
	int ret;

	if (!hw->data->promiscuous) {
		/* restore vlan filter states */
		offloads = hw->data->dev_conf.rxmode.offloads;
		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
		ret = hns3_enable_vlan_filter(hns, enable);
		if (ret) {
			hns3_err(hw, "failed to restore vlan rx filter conf, "
				 "ret = %d", ret);
			return ret;
		}
	}

	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
	if (ret) {
		hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
	if (ret)
		hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);

	return ret;
}

static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_txmode *txmode;
	struct hns3_hw *hw = &hns->hw;
	int mask;
	int ret;

	txmode = &data->dev_conf.txmode;
	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
		hns3_warn(hw,
			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
			  "configuration is not supported! Ignore these two "
			  "parameters: hw_vlan_reject_tagged(%u), "
			  "hw_vlan_reject_untagged(%u)",
			  txmode->hw_vlan_reject_tagged,
			  txmode->hw_vlan_reject_untagged);

	/* Apply vlan offload setting */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
	ret = hns3_vlan_offload_set(dev, mask);
	if (ret) {
		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * If no PVID configuration is set in rte_eth_conf, there is no need
	 * to write VLAN PVID related configuration to hardware.
	 */
	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
		return 0;

	/* Apply pvid setting */
	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
				 txmode->hw_vlan_insert_pvid);
	if (ret)
		hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
			 txmode->pvid, ret);

	return ret;
}
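
/*
 * Note: tso_mss_min and tso_mss_max are separate 16-bit words in the
 * TSO config command, so both are packed below with the same in-word
 * field layout (HNS3_TSO_MSS_MIN_M/_S).
 */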

static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
		unsigned int tso_mss_max)
{
	struct hns3_cfg_tso_status_cmd *req;
	struct hns3_cmd_desc desc;
	uint16_t tso_mss;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hns3_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_min);
	req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_max);
	req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
		   uint16_t *allocated_size, bool is_alloc)
{
	struct hns3_umv_spc_alc_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_umv_spc_alc_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
	hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
	req->space_size = rte_cpu_to_le_32(space_size);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
			     is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = rte_le_to_cpu_32(desc.data[1]);

	return 0;
}

static int
hns3_init_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t allocated_size = 0;
	int ret;

	ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
				 true);
	if (ret)
		return ret;

	if (allocated_size < pf->wanted_umv_size)
		PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
			     pf->wanted_umv_size, allocated_size);

	pf->max_umv_size = (!!allocated_size) ? allocated_size :
			   pf->wanted_umv_size;
	pf->used_umv_size = 0;
	return 0;
}
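
/*
 * UMV (unicast MAC VLAN) space is the function's quota of unicast MAC
 * table entries. The PF requests wanted_umv_size entries from firmware
 * at init time and tracks consumption in pf->used_umv_size so that
 * hns3_is_umv_space_full() can reject additions early.
 */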

static int
hns3_uninit_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	if (pf->max_umv_size == 0)
		return 0;

	ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
	if (ret)
		return ret;

	pf->max_umv_size = 0;

	return 0;
}

static bool
hns3_is_umv_space_full(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	bool is_full;

	is_full = (pf->used_umv_size >= pf->max_umv_size);

	return is_full;
}

static void
hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (is_free) {
		if (pf->used_umv_size > 0)
			pf->used_umv_size--;
	} else
		pf->used_umv_size++;
}

static void
hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
		      const uint8_t *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
			    ((uint32_t)mac_addr[2] << 16) |
			    ((uint32_t)mac_addr[1] << 8) |
			    (uint32_t)mac_addr[0];
	uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];

	hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
		hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
	new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
}

static int
hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
			     uint8_t resp_code,
			     enum hns3_mac_vlan_tbl_opcode op)
{
	if (cmdq_resp) {
		hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status, status=%u",
			 cmdq_resp);
		return -EIO;
	}

	if (op == HNS3_MAC_VLAN_ADD) {
		if (resp_code == 0 || resp_code == 1) {
			return 0;
		} else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for uc_overflow");
			return -ENOSPC;
		} else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for mc_overflow");
			return -ENOSPC;
		}

		hns3_err(hw, "add mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_REMOVE) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "remove mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "remove mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_LKUP) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "lookup mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	}

	hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
		 op);

	return -EINVAL;
}
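
/*
 * MAC table commands use one descriptor for unicast entries
 * (HNS3_UC_MAC_VLAN_OPS_DESC_NUM) and a chained set for multicast
 * entries (HNS3_MC_MAC_VLAN_OPS_DESC_NUM); the extra descriptors of a
 * multicast entry hold the per-function bitmap filled in by
 * hns3_update_desc_vfid().
 */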

static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req,
			 struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int ret;
	int i;

	if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) {
		for (i = 0; i < desc_num - 1; i++) {
			hns3_cmd_setup_basic_desc(&desc[i],
						  HNS3_OPC_MAC_VLAN_ADD, true);
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
			if (i == 0)
				memcpy(desc[i].data, req,
				       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		}
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD,
					  true);
	} else {
		hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
	}
	ret = hns3_cmd_send(hw, desc, desc_num);
	if (ret) {
		hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
			 ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc[0].retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_LKUP);
}

static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
		      struct hns3_mac_vlan_tbl_entry_cmd *req,
		      struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int cfg_status;
	int ret;
	int i;

	if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) {
		hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false);
		memcpy(desc->data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc->retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	} else {
		for (i = 0; i < desc_num; i++) {
			hns3_cmd_reuse_desc(&desc[i], false);
			if (i == desc_num - 1)
				desc[i].flag &=
					rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
			else
				desc[i].flag |=
					rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		}
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		desc[0].retval = 0;
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc[0].retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	}

	if (ret) {
		hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}

	return cfg_status;
}

static int
hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req)
{
	struct hns3_cmd_desc desc;
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc.retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_REMOVE);
}

static int
hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cmd_desc desc;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint16_t egress_port = 0;
	uint8_t vf_id;
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	/*
	 * In the current version VF is not supported when the PF is driven
	 * by the DPDK driver, so only the PF vport needs to be configured.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
		       HNS3_MAC_EPORT_VFID_S, vf_id);

	req.egress_port = rte_cpu_to_le_16(egress_port);

	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

	/*
	 * Look up the MAC address in the MAC VLAN table, and add it if the
	 * entry does not exist. Duplicate unicast entries are not allowed
	 * in the MAC VLAN table.
	 */
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc,
				       HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
	if (ret == -ENOENT) {
		if (!hns3_is_umv_space_full(hw)) {
			ret = hns3_add_mac_vlan_tbl(hw, &req, &desc,
						    HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
			if (!ret)
				hns3_update_umv_space(hw, false);
			return ret;
		}

		hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

		return -ENOSPC;
	}

	hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

	/* check if we just hit the duplicate */
	if (ret == 0) {
		hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
		return 0;
	}

	hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
		 mac_str);

	return ret;
}

static int
hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
	ret = hns3_remove_mac_vlan_tbl(hw, &req);
	if (ret == -ENOENT) /* the mac addr is not in the mac vlan table */
		return 0;
	else if (ret == 0)
		hns3_update_umv_space(hw, true);

	return ret;
}
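
/*
 * Changing the default MAC address is a del-then-add sequence with full
 * rollback: a failed pause-address update deletes the new address
 * again, and any failure re-adds the old one.
 */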

static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mac_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *oaddr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret, ret_val;

	rte_spinlock_lock(&hw->lock);
	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
	ret = hw->ops.del_uc_mac_addr(hw, oaddr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       oaddr);
		hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
			  mac_str, ret);

		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
		goto err_add_uc_addr;
	}

	ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
	if (ret) {
		hns3_err(hw, "Failed to configure mac pause address: %d", ret);
		goto err_pause_addr_cfg;
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	rte_spinlock_unlock(&hw->lock);

	return 0;

err_pause_addr_cfg:
	ret_val = hw->ops.del_uc_mac_addr(hw, mac_addr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_warn(hw,
			  "Failed to roll back to del set mac addr(%s): %d",
			  mac_str, ret_val);
	}

err_add_uc_addr:
	ret_val = hw->ops.add_uc_mac_addr(hw, oaddr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
		hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
			  mac_str, ret_val);
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static void
hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
{
#define HNS3_VF_NUM_IN_FIRST_DESC 192
	uint8_t word_num;
	uint8_t bit_num;

	if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &=
				rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[1].data[word_num] |=
				rte_cpu_to_le_32(1UL << bit_num);
	} else {
		word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &=
				rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[2].data[word_num] |=
				rte_cpu_to_le_32(1UL << bit_num);
	}
}
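
/*
 * The function bitmap of a multicast entry spans descriptors 1 and 2:
 * function IDs 0-191 map to desc[1] (six 32-bit words) and the rest to
 * desc[2]. For example, vfid 200 lands in desc[2], word 0, bit 8.
 */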

static int
hns3_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint8_t vf_id;
	int ret;
	int idx;

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
				       HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret) {
		/* This mac addr does not exist, add a new entry for it */
		for (idx = 0; idx < HNS3_MC_MAC_VLAN_OPS_DESC_NUM; idx++)
			memset(desc[idx].data, 0, sizeof(desc[idx].data));
	}

	/*
	 * In the current version VF is not supported when the PF is driven
	 * by the DPDK driver, so only the PF vport needs to be configured.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_update_desc_vfid(desc, vf_id, false);
	ret = hns3_add_mac_vlan_tbl(hw, &req, desc,
				    HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret) {
		if (ret == -ENOSPC)
			hns3_err(hw, "mc mac vlan table is full");
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
	}

	return ret;
}

static int
hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint8_t vf_id;
	int ret;

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
				       HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret == 0) {
		/*
		 * This mac addr exists, so remove this handle's VF ID from
		 * it. In the current version VF is not supported when the PF
		 * is driven by the DPDK driver, so only the PF vport needs
		 * to be configured.
		 */
		vf_id = HNS3_PF_FUNC_ID;
		hns3_update_desc_vfid(desc, vf_id, true);

		/* All the function bits are now zero, so delete the entry */
		ret = hns3_remove_mac_vlan_tbl(hw, &req);
	} else if (ret == -ENOENT) {
		/* This mac addr does not exist */
		return 0;
	}

	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
	}

	return ret;
}

static int
hns3_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct rte_eth_dcb_tx_conf *dcb_tx_conf;
	uint8_t num_tc;
	int max_tc = 0;
	int i;

	if (((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
	    (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
	     tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
		hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
			 rx_mq_mode, tx_mq_mode);
		return -EOPNOTSUPP;
	}

	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
	if ((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
				 dcb_rx_conf->nb_tcs, pf->tc_max);
			return -EINVAL;
		}

		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
			hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
				 "nb_tcs(%d) != %d or %d in rx direction.",
				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
			return -EINVAL;
		}

		if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
			hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
				 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}

		for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
			if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
				hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
					 "is not equal to one in tx direction.",
					 i, dcb_rx_conf->dcb_tc[i]);
				return -EINVAL;
			}
			if (dcb_rx_conf->dcb_tc[i] > max_tc)
				max_tc = dcb_rx_conf->dcb_tc[i];
		}

		num_tc = max_tc + 1;
		if (num_tc > dcb_rx_conf->nb_tcs) {
			hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
				 num_tc, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}
	}

	return 0;
}
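
/*
 * Each Rx/Tx ring is bound to (or unbound from) an MSI-X vector with a
 * vector-chain command; the GL index selects which interrupt
 * coalescing (GL) register set the ring uses.
 */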
HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; 1953 hns3_cmd_setup_basic_desc(&desc, op, false); 1954 req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M, 1955 HNS3_TQP_INT_ID_L_S); 1956 req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M, 1957 HNS3_TQP_INT_ID_H_S); 1958 1959 if (queue_type == HNS3_RING_TYPE_RX) 1960 gl = HNS3_RING_GL_RX; 1961 else 1962 gl = HNS3_RING_GL_TX; 1963 1964 type = queue_type; 1965 1966 hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S, 1967 type); 1968 hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id); 1969 hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S, 1970 gl); 1971 req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); 1972 req->int_cause_num = 1; 1973 ret = hns3_cmd_send(hw, &desc, 1); 1974 if (ret) { 1975 hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.", 1976 en ? "Map" : "Unmap", queue_id, vector_id, ret); 1977 return ret; 1978 } 1979 1980 return 0; 1981 } 1982 1983 static int 1984 hns3_setup_dcb(struct rte_eth_dev *dev) 1985 { 1986 struct hns3_adapter *hns = dev->data->dev_private; 1987 struct hns3_hw *hw = &hns->hw; 1988 int ret; 1989 1990 if (!hns3_dev_get_support(hw, DCB)) { 1991 hns3_err(hw, "this port does not support dcb configurations."); 1992 return -EOPNOTSUPP; 1993 } 1994 1995 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) { 1996 hns3_err(hw, "MAC pause enabled, cannot config dcb info."); 1997 return -EOPNOTSUPP; 1998 } 1999 2000 ret = hns3_dcb_configure(hns); 2001 if (ret) 2002 hns3_err(hw, "failed to config dcb: %d", ret); 2003 2004 return ret; 2005 } 2006 2007 static int 2008 hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds) 2009 { 2010 int ret; 2011 2012 /* 2013 * Some hardware doesn't support auto-negotiation, but users may not 2014 * configure link_speeds (default 0), which means auto-negotiation. 2015 * In this case, it should return success. 2016 */ 2017 if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG && 2018 hw->mac.support_autoneg == 0) 2019 return 0; 2020 2021 if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) { 2022 ret = hns3_check_port_speed(hw, link_speeds); 2023 if (ret) 2024 return ret; 2025 } 2026 2027 return 0; 2028 } 2029 2030 static int 2031 hns3_check_dev_conf(struct rte_eth_dev *dev) 2032 { 2033 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2034 struct rte_eth_conf *conf = &dev->data->dev_conf; 2035 int ret; 2036 2037 ret = hns3_check_mq_mode(dev); 2038 if (ret) 2039 return ret; 2040 2041 return hns3_check_link_speed(hw, conf->link_speeds); 2042 } 2043 2044 static int 2045 hns3_dev_configure(struct rte_eth_dev *dev) 2046 { 2047 struct hns3_adapter *hns = dev->data->dev_private; 2048 struct rte_eth_conf *conf = &dev->data->dev_conf; 2049 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; 2050 struct hns3_hw *hw = &hns->hw; 2051 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2052 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2053 struct rte_eth_rss_conf rss_conf; 2054 bool gro_en; 2055 int ret; 2056 2057 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); 2058 2059 /* 2060 * Some versions of hardware network engine does not support 2061 * individually enable/disable/reset the Tx or Rx queue. These devices 2062 * must enable/disable/reset Tx and Rx queues at the same time. When the 2063 * numbers of Tx queues allocated by upper applications are not equal to 2064 * the numbers of Rx queues, driver needs to setup fake Tx or Rx queues 2065 * to adjust numbers of Tx/Rx queues. 
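 * For example, when an application configures 8 Rx queues and 4 Tx
 * queues, the driver sets up 4 fake Tx queues so that the hardware
 * still operates on matched Tx/Rx queue pairs.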
Otherwise, the network engine cannot 2066 * work as usual. But these fake queues are invisible to applications and cannot 2067 * be used by them. 2068 */ 2069 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); 2070 if (ret) { 2071 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret); 2072 hw->cfg_max_queues = 0; 2073 return ret; 2074 } 2075 2076 hw->adapter_state = HNS3_NIC_CONFIGURING; 2077 ret = hns3_check_dev_conf(dev); 2078 if (ret) 2079 goto cfg_err; 2080 2081 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) { 2082 ret = hns3_setup_dcb(dev); 2083 if (ret) 2084 goto cfg_err; 2085 } 2086 2087 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 2088 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2089 rss_conf = conf->rx_adv_conf.rss_conf; 2090 ret = hns3_dev_rss_hash_update(dev, &rss_conf); 2091 if (ret) 2092 goto cfg_err; 2093 } 2094 2095 ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu); 2096 if (ret != 0) 2097 goto cfg_err; 2098 2099 ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf); 2100 if (ret) 2101 goto cfg_err; 2102 2103 ret = hns3_dev_configure_vlan(dev); 2104 if (ret) 2105 goto cfg_err; 2106 2107 /* configure hardware GRO */ 2108 gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false; 2109 ret = hns3_config_gro(hw, gro_en); 2110 if (ret) 2111 goto cfg_err; 2112 2113 hns3_init_rx_ptype_tble(dev); 2114 hw->adapter_state = HNS3_NIC_CONFIGURED; 2115 2116 return 0; 2117 2118 cfg_err: 2119 hw->cfg_max_queues = 0; 2120 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); 2121 hw->adapter_state = HNS3_NIC_INITIALIZED; 2122 2123 return ret; 2124 } 2125 2126 static int 2127 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) 2128 { 2129 struct hns3_config_max_frm_size_cmd *req; 2130 struct hns3_cmd_desc desc; 2131 2132 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false); 2133 2134 req = (struct hns3_config_max_frm_size_cmd *)desc.data; 2135 req->max_frm_size = rte_cpu_to_le_16(new_mps); 2136 req->min_frm_size = RTE_ETHER_MIN_LEN; 2137 2138 return hns3_cmd_send(hw, &desc, 1); 2139 } 2140 2141 static int 2142 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) 2143 { 2144 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2145 int err; 2146 int ret; 2147 2148 ret = hns3_set_mac_mtu(hw, mps); 2149 if (ret) { 2150 hns3_err(hw, "failed to set mtu, ret = %d", ret); 2151 return ret; 2152 } 2153 2154 ret = hns3_buffer_alloc(hw); 2155 if (ret) { 2156 hns3_err(hw, "failed to allocate buffer, ret = %d", ret); 2157 goto rollback; 2158 } 2159 2160 hns->pf.mps = mps; 2161 2162 return 0; 2163 2164 rollback: 2165 err = hns3_set_mac_mtu(hw, hns->pf.mps); 2166 if (err) 2167 hns3_err(hw, "fail to rollback MTU, err = %d", err); 2168 2169 return ret; 2170 } 2171 2172 static int 2173 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 2174 { 2175 struct hns3_adapter *hns = dev->data->dev_private; 2176 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; 2177 struct hns3_hw *hw = &hns->hw; 2178 int ret; 2179 2180 if (dev->data->dev_started) { 2181 hns3_err(hw, "Failed to set mtu, port %u must be stopped " 2182 "before configuration", dev->data->port_id); 2183 return -EBUSY; 2184 } 2185 2186 rte_spinlock_lock(&hw->lock); 2187 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN); 2188 2189 /* 2190 * The maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely 2191 * be assigned to a "uint16_t" type variable.
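 * For example, a 1500 byte MTU yields frame_size = 1500 +
 * HNS3_ETH_OVERHEAD, far below UINT16_MAX, so the cast below cannot
 * truncate.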
2192 */ 2193 ret = hns3_config_mtu(hw, (uint16_t)frame_size); 2194 if (ret) { 2195 rte_spinlock_unlock(&hw->lock); 2196 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d", 2197 dev->data->port_id, mtu, ret); 2198 return ret; 2199 } 2200 2201 rte_spinlock_unlock(&hw->lock); 2202 2203 return 0; 2204 } 2205 2206 static uint32_t 2207 hns3_get_copper_port_speed_capa(uint32_t supported_speed) 2208 { 2209 uint32_t speed_capa = 0; 2210 2211 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT) 2212 speed_capa |= RTE_ETH_LINK_SPEED_10M_HD; 2213 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT) 2214 speed_capa |= RTE_ETH_LINK_SPEED_10M; 2215 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT) 2216 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD; 2217 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT) 2218 speed_capa |= RTE_ETH_LINK_SPEED_100M; 2219 if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT) 2220 speed_capa |= RTE_ETH_LINK_SPEED_1G; 2221 2222 return speed_capa; 2223 } 2224 2225 static uint32_t 2226 hns3_get_fiber_port_speed_capa(uint32_t supported_speed) 2227 { 2228 uint32_t speed_capa = 0; 2229 2230 if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT) 2231 speed_capa |= RTE_ETH_LINK_SPEED_1G; 2232 if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT) 2233 speed_capa |= RTE_ETH_LINK_SPEED_10G; 2234 if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT) 2235 speed_capa |= RTE_ETH_LINK_SPEED_25G; 2236 if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT) 2237 speed_capa |= RTE_ETH_LINK_SPEED_40G; 2238 if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT) 2239 speed_capa |= RTE_ETH_LINK_SPEED_50G; 2240 if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT) 2241 speed_capa |= RTE_ETH_LINK_SPEED_100G; 2242 if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT) 2243 speed_capa |= RTE_ETH_LINK_SPEED_200G; 2244 2245 return speed_capa; 2246 } 2247 2248 uint32_t 2249 hns3_get_speed_capa(struct hns3_hw *hw) 2250 { 2251 struct hns3_mac *mac = &hw->mac; 2252 uint32_t speed_capa; 2253 2254 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 2255 speed_capa = 2256 hns3_get_copper_port_speed_capa(mac->supported_speed); 2257 else 2258 speed_capa = 2259 hns3_get_fiber_port_speed_capa(mac->supported_speed); 2260 2261 if (mac->support_autoneg == 0) 2262 speed_capa |= RTE_ETH_LINK_SPEED_FIXED; 2263 2264 return speed_capa; 2265 } 2266 2267 static int 2268 hns3_update_port_link_info(struct rte_eth_dev *eth_dev) 2269 { 2270 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2271 int ret; 2272 2273 (void)hns3_update_link_status(hw); 2274 2275 ret = hns3_update_link_info(eth_dev); 2276 if (ret) 2277 hw->mac.link_status = RTE_ETH_LINK_DOWN; 2278 2279 return ret; 2280 } 2281 2282 static void 2283 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev, 2284 struct rte_eth_link *new_link) 2285 { 2286 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2287 struct hns3_mac *mac = &hw->mac; 2288 2289 switch (mac->link_speed) { 2290 case RTE_ETH_SPEED_NUM_10M: 2291 case RTE_ETH_SPEED_NUM_100M: 2292 case RTE_ETH_SPEED_NUM_1G: 2293 case RTE_ETH_SPEED_NUM_10G: 2294 case RTE_ETH_SPEED_NUM_25G: 2295 case RTE_ETH_SPEED_NUM_40G: 2296 case RTE_ETH_SPEED_NUM_50G: 2297 case RTE_ETH_SPEED_NUM_100G: 2298 case RTE_ETH_SPEED_NUM_200G: 2299 if (mac->link_status) 2300 new_link->link_speed = mac->link_speed; 2301 break; 2302 default: 2303 if (mac->link_status) 2304 new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; 2305 break; 2306 } 2307 2308 if (!mac->link_status) 2309 new_link->link_speed =
RTE_ETH_SPEED_NUM_NONE; 2310 2311 new_link->link_duplex = mac->link_duplex; 2312 new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; 2313 new_link->link_autoneg = mac->link_autoneg; 2314 } 2315 2316 static int 2317 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) 2318 { 2319 #define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */ 2320 #define HNS3_MAX_LINK_CHECK_TIMES 20 /* 2s (100 * 20ms) in total */ 2321 2322 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2323 uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES; 2324 struct hns3_mac *mac = &hw->mac; 2325 struct rte_eth_link new_link; 2326 int ret; 2327 2328 memset(&new_link, 0, sizeof(new_link)); 2329 /* When port is stopped, report link down. */ 2330 if (eth_dev->data->dev_started == 0) { 2331 new_link.link_autoneg = mac->link_autoneg; 2332 new_link.link_duplex = mac->link_duplex; 2333 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE; 2334 new_link.link_status = RTE_ETH_LINK_DOWN; 2335 goto out; 2336 } 2337 2338 do { 2339 ret = hns3_update_port_link_info(eth_dev); 2340 if (ret) { 2341 hns3_err(hw, "failed to get port link info, ret = %d.", 2342 ret); 2343 break; 2344 } 2345 2346 if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP) 2347 break; 2348 2349 rte_delay_ms(HNS3_LINK_CHECK_INTERVAL); 2350 } while (retry_cnt--); 2351 2352 hns3_setup_linkstatus(eth_dev, &new_link); 2353 2354 out: 2355 return rte_eth_linkstatus_set(eth_dev, &new_link); 2356 } 2357 2358 static int 2359 hns3_dev_set_link_up(struct rte_eth_dev *dev) 2360 { 2361 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2362 int ret; 2363 2364 /* 2365 * The "tx_pkt_burst" will be restored. But the secondary process does 2366 * not support the mechanism for notifying the primary process. 2367 */ 2368 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2369 hns3_err(hw, "secondary process does not support to set link up."); 2370 return -ENOTSUP; 2371 } 2372 2373 /* 2374 * If device isn't started Rx/Tx function is still disabled, setting 2375 * link up is not allowed. But it is probably better to return success 2376 * to reduce the impact on the upper layer. 2377 */ 2378 if (hw->adapter_state != HNS3_NIC_STARTED) { 2379 hns3_info(hw, "device isn't started, can't set link up."); 2380 return 0; 2381 } 2382 2383 if (!hw->set_link_down) 2384 return 0; 2385 2386 rte_spinlock_lock(&hw->lock); 2387 ret = hns3_cfg_mac_mode(hw, true); 2388 if (ret) { 2389 rte_spinlock_unlock(&hw->lock); 2390 hns3_err(hw, "failed to set link up, ret = %d", ret); 2391 return ret; 2392 } 2393 2394 hw->set_link_down = false; 2395 hns3_start_tx_datapath(dev); 2396 rte_spinlock_unlock(&hw->lock); 2397 2398 return 0; 2399 } 2400 2401 static int 2402 hns3_dev_set_link_down(struct rte_eth_dev *dev) 2403 { 2404 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2405 int ret; 2406 2407 /* 2408 * The "tx_pkt_burst" will be set to dummy function. But the secondary 2409 * process does not support the mechanism for notifying the primary 2410 * process. 2411 */ 2412 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2413 hns3_err(hw, "secondary process does not support to set link down."); 2414 return -ENOTSUP; 2415 } 2416 2417 /* 2418 * If device isn't started or the API has been called, link status is 2419 * down, return success. 
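 * Applications reach this path via rte_eth_dev_set_link_down(), and
 * calling it twice in a row is harmless: the second call sees
 * hw->set_link_down already true and returns 0.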
2420 */ 2421 if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down) 2422 return 0; 2423 2424 rte_spinlock_lock(&hw->lock); 2425 hns3_stop_tx_datapath(dev); 2426 ret = hns3_cfg_mac_mode(hw, false); 2427 if (ret) { 2428 hns3_start_tx_datapath(dev); 2429 rte_spinlock_unlock(&hw->lock); 2430 hns3_err(hw, "failed to set link down, ret = %d", ret); 2431 return ret; 2432 } 2433 2434 hw->set_link_down = true; 2435 rte_spinlock_unlock(&hw->lock); 2436 2437 return 0; 2438 } 2439 2440 static int 2441 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status) 2442 { 2443 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2444 struct hns3_pf *pf = &hns->pf; 2445 2446 if (!(status->pf_state & HNS3_PF_STATE_DONE)) 2447 return -EINVAL; 2448 2449 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false; 2450 2451 return 0; 2452 } 2453 2454 static int 2455 hns3_query_function_status(struct hns3_hw *hw) 2456 { 2457 #define HNS3_QUERY_MAX_CNT 10 2458 #define HNS3_QUERY_SLEEP_MS 1 2459 struct hns3_func_status_cmd *req; 2460 struct hns3_cmd_desc desc; 2461 int timeout = 0; 2462 int ret; 2463 2464 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true); 2465 req = (struct hns3_func_status_cmd *)desc.data; 2466 2467 do { 2468 ret = hns3_cmd_send(hw, &desc, 1); 2469 if (ret) { 2470 PMD_INIT_LOG(ERR, "query function status failed %d", 2471 ret); 2472 return ret; 2473 } 2474 2475 /* Check whether PF reset is done */ 2476 if (req->pf_state) 2477 break; 2478 2479 rte_delay_ms(HNS3_QUERY_SLEEP_MS); 2480 } while (timeout++ < HNS3_QUERY_MAX_CNT); 2481 2482 return hns3_parse_func_status(hw, req); 2483 } 2484 2485 static int 2486 hns3_get_pf_max_tqp_num(struct hns3_hw *hw) 2487 { 2488 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2489 struct hns3_pf *pf = &hns->pf; 2490 2491 if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) { 2492 /* 2493 * The total_tqps_num obtained from firmware is the maximum number 2494 * of tqps on this port, which has to be shared by the PF and VFs. 2495 * In most cases the PF does not need that many tqps. 2496 * RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2497 * coming from the config file, is the maximum queue number the user 2498 * assigns to the PF of this port. Users can thus tailor the 2499 * maximum queue number of the PF to their own application 2500 * scenarios, which is more flexible to use. In addition, a lot of 2501 * memory can be saved because the queue statistics 2502 * room is allocated according to the actual number of queues required. 2503 * The maximum queue number of the PF for a network engine with 2504 * revision_id greater than 0x30 is assigned by the config file. 2505 */ 2506 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) { 2507 hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) " 2508 "must be greater than 0.", 2509 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF); 2510 return -EINVAL; 2511 } 2512 2513 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2514 hw->total_tqps_num); 2515 } else { 2516 /* 2517 * Due to the limited number of PF interrupts 2518 * available, the maximum queue number assigned to the PF on 2519 * the network engine with revision_id 0x21 is 64.
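 * That limit corresponds to HNS3_MAX_TQP_NUM_HIP08_PF, applied through
 * the RTE_MIN() below; the total_tqps_num reported by firmware may be
 * larger.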
2520 */ 2521 hw->tqps_num = RTE_MIN(hw->total_tqps_num, 2522 HNS3_MAX_TQP_NUM_HIP08_PF); 2523 } 2524 2525 return 0; 2526 } 2527 2528 static int 2529 hns3_query_pf_resource(struct hns3_hw *hw) 2530 { 2531 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2532 struct hns3_pf *pf = &hns->pf; 2533 struct hns3_pf_res_cmd *req; 2534 struct hns3_cmd_desc desc; 2535 int ret; 2536 2537 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); 2538 ret = hns3_cmd_send(hw, &desc, 1); 2539 if (ret) { 2540 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); 2541 return ret; 2542 } 2543 2544 req = (struct hns3_pf_res_cmd *)desc.data; 2545 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) + 2546 rte_le_to_cpu_16(req->ext_tqp_num); 2547 ret = hns3_get_pf_max_tqp_num(hw); 2548 if (ret) 2549 return ret; 2550 2551 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; 2552 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); 2553 2554 if (req->tx_buf_size) 2555 pf->tx_buf_size = 2556 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; 2557 else 2558 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; 2559 2560 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); 2561 2562 if (req->dv_buf_size) 2563 pf->dv_buf_size = 2564 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; 2565 else 2566 pf->dv_buf_size = HNS3_DEFAULT_DV; 2567 2568 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); 2569 2570 hw->num_msi = 2571 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), 2572 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); 2573 2574 return 0; 2575 } 2576 2577 static void 2578 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) 2579 { 2580 struct hns3_cfg_param_cmd *req; 2581 uint64_t mac_addr_tmp_high; 2582 uint8_t ext_rss_size_max; 2583 uint64_t mac_addr_tmp; 2584 uint32_t i; 2585 2586 req = (struct hns3_cfg_param_cmd *)desc[0].data; 2587 2588 /* get the configuration */ 2589 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 2590 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); 2591 2592 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2593 HNS3_CFG_PHY_ADDR_M, 2594 HNS3_CFG_PHY_ADDR_S); 2595 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2596 HNS3_CFG_MEDIA_TP_M, 2597 HNS3_CFG_MEDIA_TP_S); 2598 /* get mac address */ 2599 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); 2600 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2601 HNS3_CFG_MAC_ADDR_H_M, 2602 HNS3_CFG_MAC_ADDR_H_S); 2603 2604 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 2605 2606 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2607 HNS3_CFG_DEFAULT_SPEED_M, 2608 HNS3_CFG_DEFAULT_SPEED_S); 2609 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2610 HNS3_CFG_RSS_SIZE_M, 2611 HNS3_CFG_RSS_SIZE_S); 2612 2613 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 2614 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 2615 2616 req = (struct hns3_cfg_param_cmd *)desc[1].data; 2617 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); 2618 2619 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2620 HNS3_CFG_SPEED_ABILITY_M, 2621 HNS3_CFG_SPEED_ABILITY_S); 2622 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2623 HNS3_CFG_UMV_TBL_SPACE_M, 2624 HNS3_CFG_UMV_TBL_SPACE_S); 2625 if (!cfg->umv_space) 2626 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF; 2627 2628 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]), 2629 HNS3_CFG_EXT_RSS_SIZE_M, 2630 
HNS3_CFG_EXT_RSS_SIZE_S); 2631 /* 2632 * Field ext_rss_size_max obtained from firmware is an exponent of 2 2633 * rather than a value to be read out directly, which keeps it flexible 2634 * for future changes and expansions. If this field is not zero, the hns3 PF PMD 2635 * uses it as rss_size_max under one TC. A device whose revision 2636 * id is greater than or equal to PCI_REVISION_ID_HIP09_A obtains the 2637 * maximum number of queues supported under a TC through this field. 2638 */ 2639 if (ext_rss_size_max) 2640 cfg->rss_size_max = 1U << ext_rss_size_max; 2641 } 2642 2643 /* hns3_get_board_cfg: query the static parameters from the NCL_config file in flash 2644 * @hw: pointer to struct hns3_hw 2645 * @hcfg: the config structure to be filled 2646 */ 2647 static int 2648 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg) 2649 { 2650 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM]; 2651 struct hns3_cfg_param_cmd *req; 2652 uint32_t offset; 2653 uint32_t i; 2654 int ret; 2655 2656 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) { 2657 offset = 0; 2658 req = (struct hns3_cfg_param_cmd *)desc[i].data; 2659 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM, 2660 true); 2661 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S, 2662 i * HNS3_CFG_RD_LEN_BYTES); 2663 /* Len should be divided by 4 when sent to hardware */ 2664 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S, 2665 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT); 2666 req->offset = rte_cpu_to_le_32(offset); 2667 } 2668 2669 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM); 2670 if (ret) { 2671 PMD_INIT_LOG(ERR, "get config failed %d.", ret); 2672 return ret; 2673 } 2674 2675 hns3_parse_cfg(hcfg, desc); 2676 2677 return 0; 2678 } 2679 2680 static int 2681 hns3_parse_speed(int speed_cmd, uint32_t *speed) 2682 { 2683 switch (speed_cmd) { 2684 case HNS3_CFG_SPEED_10M: 2685 *speed = RTE_ETH_SPEED_NUM_10M; 2686 break; 2687 case HNS3_CFG_SPEED_100M: 2688 *speed = RTE_ETH_SPEED_NUM_100M; 2689 break; 2690 case HNS3_CFG_SPEED_1G: 2691 *speed = RTE_ETH_SPEED_NUM_1G; 2692 break; 2693 case HNS3_CFG_SPEED_10G: 2694 *speed = RTE_ETH_SPEED_NUM_10G; 2695 break; 2696 case HNS3_CFG_SPEED_25G: 2697 *speed = RTE_ETH_SPEED_NUM_25G; 2698 break; 2699 case HNS3_CFG_SPEED_40G: 2700 *speed = RTE_ETH_SPEED_NUM_40G; 2701 break; 2702 case HNS3_CFG_SPEED_50G: 2703 *speed = RTE_ETH_SPEED_NUM_50G; 2704 break; 2705 case HNS3_CFG_SPEED_100G: 2706 *speed = RTE_ETH_SPEED_NUM_100G; 2707 break; 2708 case HNS3_CFG_SPEED_200G: 2709 *speed = RTE_ETH_SPEED_NUM_200G; 2710 break; 2711 default: 2712 return -EINVAL; 2713 } 2714 2715 return 0; 2716 } 2717 2718 static int 2719 hns3_get_capability(struct hns3_hw *hw) 2720 { 2721 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2722 struct hns3_pf *pf = &hns->pf; 2723 int ret; 2724 2725 ret = hns3_query_mac_stats_reg_num(hw); 2726 if (ret) 2727 return ret; 2728 2729 if (hw->revision < PCI_REVISION_ID_HIP09_A) { 2730 hns3_set_default_dev_specifications(hw); 2731 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; 2732 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; 2733 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; 2734 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE; 2735 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1; 2736 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; 2737 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE; 2738 hw->rss_info.ipv6_sctp_offload_supported = false; 2739 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; 2740 pf->support_multi_tc_pause = false; 2741 return 0; 2742 }
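	/*
	 * Devices with revision_id >= PCI_REVISION_ID_HIP09_A report their
	 * limits through the "query device specifications" command below
	 * instead of the driver defaults used in the branch above.
	 */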
2743 2744 ret = hns3_query_dev_specifications(hw); 2745 if (ret) { 2746 PMD_INIT_LOG(ERR, 2747 "failed to query dev specifications, ret = %d", 2748 ret); 2749 return ret; 2750 } 2751 2752 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 2753 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 2754 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 2755 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; 2756 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; 2757 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; 2758 hw->rss_info.ipv6_sctp_offload_supported = true; 2759 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; 2760 pf->support_multi_tc_pause = true; 2761 2762 return 0; 2763 } 2764 2765 static int 2766 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) 2767 { 2768 int ret; 2769 2770 switch (media_type) { 2771 case HNS3_MEDIA_TYPE_COPPER: 2772 if (!hns3_dev_get_support(hw, COPPER)) { 2773 PMD_INIT_LOG(ERR, 2774 "Media type is copper, not supported."); 2775 ret = -EOPNOTSUPP; 2776 } else { 2777 ret = 0; 2778 } 2779 break; 2780 case HNS3_MEDIA_TYPE_FIBER: 2781 case HNS3_MEDIA_TYPE_BACKPLANE: 2782 ret = 0; 2783 break; 2784 default: 2785 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); 2786 ret = -EINVAL; 2787 break; 2788 } 2789 2790 return ret; 2791 } 2792 2793 static int 2794 hns3_get_board_configuration(struct hns3_hw *hw) 2795 { 2796 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2797 struct hns3_pf *pf = &hns->pf; 2798 struct hns3_cfg cfg; 2799 int ret; 2800 2801 memset(&cfg, 0, sizeof(cfg)); 2802 ret = hns3_get_board_cfg(hw, &cfg); 2803 if (ret) { 2804 PMD_INIT_LOG(ERR, "get board config failed %d", ret); 2805 return ret; 2806 } 2807 2808 ret = hns3_check_media_type(hw, cfg.media_type); 2809 if (ret) 2810 return ret; 2811 2812 hw->mac.media_type = cfg.media_type; 2813 hw->rss_size_max = cfg.rss_size_max; 2814 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); 2815 hw->mac.phy_addr = cfg.phy_addr; 2816 hw->dcb_info.num_pg = 1; 2817 hw->dcb_info.hw_pfc_map = 0; 2818 2819 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); 2820 if (ret) { 2821 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d", 2822 cfg.default_speed, ret); 2823 return ret; 2824 } 2825 2826 pf->tc_max = cfg.tc_num; 2827 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { 2828 PMD_INIT_LOG(WARNING, 2829 "Get TC num(%u) from flash, set TC num to 1", 2830 pf->tc_max); 2831 pf->tc_max = 1; 2832 } 2833 2834 /* Dev does not support DCB */ 2835 if (!hns3_dev_get_support(hw, DCB)) { 2836 pf->tc_max = 1; 2837 pf->pfc_max = 0; 2838 } else 2839 pf->pfc_max = pf->tc_max; 2840 2841 hw->dcb_info.num_tc = 1; 2842 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, 2843 hw->tqps_num / hw->dcb_info.num_tc); 2844 hns3_set_bit(hw->hw_tc_map, 0, 1); 2845 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; 2846 2847 pf->wanted_umv_size = cfg.umv_space; 2848 2849 return ret; 2850 } 2851 2852 static int 2853 hns3_get_configuration(struct hns3_hw *hw) 2854 { 2855 int ret; 2856 2857 ret = hns3_query_function_status(hw); 2858 if (ret) { 2859 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); 2860 return ret; 2861 } 2862 2863 /* Get device capability */ 2864 ret = hns3_get_capability(hw); 2865 if (ret) { 2866 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 2867 return ret; 2868 } 2869 2870 /* Get pf resource */ 2871 ret = hns3_query_pf_resource(hw); 2872 if (ret) { 2873 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); 2874 return ret; 2875 } 2876 2877 ret = 
hns3_get_board_configuration(hw); 2878 if (ret) { 2879 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); 2880 return ret; 2881 } 2882 2883 ret = hns3_query_dev_fec_info(hw); 2884 if (ret) 2885 PMD_INIT_LOG(ERR, 2886 "failed to query FEC information, ret = %d", ret); 2887 2888 return ret; 2889 } 2890 2891 static int 2892 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid, 2893 uint16_t tqp_vid, bool is_pf) 2894 { 2895 struct hns3_tqp_map_cmd *req; 2896 struct hns3_cmd_desc desc; 2897 int ret; 2898 2899 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); 2900 2901 req = (struct hns3_tqp_map_cmd *)desc.data; 2902 req->tqp_id = rte_cpu_to_le_16(tqp_pid); 2903 req->tqp_vf = func_id; 2904 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; 2905 if (!is_pf) 2906 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); 2907 req->tqp_vid = rte_cpu_to_le_16(tqp_vid); 2908 2909 ret = hns3_cmd_send(hw, &desc, 1); 2910 if (ret) 2911 PMD_INIT_LOG(ERR, "TQP map failed %d", ret); 2912 2913 return ret; 2914 } 2915 2916 static int 2917 hns3_map_tqp(struct hns3_hw *hw) 2918 { 2919 uint16_t i; 2920 int ret; 2921 2922 /* 2923 * In current version, VF is not supported when PF is driven by DPDK 2924 * driver, so we assign total tqps_num tqps allocated to this port 2925 * to PF. 2926 */ 2927 for (i = 0; i < hw->total_tqps_num; i++) { 2928 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true); 2929 if (ret) 2930 return ret; 2931 } 2932 2933 return 0; 2934 } 2935 2936 static int 2937 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 2938 { 2939 struct hns3_config_mac_speed_dup_cmd *req; 2940 struct hns3_cmd_desc desc; 2941 int ret; 2942 2943 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; 2944 2945 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); 2946 2947 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 
1 : 0); 2948 2949 switch (speed) { 2950 case RTE_ETH_SPEED_NUM_10M: 2951 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2952 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); 2953 break; 2954 case RTE_ETH_SPEED_NUM_100M: 2955 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2956 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); 2957 break; 2958 case RTE_ETH_SPEED_NUM_1G: 2959 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2960 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); 2961 break; 2962 case RTE_ETH_SPEED_NUM_10G: 2963 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2964 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); 2965 break; 2966 case RTE_ETH_SPEED_NUM_25G: 2967 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2968 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); 2969 break; 2970 case RTE_ETH_SPEED_NUM_40G: 2971 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2972 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); 2973 break; 2974 case RTE_ETH_SPEED_NUM_50G: 2975 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2976 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); 2977 break; 2978 case RTE_ETH_SPEED_NUM_100G: 2979 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2980 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); 2981 break; 2982 case RTE_ETH_SPEED_NUM_200G: 2983 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2984 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); 2985 break; 2986 default: 2987 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); 2988 return -EINVAL; 2989 } 2990 2991 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); 2992 2993 ret = hns3_cmd_send(hw, &desc, 1); 2994 if (ret) 2995 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); 2996 2997 return ret; 2998 } 2999 3000 static int 3001 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3002 { 3003 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3004 struct hns3_pf *pf = &hns->pf; 3005 struct hns3_priv_buf *priv; 3006 uint32_t i, total_size; 3007 3008 total_size = pf->pkt_buf_size; 3009 3010 /* alloc tx buffer for all enabled tc */ 3011 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3012 priv = &buf_alloc->priv_buf[i]; 3013 3014 if (hw->hw_tc_map & BIT(i)) { 3015 if (total_size < pf->tx_buf_size) 3016 return -ENOMEM; 3017 3018 priv->tx_buf_size = pf->tx_buf_size; 3019 } else 3020 priv->tx_buf_size = 0; 3021 3022 total_size -= priv->tx_buf_size; 3023 } 3024 3025 return 0; 3026 } 3027 3028 static int 3029 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3030 { 3031 /* TX buffer size is unit by 128 byte */ 3032 #define HNS3_BUF_SIZE_UNIT_SHIFT 7 3033 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) 3034 struct hns3_tx_buff_alloc_cmd *req; 3035 struct hns3_cmd_desc desc; 3036 uint32_t buf_size; 3037 uint32_t i; 3038 int ret; 3039 3040 req = (struct hns3_tx_buff_alloc_cmd *)desc.data; 3041 3042 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); 3043 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3044 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 3045 3046 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; 3047 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | 3048 HNS3_BUF_SIZE_UPDATE_EN_MSK); 3049 } 3050 3051 ret = hns3_cmd_send(hw, &desc, 1); 3052 if (ret) 3053 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); 3054 3055 return ret; 3056 } 3057 3058 static int 3059 hns3_get_tc_num(struct hns3_hw *hw) 3060 { 3061 int cnt = 0; 3062 uint8_t i; 3063 3064 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3065 if (hw->hw_tc_map & BIT(i)) 3066 cnt++; 3067 return cnt; 3068 } 3069 3070 static uint32_t 3071 
hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3072 { 3073 struct hns3_priv_buf *priv; 3074 uint32_t rx_priv = 0; 3075 int i; 3076 3077 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3078 priv = &buf_alloc->priv_buf[i]; 3079 if (priv->enable) 3080 rx_priv += priv->buf_size; 3081 } 3082 return rx_priv; 3083 } 3084 3085 static uint32_t 3086 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3087 { 3088 uint32_t total_tx_size = 0; 3089 uint32_t i; 3090 3091 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3092 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 3093 3094 return total_tx_size; 3095 } 3096 3097 /* Get the number of pfc enabled TCs, which have private buffer */ 3098 static int 3099 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3100 { 3101 struct hns3_priv_buf *priv; 3102 int cnt = 0; 3103 uint8_t i; 3104 3105 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3106 priv = &buf_alloc->priv_buf[i]; 3107 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3108 cnt++; 3109 } 3110 3111 return cnt; 3112 } 3113 3114 /* Get the number of pfc disabled TCs, which have private buffer */ 3115 static int 3116 hns3_get_no_pfc_priv_num(struct hns3_hw *hw, 3117 struct hns3_pkt_buf_alloc *buf_alloc) 3118 { 3119 struct hns3_priv_buf *priv; 3120 int cnt = 0; 3121 uint8_t i; 3122 3123 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3124 priv = &buf_alloc->priv_buf[i]; 3125 if (hw->hw_tc_map & BIT(i) && 3126 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3127 cnt++; 3128 } 3129 3130 return cnt; 3131 } 3132 3133 static bool 3134 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, 3135 uint32_t rx_all) 3136 { 3137 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 3138 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3139 struct hns3_pf *pf = &hns->pf; 3140 uint32_t shared_buf, aligned_mps; 3141 uint32_t rx_priv; 3142 uint8_t tc_num; 3143 uint8_t i; 3144 3145 tc_num = hns3_get_tc_num(hw); 3146 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3147 3148 if (hns3_dev_get_support(hw, DCB)) 3149 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + 3150 pf->dv_buf_size; 3151 else 3152 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF 3153 + pf->dv_buf_size; 3154 3155 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 3156 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), 3157 HNS3_BUF_SIZE_UNIT); 3158 3159 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); 3160 if (rx_all < rx_priv + shared_std) 3161 return false; 3162 3163 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); 3164 buf_alloc->s_buf.buf_size = shared_buf; 3165 if (hns3_dev_get_support(hw, DCB)) { 3166 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; 3167 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 3168 - roundup(aligned_mps / HNS3_BUF_DIV_BY, 3169 HNS3_BUF_SIZE_UNIT); 3170 } else { 3171 buf_alloc->s_buf.self.high = 3172 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3173 buf_alloc->s_buf.self.low = aligned_mps; 3174 } 3175 3176 if (hns3_dev_get_support(hw, DCB)) { 3177 hi_thrd = shared_buf - pf->dv_buf_size; 3178 3179 if (tc_num <= NEED_RESERVE_TC_NUM) 3180 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT / 3181 BUF_MAX_PERCENT; 3182 3183 if (tc_num) 3184 hi_thrd = hi_thrd / tc_num; 3185 3186 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); 3187 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); 3188 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; 3189 } else { 3190 hi_thrd = aligned_mps + 
HNS3_NON_DCB_ADDITIONAL_BUF; 3191 lo_thrd = aligned_mps; 3192 } 3193 3194 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3195 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 3196 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 3197 } 3198 3199 return true; 3200 } 3201 3202 static bool 3203 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, 3204 struct hns3_pkt_buf_alloc *buf_alloc) 3205 { 3206 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3207 struct hns3_pf *pf = &hns->pf; 3208 struct hns3_priv_buf *priv; 3209 uint32_t aligned_mps; 3210 uint32_t rx_all; 3211 uint8_t i; 3212 3213 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3214 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3215 3216 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3217 priv = &buf_alloc->priv_buf[i]; 3218 3219 priv->enable = 0; 3220 priv->wl.low = 0; 3221 priv->wl.high = 0; 3222 priv->buf_size = 0; 3223 3224 if (!(hw->hw_tc_map & BIT(i))) 3225 continue; 3226 3227 priv->enable = 1; 3228 if (hw->dcb_info.hw_pfc_map & BIT(i)) { 3229 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT; 3230 priv->wl.high = roundup(priv->wl.low + aligned_mps, 3231 HNS3_BUF_SIZE_UNIT); 3232 } else { 3233 priv->wl.low = 0; 3234 priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) : 3235 aligned_mps; 3236 } 3237 3238 priv->buf_size = priv->wl.high + pf->dv_buf_size; 3239 } 3240 3241 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3242 } 3243 3244 static bool 3245 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw, 3246 struct hns3_pkt_buf_alloc *buf_alloc) 3247 { 3248 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3249 struct hns3_pf *pf = &hns->pf; 3250 struct hns3_priv_buf *priv; 3251 int no_pfc_priv_num; 3252 uint32_t rx_all; 3253 uint8_t mask; 3254 int i; 3255 3256 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3257 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc); 3258 3259 /* let the last to be cleared first */ 3260 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3261 priv = &buf_alloc->priv_buf[i]; 3262 mask = BIT((uint8_t)i); 3263 if (hw->hw_tc_map & mask && 3264 !(hw->dcb_info.hw_pfc_map & mask)) { 3265 /* Clear the no pfc TC private buffer */ 3266 priv->wl.low = 0; 3267 priv->wl.high = 0; 3268 priv->buf_size = 0; 3269 priv->enable = 0; 3270 no_pfc_priv_num--; 3271 } 3272 3273 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3274 no_pfc_priv_num == 0) 3275 break; 3276 } 3277 3278 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3279 } 3280 3281 static bool 3282 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, 3283 struct hns3_pkt_buf_alloc *buf_alloc) 3284 { 3285 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3286 struct hns3_pf *pf = &hns->pf; 3287 struct hns3_priv_buf *priv; 3288 uint32_t rx_all; 3289 int pfc_priv_num; 3290 uint8_t mask; 3291 int i; 3292 3293 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3294 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc); 3295 3296 /* let the last to be cleared first */ 3297 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3298 priv = &buf_alloc->priv_buf[i]; 3299 mask = BIT((uint8_t)i); 3300 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) { 3301 /* Reduce the number of pfc TC with private buffer */ 3302 priv->wl.low = 0; 3303 priv->enable = 0; 3304 priv->wl.high = 0; 3305 priv->buf_size = 0; 3306 pfc_priv_num--; 3307 } 3308 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3309 pfc_priv_num == 0) 3310 break; 3311 } 3312 3313 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3314 } 3315 3316 static bool 3317 
hns3_only_alloc_priv_buff(struct hns3_hw *hw, 3318 struct hns3_pkt_buf_alloc *buf_alloc) 3319 { 3320 #define COMPENSATE_BUFFER 0x3C00 3321 #define COMPENSATE_HALF_MPS_NUM 5 3322 #define PRIV_WL_GAP 0x1800 3323 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3324 struct hns3_pf *pf = &hns->pf; 3325 uint32_t tc_num = hns3_get_tc_num(hw); 3326 uint32_t half_mps = pf->mps >> 1; 3327 struct hns3_priv_buf *priv; 3328 uint32_t min_rx_priv; 3329 uint32_t rx_priv; 3330 uint8_t i; 3331 3332 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3333 if (tc_num) 3334 rx_priv = rx_priv / tc_num; 3335 3336 if (tc_num <= NEED_RESERVE_TC_NUM) 3337 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 3338 3339 /* 3340 * The minimum value of the private buffer in the rx direction (min_rx_priv) 3341 * is equal to "DV + 2.5 * MPS + 15KB". The driver only allocates an rx private 3342 * buffer if rx_priv is greater than min_rx_priv. 3343 */ 3344 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER + 3345 COMPENSATE_HALF_MPS_NUM * half_mps; 3346 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT); 3347 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT); 3348 if (rx_priv < min_rx_priv) 3349 return false; 3350 3351 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3352 priv = &buf_alloc->priv_buf[i]; 3353 priv->enable = 0; 3354 priv->wl.low = 0; 3355 priv->wl.high = 0; 3356 priv->buf_size = 0; 3357 3358 if (!(hw->hw_tc_map & BIT(i))) 3359 continue; 3360 3361 priv->enable = 1; 3362 priv->buf_size = rx_priv; 3363 priv->wl.high = rx_priv - pf->dv_buf_size; 3364 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 3365 } 3366 3367 buf_alloc->s_buf.buf_size = 0; 3368 3369 return true; 3370 } 3371 3372 /* 3373 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs 3374 * @hw: pointer to struct hns3_hw 3375 * @buf_alloc: pointer to buffer calculation data 3376 * @return: 0: calculate successful, negative: fail 3377 */ 3378 static int 3379 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3380 { 3381 /* When DCB is not supported, rx private buffer is not allocated. */ 3382 if (!hns3_dev_get_support(hw, DCB)) { 3383 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3384 struct hns3_pf *pf = &hns->pf; 3385 uint32_t rx_all = pf->pkt_buf_size; 3386 3387 rx_all -= hns3_get_tx_buff_alloced(buf_alloc); 3388 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all)) 3389 return -ENOMEM; 3390 3391 return 0; 3392 } 3393 3394 /* 3395 * Try to allocate a private packet buffer for all TCs without a shared 3396 * buffer. 3397 */ 3398 if (hns3_only_alloc_priv_buff(hw, buf_alloc)) 3399 return 0; 3400 3401 /* 3402 * Try to allocate a private packet buffer for all TCs with a shared 3403 * buffer. 3404 */ 3405 if (hns3_rx_buf_calc_all(hw, true, buf_alloc)) 3406 return 0; 3407 3408 /* 3409 * The number of enabled ports, TCs and no_drop TCs differs between 3410 * application scenarios. To obtain better 3411 * performance, software can allocate the buffer sizes and configure 3412 * the waterlines by decreasing the private buffer size according 3413 * to this order: waterline of valid TCs, PFC-disabled TCs, then PFC 3414 * enabled TCs.
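 * For example, if shrinking every waterline still does not make the
 * shared buffer fit, the private buffers of PFC-disabled TCs are given
 * up first and those of PFC-enabled TCs only as a last resort.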
3415 */ 3416 if (hns3_rx_buf_calc_all(hw, false, buf_alloc)) 3417 return 0; 3418 3419 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc)) 3420 return 0; 3421 3422 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc)) 3423 return 0; 3424 3425 return -ENOMEM; 3426 } 3427 3428 static int 3429 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3430 { 3431 struct hns3_rx_priv_buff_cmd *req; 3432 struct hns3_cmd_desc desc; 3433 uint32_t buf_size; 3434 int ret; 3435 int i; 3436 3437 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false); 3438 req = (struct hns3_rx_priv_buff_cmd *)desc.data; 3439 3440 /* Alloc private buffer TCs */ 3441 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3442 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i]; 3443 3444 req->buf_num[i] = 3445 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S); 3446 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B); 3447 } 3448 3449 buf_size = buf_alloc->s_buf.buf_size; 3450 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) | 3451 (1 << HNS3_TC0_PRI_BUF_EN_B)); 3452 3453 ret = hns3_cmd_send(hw, &desc, 1); 3454 if (ret) 3455 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret); 3456 3457 return ret; 3458 } 3459 3460 static int 3461 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3462 { 3463 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2 3464 struct hns3_rx_priv_wl_buf *req; 3465 struct hns3_priv_buf *priv; 3466 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM]; 3467 int i, j; 3468 int ret; 3469 3470 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) { 3471 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC, 3472 false); 3473 req = (struct hns3_rx_priv_wl_buf *)desc[i].data; 3474 3475 /* The first descriptor set the NEXT bit to 1 */ 3476 if (i == 0) 3477 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3478 else 3479 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3480 3481 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 3482 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j; 3483 3484 priv = &buf_alloc->priv_buf[idx]; 3485 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >> 3486 HNS3_BUF_UNIT_S); 3487 req->tc_wl[j].high |= 3488 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3489 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >> 3490 HNS3_BUF_UNIT_S); 3491 req->tc_wl[j].low |= 3492 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3493 } 3494 } 3495 3496 /* Send 2 descriptor at one time */ 3497 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM); 3498 if (ret) 3499 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d", 3500 ret); 3501 return ret; 3502 } 3503 3504 static int 3505 hns3_common_thrd_config(struct hns3_hw *hw, 3506 struct hns3_pkt_buf_alloc *buf_alloc) 3507 { 3508 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2 3509 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf; 3510 struct hns3_rx_com_thrd *req; 3511 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM]; 3512 struct hns3_tc_thrd *tc; 3513 int tc_idx; 3514 int i, j; 3515 int ret; 3516 3517 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) { 3518 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC, 3519 false); 3520 req = (struct hns3_rx_com_thrd *)&desc[i].data; 3521 3522 /* The first descriptor set the NEXT bit to 1 */ 3523 if (i == 0) 3524 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3525 else 3526 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3527 3528 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 3529 tc_idx 
= i * HNS3_TC_NUM_ONE_DESC + j; 3530 tc = &s_buf->tc_thrd[tc_idx]; 3531 3532 req->com_thrd[j].high = 3533 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); 3534 req->com_thrd[j].high |= 3535 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3536 req->com_thrd[j].low = 3537 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); 3538 req->com_thrd[j].low |= 3539 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3540 } 3541 } 3542 3543 /* Send 2 descriptors at one time */ 3544 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); 3545 if (ret) 3546 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); 3547 3548 return ret; 3549 } 3550 3551 static int 3552 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3553 { 3554 struct hns3_shared_buf *buf = &buf_alloc->s_buf; 3555 struct hns3_rx_com_wl *req; 3556 struct hns3_cmd_desc desc; 3557 int ret; 3558 3559 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); 3560 3561 req = (struct hns3_rx_com_wl *)desc.data; 3562 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); 3563 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3564 3565 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); 3566 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3567 3568 ret = hns3_cmd_send(hw, &desc, 1); 3569 if (ret) 3570 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); 3571 3572 return ret; 3573 } 3574 3575 int 3576 hns3_buffer_alloc(struct hns3_hw *hw) 3577 { 3578 struct hns3_pkt_buf_alloc pkt_buf; 3579 int ret; 3580 3581 memset(&pkt_buf, 0, sizeof(pkt_buf)); 3582 ret = hns3_tx_buffer_calc(hw, &pkt_buf); 3583 if (ret) { 3584 PMD_INIT_LOG(ERR, 3585 "could not calc tx buffer size for all TCs %d", 3586 ret); 3587 return ret; 3588 } 3589 3590 ret = hns3_tx_buffer_alloc(hw, &pkt_buf); 3591 if (ret) { 3592 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); 3593 return ret; 3594 } 3595 3596 ret = hns3_rx_buffer_calc(hw, &pkt_buf); 3597 if (ret) { 3598 PMD_INIT_LOG(ERR, 3599 "could not calc rx priv buffer size for all TCs %d", 3600 ret); 3601 return ret; 3602 } 3603 3604 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); 3605 if (ret) { 3606 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); 3607 return ret; 3608 } 3609 3610 if (hns3_dev_get_support(hw, DCB)) { 3611 ret = hns3_rx_priv_wl_config(hw, &pkt_buf); 3612 if (ret) { 3613 PMD_INIT_LOG(ERR, 3614 "could not configure rx private waterline %d", 3615 ret); 3616 return ret; 3617 } 3618 3619 ret = hns3_common_thrd_config(hw, &pkt_buf); 3620 if (ret) { 3621 PMD_INIT_LOG(ERR, 3622 "could not configure common threshold %d", 3623 ret); 3624 return ret; 3625 } 3626 } 3627 3628 ret = hns3_common_wl_config(hw, &pkt_buf); 3629 if (ret) 3630 PMD_INIT_LOG(ERR, "could not configure common waterline %d", 3631 ret); 3632 3633 return ret; 3634 } 3635 3636 static int 3637 hns3_mac_init(struct hns3_hw *hw) 3638 { 3639 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3640 struct hns3_mac *mac = &hw->mac; 3641 struct hns3_pf *pf = &hns->pf; 3642 int ret; 3643 3644 pf->support_sfp_query = true; 3645 mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 3646 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); 3647 if (ret) { 3648 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); 3649 return ret; 3650 } 3651 3652 mac->link_status = RTE_ETH_LINK_DOWN; 3653 3654 return hns3_config_mtu(hw, pf->mps); 3655 } 3656 3657 static int 3658 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t 
resp_code) 3659 { 3660 #define HNS3_ETHERTYPE_SUCCESS_ADD 0 3661 #define HNS3_ETHERTYPE_ALREADY_ADD 1 3662 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 3663 #define HNS3_ETHERTYPE_KEY_CONFLICT 3 3664 int return_status; 3665 3666 if (cmdq_resp) { 3667 PMD_INIT_LOG(ERR, 3668 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.", 3669 cmdq_resp); 3670 return -EIO; 3671 } 3672 3673 switch (resp_code) { 3674 case HNS3_ETHERTYPE_SUCCESS_ADD: 3675 case HNS3_ETHERTYPE_ALREADY_ADD: 3676 return_status = 0; 3677 break; 3678 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: 3679 PMD_INIT_LOG(ERR, 3680 "add mac ethertype failed for manager table overflow."); 3681 return_status = -EIO; 3682 break; 3683 case HNS3_ETHERTYPE_KEY_CONFLICT: 3684 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); 3685 return_status = -EIO; 3686 break; 3687 default: 3688 PMD_INIT_LOG(ERR, 3689 "add mac ethertype failed for undefined, code=%u.", 3690 resp_code); 3691 return_status = -EIO; 3692 break; 3693 } 3694 3695 return return_status; 3696 } 3697 3698 static int 3699 hns3_add_mgr_tbl(struct hns3_hw *hw, 3700 const struct hns3_mac_mgr_tbl_entry_cmd *req) 3701 { 3702 struct hns3_cmd_desc desc; 3703 uint8_t resp_code; 3704 uint16_t retval; 3705 int ret; 3706 3707 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); 3708 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); 3709 3710 ret = hns3_cmd_send(hw, &desc, 1); 3711 if (ret) { 3712 PMD_INIT_LOG(ERR, 3713 "add mac ethertype failed for cmd_send, ret =%d.", 3714 ret); 3715 return ret; 3716 } 3717 3718 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 3719 retval = rte_le_to_cpu_16(desc.retval); 3720 3721 return hns3_get_mac_ethertype_cmd_status(retval, resp_code); 3722 } 3723 3724 static void 3725 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, 3726 int *table_item_num) 3727 { 3728 struct hns3_mac_mgr_tbl_entry_cmd *tbl; 3729 3730 /* 3731 * In current version, we add one item in management table as below: 3732 * 0x0180C200000E -- LLDP MC address 3733 */ 3734 tbl = mgr_table; 3735 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; 3736 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); 3737 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); 3738 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); 3739 tbl->i_port_bitmap = 0x1; 3740 *table_item_num = 1; 3741 } 3742 3743 static int 3744 hns3_init_mgr_tbl(struct hns3_hw *hw) 3745 { 3746 #define HNS_MAC_MGR_TBL_MAX_SIZE 16 3747 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; 3748 int table_item_num; 3749 int ret; 3750 int i; 3751 3752 memset(mgr_table, 0, sizeof(mgr_table)); 3753 hns3_prepare_mgr_tbl(mgr_table, &table_item_num); 3754 for (i = 0; i < table_item_num; i++) { 3755 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); 3756 if (ret) { 3757 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d", 3758 ret); 3759 return ret; 3760 } 3761 } 3762 3763 return 0; 3764 } 3765 3766 static void 3767 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, 3768 bool en_mc, bool en_bc, int vport_id) 3769 { 3770 if (!param) 3771 return; 3772 3773 memset(param, 0, sizeof(struct hns3_promisc_param)); 3774 if (en_uc) 3775 param->enable = HNS3_PROMISC_EN_UC; 3776 if (en_mc) 3777 param->enable |= HNS3_PROMISC_EN_MC; 3778 if (en_bc) 3779 param->enable |= HNS3_PROMISC_EN_BC; 3780 param->vf_id = vport_id; 3781 } 3782 3783 static int 3784 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) 3785 { 3786 struct 
hns3_promisc_cfg_cmd *req; 3787 struct hns3_cmd_desc desc; 3788 int ret; 3789 3790 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false); 3791 3792 req = (struct hns3_promisc_cfg_cmd *)desc.data; 3793 req->vf_id = param->vf_id; 3794 req->flag = (param->enable << HNS3_PROMISC_EN_B) | 3795 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B; 3796 3797 ret = hns3_cmd_send(hw, &desc, 1); 3798 if (ret) 3799 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret); 3800 3801 return ret; 3802 } 3803 3804 static int 3805 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) 3806 { 3807 struct hns3_promisc_param param; 3808 bool en_bc_pmc = true; 3809 uint8_t vf_id; 3810 3811 /* 3812 * In the current version VF is not supported when the PF is driven by the 3813 * DPDK driver, so only the PF vport needs to be configured. 3814 */ 3815 vf_id = HNS3_PF_FUNC_ID; 3816 3817 hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); 3818 return hns3_cmd_set_promisc_mode(hw, &param); 3819 } 3820 3821 static int 3822 hns3_promisc_init(struct hns3_hw *hw) 3823 { 3824 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3825 struct hns3_pf *pf = &hns->pf; 3826 struct hns3_promisc_param param; 3827 uint16_t func_id; 3828 int ret; 3829 3830 ret = hns3_set_promisc_mode(hw, false, false); 3831 if (ret) { 3832 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret); 3833 return ret; 3834 } 3835 3836 /* 3837 * In the current version VFs are not supported when the PF is driven by the 3838 * DPDK driver. After the PF has been taken over by DPDK, the original VFs 3839 * become invalid, so residual entries may be left behind. Clear 3840 * the VFs' promisc mode to avoid unnecessary bandwidth usage 3841 * during init. 3842 */ 3843 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) { 3844 hns3_promisc_param_init(&param, false, false, false, func_id); 3845 ret = hns3_cmd_set_promisc_mode(hw, &param); 3846 if (ret) { 3847 PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode," 3848 " ret = %d", func_id, ret); 3849 return ret; 3850 } 3851 } 3852 3853 return 0; 3854 } 3855 3856 static void 3857 hns3_promisc_uninit(struct hns3_hw *hw) 3858 { 3859 struct hns3_promisc_param param; 3860 uint16_t func_id; 3861 int ret; 3862 3863 func_id = HNS3_PF_FUNC_ID; 3864 3865 /* 3866 * In the current version VFs are not supported when the PF is driven by the 3867 * DPDK driver, and the VFs' promisc mode status has been cleared during 3868 * init and will not change. So just clear the PF's promisc 3869 * mode status during uninit. 3870 */ 3871 hns3_promisc_param_init(&param, false, false, false, func_id); 3872 ret = hns3_cmd_set_promisc_mode(hw, &param); 3873 if (ret) 3874 PMD_INIT_LOG(ERR, "failed to clear promisc status during" 3875 " uninit, ret = %d", ret); 3876 } 3877 3878 static int 3879 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) 3880 { 3881 bool allmulti = dev->data->all_multicast ? true : false; 3882 struct hns3_adapter *hns = dev->data->dev_private; 3883 struct hns3_hw *hw = &hns->hw; 3884 uint64_t offloads; 3885 int err; 3886 int ret; 3887 3888 rte_spinlock_lock(&hw->lock); 3889 ret = hns3_set_promisc_mode(hw, true, true); 3890 if (ret) { 3891 rte_spinlock_unlock(&hw->lock); 3892 hns3_err(hw, "failed to enable promiscuous mode, ret = %d", 3893 ret); 3894 return ret; 3895 } 3896 3897 /* 3898 * When promiscuous mode is enabled, disable the VLAN filter to let 3899 * all packets come in on the receive side.
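 * The filter is restored from rxmode.offloads when promiscuous mode is
 * disabled again, see hns3_dev_promiscuous_disable() below.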
3900 */ 3901 offloads = dev->data->dev_conf.rxmode.offloads; 3902 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 3903 ret = hns3_enable_vlan_filter(hns, false); 3904 if (ret) { 3905 hns3_err(hw, "failed to enable promiscuous mode due to " 3906 "failure to disable vlan filter, ret = %d", 3907 ret); 3908 err = hns3_set_promisc_mode(hw, false, allmulti); 3909 if (err) 3910 hns3_err(hw, "failed to restore promiscuous " 3911 "status after disable vlan filter " 3912 "failed during enabling promiscuous " 3913 "mode, ret = %d", ret); 3914 } 3915 } 3916 3917 rte_spinlock_unlock(&hw->lock); 3918 3919 return ret; 3920 } 3921 3922 static int 3923 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) 3924 { 3925 bool allmulti = dev->data->all_multicast ? true : false; 3926 struct hns3_adapter *hns = dev->data->dev_private; 3927 struct hns3_hw *hw = &hns->hw; 3928 uint64_t offloads; 3929 int err; 3930 int ret; 3931 3932 /* If now in all_multicast mode, must remain in all_multicast mode. */ 3933 rte_spinlock_lock(&hw->lock); 3934 ret = hns3_set_promisc_mode(hw, false, allmulti); 3935 if (ret) { 3936 rte_spinlock_unlock(&hw->lock); 3937 hns3_err(hw, "failed to disable promiscuous mode, ret = %d", 3938 ret); 3939 return ret; 3940 } 3941 /* when promiscuous mode was disabled, restore the vlan filter status */ 3942 offloads = dev->data->dev_conf.rxmode.offloads; 3943 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 3944 ret = hns3_enable_vlan_filter(hns, true); 3945 if (ret) { 3946 hns3_err(hw, "failed to disable promiscuous mode due to" 3947 " failure to restore vlan filter, ret = %d", 3948 ret); 3949 err = hns3_set_promisc_mode(hw, true, true); 3950 if (err) 3951 hns3_err(hw, "failed to restore promiscuous " 3952 "status after enabling vlan filter " 3953 "failed during disabling promiscuous " 3954 "mode, ret = %d", ret); 3955 } 3956 } 3957 rte_spinlock_unlock(&hw->lock); 3958 3959 return ret; 3960 } 3961 3962 static int 3963 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) 3964 { 3965 struct hns3_adapter *hns = dev->data->dev_private; 3966 struct hns3_hw *hw = &hns->hw; 3967 int ret; 3968 3969 if (dev->data->promiscuous) 3970 return 0; 3971 3972 rte_spinlock_lock(&hw->lock); 3973 ret = hns3_set_promisc_mode(hw, false, true); 3974 rte_spinlock_unlock(&hw->lock); 3975 if (ret) 3976 hns3_err(hw, "failed to enable allmulticast mode, ret = %d", 3977 ret); 3978 3979 return ret; 3980 } 3981 3982 static int 3983 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) 3984 { 3985 struct hns3_adapter *hns = dev->data->dev_private; 3986 struct hns3_hw *hw = &hns->hw; 3987 int ret; 3988 3989 /* If now in promiscuous mode, must remain in all_multicast mode. */ 3990 if (dev->data->promiscuous) 3991 return 0; 3992 3993 rte_spinlock_lock(&hw->lock); 3994 ret = hns3_set_promisc_mode(hw, false, false); 3995 rte_spinlock_unlock(&hw->lock); 3996 if (ret) 3997 hns3_err(hw, "failed to disable allmulticast mode, ret = %d", 3998 ret); 3999 4000 return ret; 4001 } 4002 4003 static int 4004 hns3_dev_promisc_restore(struct hns3_adapter *hns) 4005 { 4006 struct hns3_hw *hw = &hns->hw; 4007 bool allmulti = hw->data->all_multicast ? 
true : false; 4008 int ret; 4009 4010 if (hw->data->promiscuous) { 4011 ret = hns3_set_promisc_mode(hw, true, true); 4012 if (ret) 4013 hns3_err(hw, "failed to restore promiscuous mode, " 4014 "ret = %d", ret); 4015 return ret; 4016 } 4017 4018 ret = hns3_set_promisc_mode(hw, false, allmulti); 4019 if (ret) 4020 hns3_err(hw, "failed to restore allmulticast mode, ret = %d", 4021 ret); 4022 return ret; 4023 } 4024 4025 static int 4026 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info) 4027 { 4028 struct hns3_sfp_info_cmd *resp; 4029 uint32_t local_pause, lp_pause; 4030 struct hns3_cmd_desc desc; 4031 int ret; 4032 4033 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true); 4034 resp = (struct hns3_sfp_info_cmd *)desc.data; 4035 resp->query_type = HNS3_ACTIVE_QUERY; 4036 4037 ret = hns3_cmd_send(hw, &desc, 1); 4038 if (ret == -EOPNOTSUPP) { 4039 hns3_warn(hw, "firmware does not support get SFP info," 4040 " ret = %d.", ret); 4041 return ret; 4042 } else if (ret) { 4043 hns3_err(hw, "get sfp info failed, ret = %d.", ret); 4044 return ret; 4045 } 4046 4047 /* 4048 * In some case, the speed of MAC obtained from firmware may be 0, it 4049 * shouldn't be set to mac->speed. 4050 */ 4051 if (!rte_le_to_cpu_32(resp->sfp_speed)) 4052 return 0; 4053 4054 mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed); 4055 /* 4056 * if resp->supported_speed is 0, it means it's an old version 4057 * firmware, do not update these params. 4058 */ 4059 if (resp->supported_speed) { 4060 mac_info->query_type = HNS3_ACTIVE_QUERY; 4061 mac_info->supported_speed = 4062 rte_le_to_cpu_32(resp->supported_speed); 4063 mac_info->support_autoneg = resp->autoneg_ability; 4064 mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED 4065 : RTE_ETH_LINK_AUTONEG; 4066 mac_info->fec_capa = resp->fec_ability; 4067 local_pause = resp->pause_status & HNS3_FIBER_LOCAL_PAUSE_MASK; 4068 lp_pause = (resp->pause_status & HNS3_FIBER_LP_PAUSE_MASK) >> 4069 HNS3_FIBER_LP_PAUSE_S; 4070 mac_info->advertising = 4071 local_pause << HNS3_PHY_LINK_MODE_PAUSE_S; 4072 mac_info->lp_advertising = 4073 lp_pause << HNS3_PHY_LINK_MODE_PAUSE_S; 4074 } else { 4075 mac_info->query_type = HNS3_DEFAULT_QUERY; 4076 } 4077 4078 return 0; 4079 } 4080 4081 static uint8_t 4082 hns3_check_speed_dup(uint8_t duplex, uint32_t speed) 4083 { 4084 if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M)) 4085 duplex = RTE_ETH_LINK_FULL_DUPLEX; 4086 4087 return duplex; 4088 } 4089 4090 static int 4091 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 4092 { 4093 struct hns3_mac *mac = &hw->mac; 4094 int ret; 4095 4096 duplex = hns3_check_speed_dup(duplex, speed); 4097 if (mac->link_speed == speed && mac->link_duplex == duplex) 4098 return 0; 4099 4100 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex); 4101 if (ret) 4102 return ret; 4103 4104 ret = hns3_port_shaper_update(hw, speed); 4105 if (ret) 4106 return ret; 4107 4108 mac->link_speed = speed; 4109 mac->link_duplex = duplex; 4110 4111 return 0; 4112 } 4113 4114 static int 4115 hns3_update_fiber_link_info(struct hns3_hw *hw) 4116 { 4117 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 4118 struct hns3_mac *mac = &hw->mac; 4119 struct hns3_mac mac_info; 4120 int ret; 4121 4122 /* If firmware do not support get SFP/qSFP speed, return directly */ 4123 if (!pf->support_sfp_query) 4124 return 0; 4125 4126 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4127 ret = hns3_get_sfp_info(hw, &mac_info); 4128 if (ret == -EOPNOTSUPP) { 4129 pf->support_sfp_query = 
false;
		return ret;
	} else if (ret)
		return ret;

	/* Do nothing if no SFP */
	if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
		return 0;

	/*
	 * If query_type is HNS3_ACTIVE_QUERY, there is no need to
	 * reconfigure the MAC speed. Otherwise, it indicates that the
	 * current firmware only supports obtaining the SFP speed, and the
	 * MAC speed needs to be reconfigured.
	 */
	mac->query_type = mac_info.query_type;
	if (mac->query_type == HNS3_ACTIVE_QUERY) {
		if (mac_info.link_speed != mac->link_speed) {
			ret = hns3_port_shaper_update(hw, mac_info.link_speed);
			if (ret)
				return ret;
		}

		mac->link_speed = mac_info.link_speed;
		mac->supported_speed = mac_info.supported_speed;
		mac->support_autoneg = mac_info.support_autoneg;
		mac->link_autoneg = mac_info.link_autoneg;
		mac->fec_capa = mac_info.fec_capa;
		mac->advertising = mac_info.advertising;
		mac->lp_advertising = mac_info.lp_advertising;

		return 0;
	}

	/* Config full duplex for SFP */
	return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
				      RTE_ETH_LINK_FULL_DUPLEX);
}

static void
hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
{
#define HNS3_PHY_SUPPORTED_SPEED_MASK   0x2f

	struct hns3_phy_params_bd0_cmd *req;
	uint32_t supported;

	req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
	mac->link_speed = rte_le_to_cpu_32(req->speed);
	mac->link_duplex = hns3_get_bit(req->duplex,
					HNS3_PHY_DUPLEX_CFG_B);
	mac->link_autoneg = hns3_get_bit(req->autoneg,
					 HNS3_PHY_AUTONEG_CFG_B);
	mac->advertising = rte_le_to_cpu_32(req->advertising);
	mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
	supported = rte_le_to_cpu_32(req->supported);
	mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK;
	mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT);
}

static int
hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
{
	struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
	uint16_t i;
	int ret;

	for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
					  true);
		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}
	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);

	ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
	if (ret) {
		hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
		return ret;
	}

	hns3_parse_copper_phy_params(desc, mac);

	return 0;
}

static int
hns3_update_copper_link_info(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;
	struct hns3_mac mac_info;
	int ret;

	memset(&mac_info, 0, sizeof(struct hns3_mac));
	ret = hns3_get_copper_phy_params(hw, &mac_info);
	if (ret)
		return ret;

	if (mac_info.link_speed != mac->link_speed) {
		ret = hns3_port_shaper_update(hw, mac_info.link_speed);
		if (ret)
			return ret;
	}

	mac->link_speed = mac_info.link_speed;
	mac->link_duplex = mac_info.link_duplex;
	mac->link_autoneg = mac_info.link_autoneg;
	mac->supported_speed = mac_info.supported_speed;
	mac->advertising = mac_info.advertising;
	mac->lp_advertising = mac_info.lp_advertising;
	mac->support_autoneg =
mac_info.support_autoneg; 4239 4240 return 0; 4241 } 4242 4243 static int 4244 hns3_update_link_info(struct rte_eth_dev *eth_dev) 4245 { 4246 struct hns3_adapter *hns = eth_dev->data->dev_private; 4247 struct hns3_hw *hw = &hns->hw; 4248 4249 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) 4250 return hns3_update_copper_link_info(hw); 4251 4252 return hns3_update_fiber_link_info(hw); 4253 } 4254 4255 static int 4256 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) 4257 { 4258 struct hns3_config_mac_mode_cmd *req; 4259 struct hns3_cmd_desc desc; 4260 uint32_t loop_en = 0; 4261 uint8_t val = 0; 4262 int ret; 4263 4264 req = (struct hns3_config_mac_mode_cmd *)desc.data; 4265 4266 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); 4267 if (enable) 4268 val = 1; 4269 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); 4270 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); 4271 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); 4272 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); 4273 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); 4274 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); 4275 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); 4276 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); 4277 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); 4278 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); 4279 4280 /* 4281 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC 4282 * when receiving frames. Otherwise, CRC will be stripped. 4283 */ 4284 if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) 4285 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); 4286 else 4287 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); 4288 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); 4289 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); 4290 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); 4291 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); 4292 4293 ret = hns3_cmd_send(hw, &desc, 1); 4294 if (ret) 4295 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret); 4296 4297 return ret; 4298 } 4299 4300 static int 4301 hns3_get_mac_link_status(struct hns3_hw *hw) 4302 { 4303 struct hns3_link_status_cmd *req; 4304 struct hns3_cmd_desc desc; 4305 int link_status; 4306 int ret; 4307 4308 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); 4309 ret = hns3_cmd_send(hw, &desc, 1); 4310 if (ret) { 4311 hns3_err(hw, "get link status cmd failed %d", ret); 4312 return RTE_ETH_LINK_DOWN; 4313 } 4314 4315 req = (struct hns3_link_status_cmd *)desc.data; 4316 link_status = req->status & HNS3_LINK_STATUS_UP_M; 4317 4318 return !!link_status; 4319 } 4320 4321 static bool 4322 hns3_update_link_status(struct hns3_hw *hw) 4323 { 4324 int state; 4325 4326 state = hns3_get_mac_link_status(hw); 4327 if (state != hw->mac.link_status) { 4328 hw->mac.link_status = state; 4329 hns3_warn(hw, "Link status change to %s!", state ? 
"up" : "down"); 4330 return true; 4331 } 4332 4333 return false; 4334 } 4335 4336 void 4337 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query) 4338 { 4339 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 4340 struct rte_eth_link new_link; 4341 int ret; 4342 4343 if (query) 4344 hns3_update_port_link_info(dev); 4345 4346 memset(&new_link, 0, sizeof(new_link)); 4347 hns3_setup_linkstatus(dev, &new_link); 4348 4349 ret = rte_eth_linkstatus_set(dev, &new_link); 4350 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0) 4351 hns3_start_report_lse(dev); 4352 } 4353 4354 static void 4355 hns3_service_handler(void *param) 4356 { 4357 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 4358 struct hns3_adapter *hns = eth_dev->data->dev_private; 4359 struct hns3_hw *hw = &hns->hw; 4360 4361 if (!hns3_is_reset_pending(hns)) { 4362 hns3_update_linkstatus_and_event(hw, true); 4363 hns3_update_hw_stats(hw); 4364 } else { 4365 hns3_warn(hw, "Cancel the query when reset is pending"); 4366 } 4367 4368 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); 4369 } 4370 4371 static int 4372 hns3_init_hardware(struct hns3_adapter *hns) 4373 { 4374 struct hns3_hw *hw = &hns->hw; 4375 int ret; 4376 4377 /* 4378 * All queue-related HW operations must be performed after the TCAM 4379 * table is configured. 4380 */ 4381 ret = hns3_map_tqp(hw); 4382 if (ret) { 4383 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); 4384 return ret; 4385 } 4386 4387 ret = hns3_init_umv_space(hw); 4388 if (ret) { 4389 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); 4390 return ret; 4391 } 4392 4393 ret = hns3_mac_init(hw); 4394 if (ret) { 4395 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); 4396 goto err_mac_init; 4397 } 4398 4399 ret = hns3_init_mgr_tbl(hw); 4400 if (ret) { 4401 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); 4402 goto err_mac_init; 4403 } 4404 4405 ret = hns3_promisc_init(hw); 4406 if (ret) { 4407 PMD_INIT_LOG(ERR, "Failed to init promisc: %d", 4408 ret); 4409 goto err_mac_init; 4410 } 4411 4412 ret = hns3_init_vlan_config(hns); 4413 if (ret) { 4414 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); 4415 goto err_mac_init; 4416 } 4417 4418 ret = hns3_dcb_init(hw); 4419 if (ret) { 4420 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); 4421 goto err_mac_init; 4422 } 4423 4424 ret = hns3_init_fd_config(hns); 4425 if (ret) { 4426 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); 4427 goto err_mac_init; 4428 } 4429 4430 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); 4431 if (ret) { 4432 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); 4433 goto err_mac_init; 4434 } 4435 4436 ret = hns3_config_gro(hw, false); 4437 if (ret) { 4438 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 4439 goto err_mac_init; 4440 } 4441 4442 /* 4443 * In the initialization clearing the all hardware mapping relationship 4444 * configurations between queues and interrupt vectors is needed, so 4445 * some error caused by the residual configurations, such as the 4446 * unexpected interrupt, can be avoid. 
	 */
	ret = hns3_init_ring_with_vector(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_ptp_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init PTP, ret = %d", ret);
		goto err_mac_init;
	}

	return 0;

err_mac_init:
	hns3_uninit_umv_space(hw);
	return ret;
}

static int
hns3_clear_hw(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	return 0;
}

static void
hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)
{
	uint32_t val;

	/*
	 * The new firmware supports reporting more hardware error types by
	 * MSI-X mode. These errors are defined as RAS errors in hardware and
	 * belong to a different type from the MSI-X errors processed by the
	 * network driver.
	 *
	 * The network driver should enable the new error report on
	 * initialization.
	 */
	val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
	hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0);
	hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
}

static uint32_t
hns3_set_firber_default_support_speed(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;

	switch (mac->link_speed) {
	case RTE_ETH_SPEED_NUM_1G:
		return HNS3_FIBER_LINK_SPEED_1G_BIT;
	case RTE_ETH_SPEED_NUM_10G:
		return HNS3_FIBER_LINK_SPEED_10G_BIT;
	case RTE_ETH_SPEED_NUM_25G:
		return HNS3_FIBER_LINK_SPEED_25G_BIT;
	case RTE_ETH_SPEED_NUM_40G:
		return HNS3_FIBER_LINK_SPEED_40G_BIT;
	case RTE_ETH_SPEED_NUM_50G:
		return HNS3_FIBER_LINK_SPEED_50G_BIT;
	case RTE_ETH_SPEED_NUM_100G:
		return HNS3_FIBER_LINK_SPEED_100G_BIT;
	case RTE_ETH_SPEED_NUM_200G:
		return HNS3_FIBER_LINK_SPEED_200G_BIT;
	default:
		hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
		return 0;
	}
}

/*
 * Validity of supported_speed for fiber and copper media types can be
 * guaranteed by the following policy:
 * Copper:
 *    Although the initialization of the PHY in the firmware may not be
 *    completed, the firmware can guarantee that supported_speed is a
 *    valid value.
 * Fiber:
 *    If the firmware version supports the active query way of the
 *    HNS3_OPC_GET_SFP_INFO opcode, supported_speed can be obtained
 *    through it. If unsupported, use the SFP's speed as the value of
 *    supported_speed.
 */
static int
hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_mac *mac = &hw->mac;
	int ret;

	ret = hns3_update_link_info(eth_dev);
	if (ret)
		return ret;

	if (mac->media_type == HNS3_MEDIA_TYPE_FIBER ||
	    mac->media_type == HNS3_MEDIA_TYPE_BACKPLANE) {
		/*
		 * Some firmware does not support the report of
		 * supported_speed, and only reports the effective speed of
		 * the SFP/backplane. In this case, it is necessary to use
		 * the SFP/backplane's speed as supported_speed.
4558 */ 4559 if (mac->supported_speed == 0) 4560 mac->supported_speed = 4561 hns3_set_firber_default_support_speed(hw); 4562 } 4563 4564 return 0; 4565 } 4566 4567 static int 4568 hns3_init_pf(struct rte_eth_dev *eth_dev) 4569 { 4570 struct rte_device *dev = eth_dev->device; 4571 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 4572 struct hns3_adapter *hns = eth_dev->data->dev_private; 4573 struct hns3_hw *hw = &hns->hw; 4574 int ret; 4575 4576 PMD_INIT_FUNC_TRACE(); 4577 4578 /* Get hardware io base address from pcie BAR2 IO space */ 4579 hw->io_base = pci_dev->mem_resource[2].addr; 4580 4581 ret = hns3_get_pci_revision_id(hw, &hw->revision); 4582 if (ret) 4583 return ret; 4584 4585 /* Firmware command queue initialize */ 4586 ret = hns3_cmd_init_queue(hw); 4587 if (ret) { 4588 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); 4589 goto err_cmd_init_queue; 4590 } 4591 4592 hns3_clear_all_event_cause(hw); 4593 4594 /* Firmware command initialize */ 4595 ret = hns3_cmd_init(hw); 4596 if (ret) { 4597 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); 4598 goto err_cmd_init; 4599 } 4600 4601 hns3_tx_push_init(eth_dev); 4602 4603 /* 4604 * To ensure that the hardware environment is clean during 4605 * initialization, the driver actively clear the hardware environment 4606 * during initialization, including PF and corresponding VFs' vlan, mac, 4607 * flow table configurations, etc. 4608 */ 4609 ret = hns3_clear_hw(hw); 4610 if (ret) { 4611 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret); 4612 goto err_cmd_init; 4613 } 4614 4615 hns3_config_all_msix_error(hw, true); 4616 4617 ret = rte_intr_callback_register(pci_dev->intr_handle, 4618 hns3_interrupt_handler, 4619 eth_dev); 4620 if (ret) { 4621 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); 4622 goto err_intr_callback_register; 4623 } 4624 4625 /* Enable interrupt */ 4626 rte_intr_enable(pci_dev->intr_handle); 4627 hns3_pf_enable_irq0(hw); 4628 4629 /* Get configuration */ 4630 ret = hns3_get_configuration(hw); 4631 if (ret) { 4632 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); 4633 goto err_get_config; 4634 } 4635 4636 ret = hns3_stats_init(hw); 4637 if (ret) 4638 goto err_get_config; 4639 4640 ret = hns3_init_hardware(hns); 4641 if (ret) { 4642 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret); 4643 goto err_init_hw; 4644 } 4645 4646 /* Initialize flow director filter list & hash */ 4647 ret = hns3_fdir_filter_init(hns); 4648 if (ret) { 4649 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret); 4650 goto err_fdir; 4651 } 4652 4653 hns3_rss_set_default_args(hw); 4654 4655 ret = hns3_enable_hw_error_intr(hns, true); 4656 if (ret) { 4657 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", 4658 ret); 4659 goto err_enable_intr; 4660 } 4661 4662 ret = hns3_get_port_supported_speed(eth_dev); 4663 if (ret) { 4664 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported " 4665 "by device, ret = %d.", ret); 4666 goto err_supported_speed; 4667 } 4668 4669 hns3_tm_conf_init(eth_dev); 4670 4671 return 0; 4672 4673 err_supported_speed: 4674 (void)hns3_enable_hw_error_intr(hns, false); 4675 err_enable_intr: 4676 hns3_fdir_filter_uninit(hns); 4677 err_fdir: 4678 hns3_uninit_umv_space(hw); 4679 hns3_ptp_uninit(hw); 4680 err_init_hw: 4681 hns3_stats_uninit(hw); 4682 err_get_config: 4683 hns3_pf_disable_irq0(hw); 4684 rte_intr_disable(pci_dev->intr_handle); 4685 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler, 4686 eth_dev); 4687 err_intr_callback_register: 4688 err_cmd_init: 
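	/*
	 * Note: the error labels above unwind in strict reverse order of the
	 * init sequence, so a failure at any step releases only what has
	 * already been set up. For example, if hns3_stats_init() fails, the
	 * jump to err_get_config skips the stats teardown but still disables
	 * IRQ0 and unregisters the interrupt callback before the command
	 * queue is torn down below.
	 */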
4689 hns3_cmd_uninit(hw); 4690 hns3_cmd_destroy_queue(hw); 4691 err_cmd_init_queue: 4692 hw->io_base = NULL; 4693 4694 return ret; 4695 } 4696 4697 static void 4698 hns3_uninit_pf(struct rte_eth_dev *eth_dev) 4699 { 4700 struct hns3_adapter *hns = eth_dev->data->dev_private; 4701 struct rte_device *dev = eth_dev->device; 4702 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 4703 struct hns3_hw *hw = &hns->hw; 4704 4705 PMD_INIT_FUNC_TRACE(); 4706 4707 hns3_tm_conf_uninit(eth_dev); 4708 hns3_enable_hw_error_intr(hns, false); 4709 hns3_rss_uninit(hns); 4710 (void)hns3_config_gro(hw, false); 4711 hns3_promisc_uninit(hw); 4712 hns3_flow_uninit(eth_dev); 4713 hns3_fdir_filter_uninit(hns); 4714 hns3_uninit_umv_space(hw); 4715 hns3_ptp_uninit(hw); 4716 hns3_stats_uninit(hw); 4717 hns3_config_mac_tnl_int(hw, false); 4718 hns3_pf_disable_irq0(hw); 4719 rte_intr_disable(pci_dev->intr_handle); 4720 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler, 4721 eth_dev); 4722 hns3_config_all_msix_error(hw, false); 4723 hns3_cmd_uninit(hw); 4724 hns3_cmd_destroy_queue(hw); 4725 hw->io_base = NULL; 4726 } 4727 4728 static uint32_t 4729 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds) 4730 { 4731 uint32_t speed_bit; 4732 4733 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) { 4734 case RTE_ETH_LINK_SPEED_10M: 4735 speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT; 4736 break; 4737 case RTE_ETH_LINK_SPEED_10M_HD: 4738 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT; 4739 break; 4740 case RTE_ETH_LINK_SPEED_100M: 4741 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT; 4742 break; 4743 case RTE_ETH_LINK_SPEED_100M_HD: 4744 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT; 4745 break; 4746 case RTE_ETH_LINK_SPEED_1G: 4747 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT; 4748 break; 4749 default: 4750 speed_bit = 0; 4751 break; 4752 } 4753 4754 return speed_bit; 4755 } 4756 4757 static uint32_t 4758 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds) 4759 { 4760 uint32_t speed_bit; 4761 4762 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) { 4763 case RTE_ETH_LINK_SPEED_1G: 4764 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT; 4765 break; 4766 case RTE_ETH_LINK_SPEED_10G: 4767 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT; 4768 break; 4769 case RTE_ETH_LINK_SPEED_25G: 4770 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT; 4771 break; 4772 case RTE_ETH_LINK_SPEED_40G: 4773 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT; 4774 break; 4775 case RTE_ETH_LINK_SPEED_50G: 4776 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT; 4777 break; 4778 case RTE_ETH_LINK_SPEED_100G: 4779 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT; 4780 break; 4781 case RTE_ETH_LINK_SPEED_200G: 4782 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT; 4783 break; 4784 default: 4785 speed_bit = 0; 4786 break; 4787 } 4788 4789 return speed_bit; 4790 } 4791 4792 static int 4793 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) 4794 { 4795 struct hns3_mac *mac = &hw->mac; 4796 uint32_t supported_speed = mac->supported_speed; 4797 uint32_t speed_bit = 0; 4798 4799 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 4800 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds); 4801 else 4802 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds); 4803 4804 if (!(speed_bit & supported_speed)) { 4805 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.", 4806 link_speeds); 4807 return -EINVAL; 4808 } 4809 4810 return 0; 4811 } 4812 4813 static uint32_t 4814 hns3_get_link_speed(uint32_t link_speeds) 4815 { 4816 uint32_t speed = 
RTE_ETH_SPEED_NUM_NONE; 4817 4818 if (link_speeds & RTE_ETH_LINK_SPEED_10M || 4819 link_speeds & RTE_ETH_LINK_SPEED_10M_HD) 4820 speed = RTE_ETH_SPEED_NUM_10M; 4821 if (link_speeds & RTE_ETH_LINK_SPEED_100M || 4822 link_speeds & RTE_ETH_LINK_SPEED_100M_HD) 4823 speed = RTE_ETH_SPEED_NUM_100M; 4824 if (link_speeds & RTE_ETH_LINK_SPEED_1G) 4825 speed = RTE_ETH_SPEED_NUM_1G; 4826 if (link_speeds & RTE_ETH_LINK_SPEED_10G) 4827 speed = RTE_ETH_SPEED_NUM_10G; 4828 if (link_speeds & RTE_ETH_LINK_SPEED_25G) 4829 speed = RTE_ETH_SPEED_NUM_25G; 4830 if (link_speeds & RTE_ETH_LINK_SPEED_40G) 4831 speed = RTE_ETH_SPEED_NUM_40G; 4832 if (link_speeds & RTE_ETH_LINK_SPEED_50G) 4833 speed = RTE_ETH_SPEED_NUM_50G; 4834 if (link_speeds & RTE_ETH_LINK_SPEED_100G) 4835 speed = RTE_ETH_SPEED_NUM_100G; 4836 if (link_speeds & RTE_ETH_LINK_SPEED_200G) 4837 speed = RTE_ETH_SPEED_NUM_200G; 4838 4839 return speed; 4840 } 4841 4842 static uint8_t 4843 hns3_get_link_duplex(uint32_t link_speeds) 4844 { 4845 if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) || 4846 (link_speeds & RTE_ETH_LINK_SPEED_100M_HD)) 4847 return RTE_ETH_LINK_HALF_DUPLEX; 4848 else 4849 return RTE_ETH_LINK_FULL_DUPLEX; 4850 } 4851 4852 static int 4853 hns3_set_copper_port_link_speed(struct hns3_hw *hw, 4854 struct hns3_set_link_speed_cfg *cfg) 4855 { 4856 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 4857 struct hns3_phy_params_bd0_cmd *req; 4858 uint16_t i; 4859 4860 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 4861 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 4862 false); 4863 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4864 } 4865 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false); 4866 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 4867 req->autoneg = cfg->autoneg; 4868 4869 /* 4870 * The full speed capability is used to negotiate when 4871 * auto-negotiation is enabled. 4872 */ 4873 if (cfg->autoneg) { 4874 req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT | 4875 HNS3_PHY_LINK_SPEED_10M_HD_BIT | 4876 HNS3_PHY_LINK_SPEED_100M_BIT | 4877 HNS3_PHY_LINK_SPEED_100M_HD_BIT | 4878 HNS3_PHY_LINK_SPEED_1000M_BIT; 4879 } else { 4880 req->speed = cfg->speed; 4881 req->duplex = cfg->duplex; 4882 } 4883 4884 return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 4885 } 4886 4887 static int 4888 hns3_set_autoneg(struct hns3_hw *hw, bool enable) 4889 { 4890 struct hns3_config_auto_neg_cmd *req; 4891 struct hns3_cmd_desc desc; 4892 uint32_t flag = 0; 4893 int ret; 4894 4895 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false); 4896 4897 req = (struct hns3_config_auto_neg_cmd *)desc.data; 4898 if (enable) 4899 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1); 4900 req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag); 4901 4902 ret = hns3_cmd_send(hw, &desc, 1); 4903 if (ret) 4904 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret); 4905 4906 return ret; 4907 } 4908 4909 static int 4910 hns3_set_fiber_port_link_speed(struct hns3_hw *hw, 4911 struct hns3_set_link_speed_cfg *cfg) 4912 { 4913 int ret; 4914 4915 if (hw->mac.support_autoneg) { 4916 ret = hns3_set_autoneg(hw, cfg->autoneg); 4917 if (ret) { 4918 hns3_err(hw, "failed to configure auto-negotiation."); 4919 return ret; 4920 } 4921 4922 /* 4923 * To enable auto-negotiation, we only need to open the switch 4924 * of auto-negotiation, then firmware sets all speed 4925 * capabilities. 
		 */
		if (cfg->autoneg)
			return 0;
	}

	/*
	 * Some hardware doesn't support auto-negotiation, but users may not
	 * configure link_speeds (default 0), which means auto-negotiation.
	 * In this case, a warning message needs to be printed, instead of
	 * an error.
	 */
	if (cfg->autoneg) {
		hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
		return 0;
	}

	return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
}

const char *
hns3_get_media_type_name(uint8_t media_type)
{
	if (media_type == HNS3_MEDIA_TYPE_FIBER)
		return "fiber";
	else if (media_type == HNS3_MEDIA_TYPE_COPPER)
		return "copper";
	else if (media_type == HNS3_MEDIA_TYPE_BACKPLANE)
		return "backplane";
	else
		return "unknown";
}

static int
hns3_set_port_link_speed(struct hns3_hw *hw,
			 struct hns3_set_link_speed_cfg *cfg)
{
	int ret;

	if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
		ret = hns3_set_copper_port_link_speed(hw, cfg);
	else
		ret = hns3_set_fiber_port_link_speed(hw, cfg);

	if (ret) {
		hns3_err(hw, "failed to set %s port link speed, ret = %d.",
			 hns3_get_media_type_name(hw->mac.media_type),
			 ret);
		return ret;
	}

	return 0;
}

static int
hns3_apply_link_speed(struct hns3_hw *hw)
{
	struct rte_eth_conf *conf = &hw->data->dev_conf;
	struct hns3_set_link_speed_cfg cfg;

	memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
	cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
		      RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
	if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
		cfg.speed = hns3_get_link_speed(conf->link_speeds);
		cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
	}

	return hns3_set_port_link_speed(hw, &cfg);
}

static int
hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	bool link_en;
	int ret;

	ret = hns3_update_queue_map_configure(hns);
	if (ret) {
		hns3_err(hw, "failed to update queue mapping configuration, ret = %d",
			 ret);
		return ret;
	}

	/* Note: hns3_tm_conf_update must be called after configuring DCB. */
	ret = hns3_tm_conf_update(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
		return ret;
	}

	hns3_enable_rxd_adv_layout(hw);

	ret = hns3_init_queues(hns, reset_queue);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
		return ret;
	}

	link_en = hw->set_link_down ? false : true;
	ret = hns3_cfg_mac_mode(hw, link_en);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
		goto err_config_mac_mode;
	}

	ret = hns3_apply_link_speed(hw);
	if (ret)
		goto err_set_link_speed;

	return hns3_restore_filter(hns);

err_set_link_speed:
	(void)hns3_cfg_mac_mode(hw, false);

err_config_mac_mode:
	hns3_dev_release_mbufs(hns);
	/*
	 * This is exception handling. hns3_reset_all_tqps will print the
	 * corresponding error message if it fails, so it is not necessary to
	 * check its return value here; keep ret as the error code causing
	 * the exception.
5048 */ 5049 (void)hns3_reset_all_tqps(hns); 5050 return ret; 5051 } 5052 5053 static int 5054 hns3_dev_start(struct rte_eth_dev *dev) 5055 { 5056 struct hns3_adapter *hns = dev->data->dev_private; 5057 struct hns3_hw *hw = &hns->hw; 5058 bool old_state = hw->set_link_down; 5059 int ret; 5060 5061 PMD_INIT_FUNC_TRACE(); 5062 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) 5063 return -EBUSY; 5064 5065 rte_spinlock_lock(&hw->lock); 5066 hw->adapter_state = HNS3_NIC_STARTING; 5067 5068 /* 5069 * If the dev_set_link_down() API has been called, the "set_link_down" 5070 * flag can be cleared by dev_start() API. In addition, the flag should 5071 * also be cleared before calling hns3_do_start() so that MAC can be 5072 * enabled in dev_start stage. 5073 */ 5074 hw->set_link_down = false; 5075 ret = hns3_do_start(hns, true); 5076 if (ret) 5077 goto do_start_fail; 5078 5079 ret = hns3_map_rx_interrupt(dev); 5080 if (ret) 5081 goto map_rx_inter_err; 5082 5083 /* 5084 * There are three register used to control the status of a TQP 5085 * (contains a pair of Tx queue and Rx queue) in the new version network 5086 * engine. One is used to control the enabling of Tx queue, the other is 5087 * used to control the enabling of Rx queue, and the last is the master 5088 * switch used to control the enabling of the tqp. The Tx register and 5089 * TQP register must be enabled at the same time to enable a Tx queue. 5090 * The same applies to the Rx queue. For the older network engine, this 5091 * function only refresh the enabled flag, and it is used to update the 5092 * status of queue in the dpdk framework. 5093 */ 5094 ret = hns3_start_all_txqs(dev); 5095 if (ret) 5096 goto map_rx_inter_err; 5097 5098 ret = hns3_start_all_rxqs(dev); 5099 if (ret) 5100 goto start_all_rxqs_fail; 5101 5102 hw->adapter_state = HNS3_NIC_STARTED; 5103 rte_spinlock_unlock(&hw->lock); 5104 5105 hns3_rx_scattered_calc(dev); 5106 hns3_start_rxtx_datapath(dev); 5107 5108 /* Enable interrupt of all rx queues before enabling queues */ 5109 hns3_dev_all_rx_queue_intr_enable(hw, true); 5110 5111 /* 5112 * After finished the initialization, enable tqps to receive/transmit 5113 * packets and refresh all queue status. 5114 */ 5115 hns3_start_tqps(hw); 5116 5117 hns3_tm_dev_start_proc(hw); 5118 5119 if (dev->data->dev_conf.intr_conf.lsc != 0) 5120 hns3_dev_link_update(dev, 0); 5121 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); 5122 5123 hns3_info(hw, "hns3 dev start successful!"); 5124 5125 return 0; 5126 5127 start_all_rxqs_fail: 5128 hns3_stop_all_txqs(dev); 5129 map_rx_inter_err: 5130 (void)hns3_do_stop(hns); 5131 do_start_fail: 5132 hw->set_link_down = old_state; 5133 hw->adapter_state = HNS3_NIC_CONFIGURED; 5134 rte_spinlock_unlock(&hw->lock); 5135 5136 return ret; 5137 } 5138 5139 static int 5140 hns3_do_stop(struct hns3_adapter *hns) 5141 { 5142 struct hns3_hw *hw = &hns->hw; 5143 int ret; 5144 5145 /* 5146 * The "hns3_do_stop" function will also be called by .stop_service to 5147 * prepare reset. At the time of global or IMP reset, the command cannot 5148 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be 5149 * accessed during the reset process. So the mbuf can not be released 5150 * during reset and is required to be released after the reset is 5151 * completed. 
5152 */ 5153 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) 5154 hns3_dev_release_mbufs(hns); 5155 5156 ret = hns3_cfg_mac_mode(hw, false); 5157 if (ret) 5158 return ret; 5159 hw->mac.link_status = RTE_ETH_LINK_DOWN; 5160 5161 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { 5162 hns3_configure_all_mac_addr(hns, true); 5163 ret = hns3_reset_all_tqps(hns); 5164 if (ret) { 5165 hns3_err(hw, "failed to reset all queues ret = %d.", 5166 ret); 5167 return ret; 5168 } 5169 } 5170 5171 return 0; 5172 } 5173 5174 static int 5175 hns3_dev_stop(struct rte_eth_dev *dev) 5176 { 5177 struct hns3_adapter *hns = dev->data->dev_private; 5178 struct hns3_hw *hw = &hns->hw; 5179 5180 PMD_INIT_FUNC_TRACE(); 5181 dev->data->dev_started = 0; 5182 5183 hw->adapter_state = HNS3_NIC_STOPPING; 5184 hns3_stop_rxtx_datapath(dev); 5185 5186 rte_spinlock_lock(&hw->lock); 5187 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { 5188 hns3_tm_dev_stop_proc(hw); 5189 hns3_config_mac_tnl_int(hw, false); 5190 hns3_stop_tqps(hw); 5191 hns3_do_stop(hns); 5192 hns3_unmap_rx_interrupt(dev); 5193 hw->adapter_state = HNS3_NIC_CONFIGURED; 5194 } 5195 hns3_rx_scattered_reset(dev); 5196 rte_eal_alarm_cancel(hns3_service_handler, dev); 5197 hns3_stop_report_lse(dev); 5198 rte_spinlock_unlock(&hw->lock); 5199 5200 return 0; 5201 } 5202 5203 static int 5204 hns3_dev_close(struct rte_eth_dev *eth_dev) 5205 { 5206 struct hns3_adapter *hns = eth_dev->data->dev_private; 5207 struct hns3_hw *hw = &hns->hw; 5208 int ret = 0; 5209 5210 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 5211 hns3_mp_uninit(eth_dev); 5212 return 0; 5213 } 5214 5215 if (hw->adapter_state == HNS3_NIC_STARTED) 5216 ret = hns3_dev_stop(eth_dev); 5217 5218 hw->adapter_state = HNS3_NIC_CLOSING; 5219 hns3_reset_abort(hns); 5220 hw->adapter_state = HNS3_NIC_CLOSED; 5221 5222 hns3_configure_all_mc_mac_addr(hns, true); 5223 hns3_remove_all_vlan_table(hns); 5224 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0); 5225 hns3_uninit_pf(eth_dev); 5226 hns3_free_all_queues(eth_dev); 5227 rte_free(hw->reset.wait_data); 5228 hns3_mp_uninit(eth_dev); 5229 hns3_warn(hw, "Close port %u finished", hw->data->port_id); 5230 5231 return ret; 5232 } 5233 5234 static void 5235 hns3_get_autoneg_rxtx_pause(struct hns3_hw *hw, bool *rx_pause, bool *tx_pause) 5236 { 5237 struct hns3_mac *mac = &hw->mac; 5238 uint32_t advertising = mac->advertising; 5239 uint32_t lp_advertising = mac->lp_advertising; 5240 *rx_pause = false; 5241 *tx_pause = false; 5242 5243 if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) { 5244 *rx_pause = true; 5245 *tx_pause = true; 5246 } else if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) { 5247 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5248 *rx_pause = true; 5249 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5250 *tx_pause = true; 5251 } 5252 } 5253 5254 static enum hns3_fc_mode 5255 hns3_get_autoneg_fc_mode(struct hns3_hw *hw) 5256 { 5257 enum hns3_fc_mode current_mode; 5258 bool rx_pause = false; 5259 bool tx_pause = false; 5260 5261 hns3_get_autoneg_rxtx_pause(hw, &rx_pause, &tx_pause); 5262 5263 if (rx_pause && tx_pause) 5264 current_mode = HNS3_FC_FULL; 5265 else if (rx_pause) 5266 current_mode = HNS3_FC_RX_PAUSE; 5267 else if (tx_pause) 5268 current_mode = HNS3_FC_TX_PAUSE; 5269 else 5270 current_mode = HNS3_FC_NONE; 5271 5272 return current_mode; 5273 } 5274 5275 static enum hns3_fc_mode 5276 hns3_get_current_fc_mode(struct rte_eth_dev *dev) 5277 { 
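	/*
	 * For reference, hns3_get_autoneg_rxtx_pause() above resolves the
	 * negotiated pause configuration from the advertised PAUSE and
	 * ASYM_PAUSE bits as follows:
	 *   local PAUSE and peer PAUSE          -> Rx and Tx pause
	 *   both ASYM_PAUSE, only local PAUSE   -> Rx pause
	 *   both ASYM_PAUSE, only peer PAUSE    -> Tx pause
	 *   anything else                       -> no pause
	 */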
5278 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5279 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5280 struct hns3_mac *mac = &hw->mac; 5281 5282 /* 5283 * When the flow control mode is obtained, the device may not complete 5284 * auto-negotiation. It is necessary to wait for link establishment. 5285 */ 5286 (void)hns3_dev_link_update(dev, 1); 5287 5288 /* 5289 * If the link auto-negotiation of the nic is disabled, or the flow 5290 * control auto-negotiation is not supported, the forced flow control 5291 * mode is used. 5292 */ 5293 if (mac->link_autoneg == 0 || !pf->support_fc_autoneg) 5294 return hw->requested_fc_mode; 5295 5296 return hns3_get_autoneg_fc_mode(hw); 5297 } 5298 5299 int 5300 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5301 { 5302 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5303 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5304 enum hns3_fc_mode current_mode; 5305 5306 current_mode = hns3_get_current_fc_mode(dev); 5307 switch (current_mode) { 5308 case HNS3_FC_FULL: 5309 fc_conf->mode = RTE_ETH_FC_FULL; 5310 break; 5311 case HNS3_FC_TX_PAUSE: 5312 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 5313 break; 5314 case HNS3_FC_RX_PAUSE: 5315 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 5316 break; 5317 case HNS3_FC_NONE: 5318 default: 5319 fc_conf->mode = RTE_ETH_FC_NONE; 5320 break; 5321 } 5322 5323 fc_conf->pause_time = pf->pause_time; 5324 fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0; 5325 5326 return 0; 5327 } 5328 5329 static int 5330 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) 5331 { 5332 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 5333 5334 if (!pf->support_fc_autoneg) { 5335 if (autoneg != 0) { 5336 hns3_err(hw, "unsupported fc auto-negotiation."); 5337 return -EOPNOTSUPP; 5338 } 5339 5340 return 0; 5341 } 5342 5343 /* 5344 * If flow control auto-negotiation of the NIC is supported, all 5345 * auto-negotiation features are supported. 5346 */ 5347 if (autoneg != hw->mac.link_autoneg) { 5348 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!"); 5349 return -EOPNOTSUPP; 5350 } 5351 5352 return 0; 5353 } 5354 5355 static int 5356 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5357 { 5358 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5359 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5360 int ret; 5361 5362 if (fc_conf->high_water || fc_conf->low_water || 5363 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) { 5364 hns3_err(hw, "Unsupported flow control settings specified, " 5365 "high_water(%u), low_water(%u), send_xon(%u) and " 5366 "mac_ctrl_frame_fwd(%u) must be set to '0'", 5367 fc_conf->high_water, fc_conf->low_water, 5368 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd); 5369 return -EINVAL; 5370 } 5371 5372 ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg); 5373 if (ret) 5374 return ret; 5375 5376 if (!fc_conf->pause_time) { 5377 hns3_err(hw, "Invalid pause time %u setting.", 5378 fc_conf->pause_time); 5379 return -EINVAL; 5380 } 5381 5382 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 5383 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) { 5384 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. 
" 5385 "current_fc_status = %d", hw->current_fc_status); 5386 return -EOPNOTSUPP; 5387 } 5388 5389 if (hw->num_tc > 1 && !pf->support_multi_tc_pause) { 5390 hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported."); 5391 return -EOPNOTSUPP; 5392 } 5393 5394 rte_spinlock_lock(&hw->lock); 5395 ret = hns3_fc_enable(dev, fc_conf); 5396 rte_spinlock_unlock(&hw->lock); 5397 5398 return ret; 5399 } 5400 5401 static int 5402 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, 5403 struct rte_eth_pfc_conf *pfc_conf) 5404 { 5405 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5406 int ret; 5407 5408 if (!hns3_dev_get_support(hw, DCB)) { 5409 hns3_err(hw, "This port does not support dcb configurations."); 5410 return -EOPNOTSUPP; 5411 } 5412 5413 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water || 5414 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) { 5415 hns3_err(hw, "Unsupported flow control settings specified, " 5416 "high_water(%u), low_water(%u), send_xon(%u) and " 5417 "mac_ctrl_frame_fwd(%u) must be set to '0'", 5418 pfc_conf->fc.high_water, pfc_conf->fc.low_water, 5419 pfc_conf->fc.send_xon, 5420 pfc_conf->fc.mac_ctrl_frame_fwd); 5421 return -EINVAL; 5422 } 5423 if (pfc_conf->fc.autoneg) { 5424 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 5425 return -EINVAL; 5426 } 5427 if (pfc_conf->fc.pause_time == 0) { 5428 hns3_err(hw, "Invalid pause time %u setting.", 5429 pfc_conf->fc.pause_time); 5430 return -EINVAL; 5431 } 5432 5433 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 5434 hw->current_fc_status == HNS3_FC_STATUS_PFC)) { 5435 hns3_err(hw, "MAC pause is enabled. Cannot set PFC." 5436 "current_fc_status = %d", hw->current_fc_status); 5437 return -EOPNOTSUPP; 5438 } 5439 5440 rte_spinlock_lock(&hw->lock); 5441 ret = hns3_dcb_pfc_enable(dev, pfc_conf); 5442 rte_spinlock_unlock(&hw->lock); 5443 5444 return ret; 5445 } 5446 5447 static int 5448 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) 5449 { 5450 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5451 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5452 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; 5453 int i; 5454 5455 rte_spinlock_lock(&hw->lock); 5456 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) 5457 dcb_info->nb_tcs = pf->local_max_tc; 5458 else 5459 dcb_info->nb_tcs = 1; 5460 5461 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) 5462 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; 5463 for (i = 0; i < dcb_info->nb_tcs; i++) 5464 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; 5465 5466 for (i = 0; i < hw->num_tc; i++) { 5467 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; 5468 dcb_info->tc_queue.tc_txq[0][i].base = 5469 hw->tc_queue[i].tqp_offset; 5470 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; 5471 dcb_info->tc_queue.tc_txq[0][i].nb_queue = 5472 hw->tc_queue[i].tqp_count; 5473 } 5474 rte_spinlock_unlock(&hw->lock); 5475 5476 return 0; 5477 } 5478 5479 static int 5480 hns3_reinit_dev(struct hns3_adapter *hns) 5481 { 5482 struct hns3_hw *hw = &hns->hw; 5483 int ret; 5484 5485 ret = hns3_cmd_init(hw); 5486 if (ret) { 5487 hns3_err(hw, "Failed to init cmd: %d", ret); 5488 return ret; 5489 } 5490 5491 ret = hns3_init_hardware(hns); 5492 if (ret) { 5493 hns3_err(hw, "Failed to init hardware: %d", ret); 5494 return ret; 5495 } 5496 5497 ret = hns3_reset_all_tqps(hns); 5498 if (ret) { 5499 hns3_err(hw, "Failed to reset all queues: %d", ret); 
		return ret;
	}

	ret = hns3_enable_hw_error_intr(hns, true);
	if (ret) {
		hns3_err(hw, "fail to enable hw error interrupts: %d",
			 ret);
		return ret;
	}
	hns3_info(hw, "Reset done, driver initialization finished.");

	return 0;
}

static bool
is_pf_reset_done(struct hns3_hw *hw)
{
	uint32_t val, reg, reg_bit;

	switch (hw->reset.level) {
	case HNS3_IMP_RESET:
		reg = HNS3_GLOBAL_RESET_REG;
		reg_bit = HNS3_IMP_RESET_BIT;
		break;
	case HNS3_GLOBAL_RESET:
		reg = HNS3_GLOBAL_RESET_REG;
		reg_bit = HNS3_GLOBAL_RESET_BIT;
		break;
	case HNS3_FUNC_RESET:
		reg = HNS3_FUN_RST_ING;
		reg_bit = HNS3_FUN_RST_ING_B;
		break;
	case HNS3_FLR_RESET:
	default:
		hns3_err(hw, "Wait for unsupported reset level: %d",
			 hw->reset.level);
		return true;
	}
	val = hns3_read_dev(hw, reg);
	if (hns3_get_bit(val, reg_bit))
		return false;
	else
		return true;
}

static enum hns3_reset_level
hns3_detect_reset_event(struct hns3_hw *hw)
{
	enum hns3_reset_level new_req = HNS3_NONE_RESET;
	uint32_t vector0_intr_state;

	vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state)
		new_req = HNS3_IMP_RESET;
	else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state)
		new_req = HNS3_GLOBAL_RESET;

	return new_req;
}

bool
hns3_is_reset_pending(struct hns3_adapter *hns)
{
	enum hns3_reset_level new_req;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level last_req;

	/*
	 * Only the primary process can handle the reset event,
	 * so don't check the reset event in the secondary process.
5570 */ 5571 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5572 return false; 5573 5574 new_req = hns3_detect_reset_event(hw); 5575 if (new_req == HNS3_NONE_RESET) 5576 return false; 5577 5578 last_req = hns3_get_reset_level(hns, &hw->reset.pending); 5579 if (last_req == HNS3_NONE_RESET || last_req < new_req) { 5580 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 5581 hns3_schedule_delayed_reset(hns); 5582 hns3_warn(hw, "High level reset detected, delay do reset"); 5583 return true; 5584 } 5585 last_req = hns3_get_reset_level(hns, &hw->reset.request); 5586 if (last_req != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 5587 hw->reset.level < last_req) { 5588 hns3_warn(hw, "High level reset %d is request", last_req); 5589 return true; 5590 } 5591 return false; 5592 } 5593 5594 static int 5595 hns3_wait_hardware_ready(struct hns3_adapter *hns) 5596 { 5597 struct hns3_hw *hw = &hns->hw; 5598 struct hns3_wait_data *wait_data = hw->reset.wait_data; 5599 struct timeval tv; 5600 5601 if (wait_data->result == HNS3_WAIT_SUCCESS) 5602 return 0; 5603 else if (wait_data->result == HNS3_WAIT_TIMEOUT) { 5604 hns3_clock_gettime(&tv); 5605 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", 5606 tv.tv_sec, tv.tv_usec); 5607 return -ETIME; 5608 } else if (wait_data->result == HNS3_WAIT_REQUEST) 5609 return -EAGAIN; 5610 5611 wait_data->hns = hns; 5612 wait_data->check_completion = is_pf_reset_done; 5613 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT * 5614 HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms(); 5615 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC; 5616 wait_data->count = HNS3_RESET_WAIT_CNT; 5617 wait_data->result = HNS3_WAIT_REQUEST; 5618 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); 5619 return -EAGAIN; 5620 } 5621 5622 static int 5623 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) 5624 { 5625 struct hns3_cmd_desc desc; 5626 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data; 5627 5628 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); 5629 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1); 5630 req->fun_reset_vfid = func_id; 5631 5632 return hns3_cmd_send(hw, &desc, 1); 5633 } 5634 5635 static void 5636 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) 5637 { 5638 struct hns3_hw *hw = &hns->hw; 5639 struct timeval tv; 5640 uint32_t val; 5641 5642 hns3_clock_gettime(&tv); 5643 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) || 5644 hns3_read_dev(hw, HNS3_FUN_RST_ING)) { 5645 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld", 5646 tv.tv_sec, tv.tv_usec); 5647 return; 5648 } 5649 5650 switch (reset_level) { 5651 case HNS3_IMP_RESET: 5652 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 5653 hns3_set_bit(val, HNS3_VECTOR0_TRIGGER_IMP_RESET_B, 1); 5654 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); 5655 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", 5656 tv.tv_sec, tv.tv_usec); 5657 break; 5658 case HNS3_GLOBAL_RESET: 5659 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG); 5660 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1); 5661 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val); 5662 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld", 5663 tv.tv_sec, tv.tv_usec); 5664 break; 5665 case HNS3_FUNC_RESET: 5666 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld", 5667 tv.tv_sec, tv.tv_usec); 5668 /* schedule again to check later */ 5669 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending); 5670 hns3_schedule_reset(hns); 5671 
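		/*
		 * Unlike the IMP and global cases above, which trigger the
		 * reset by writing a hardware register directly, a function
		 * reset is only marked as pending here; the actual
		 * HNS3_OPC_CFG_RST_TRIGGER command is sent later from the
		 * scheduled reset task (see hns3_prepare_reset()).
		 */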
		break;
	default:
		hns3_warn(hw, "Unsupported reset level: %d", reset_level);
		return;
	}
	hns3_atomic_clear_bit(reset_level, &hw->reset.request);
}

static enum hns3_reset_level
hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
{
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level = HNS3_NONE_RESET;

	/* Return the highest priority reset level amongst all */
	if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
		reset_level = HNS3_IMP_RESET;
	else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
		reset_level = HNS3_GLOBAL_RESET;
	else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
		reset_level = HNS3_FUNC_RESET;
	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
		reset_level = HNS3_FLR_RESET;

	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
		return HNS3_NONE_RESET;

	return reset_level;
}

static void
hns3_record_imp_error(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t reg_val;

	reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
		hns3_warn(hw, "Detected IMP RD poison!");
		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
	}

	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
		hns3_warn(hw, "Detected IMP CMDQ error!");
		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
	}
}

static int
hns3_prepare_reset(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t reg_val;
	int ret;

	switch (hw->reset.level) {
	case HNS3_FUNC_RESET:
		ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
		if (ret)
			return ret;

		/*
		 * After performing PF reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hns3_cmd_init is called.
		 */
		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
		hw->reset.stats.request_cnt++;
		break;
	case HNS3_IMP_RESET:
		hns3_record_imp_error(hns);
		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
		break;
	default:
		break;
	}
	return 0;
}

static int
hns3_set_rst_done(struct hns3_hw *hw)
{
	struct hns3_pf_rst_done_cmd *req;
	struct hns3_cmd_desc desc;

	req = (struct hns3_pf_rst_done_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_stop_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	hw->mac.link_status = RTE_ETH_LINK_DOWN;
	if (hw->adapter_state == HNS3_NIC_STARTED) {
		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
		hns3_update_linkstatus_and_event(hw, false);
	}
	hns3_stop_rxtx_datapath(eth_dev);

	rte_spinlock_lock(&hw->lock);
	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
	    hw->adapter_state == HNS3_NIC_STOPPING) {
		hns3_enable_all_queues(hw, false);
		hns3_do_stop(hns);
		hw->reset.mbuf_deferred_free = true;
	} else
		hw->reset.mbuf_deferred_free = false;

	/*
	 * It is cumbersome for hardware to pick-and-choose entries for
	 * deletion from table space. Hence, for function reset, software
	 * intervention is required to delete the entries.
	 */
	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
		hns3_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_start_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	if (hw->reset.level == HNS3_IMP_RESET ||
	    hw->reset.level == HNS3_GLOBAL_RESET)
		hns3_set_rst_done(hw);
	eth_dev = &rte_eth_devices[hw->data->port_id];
	hns3_start_rxtx_datapath(eth_dev);
	if (hw->adapter_state == HNS3_NIC_STARTED) {
		/*
		 * The parent function of this API already holds hns3_hw.lock.
		 * hns3_service_handler may report LSE; in a bonding
		 * application it will call the driver's ops, which may
		 * acquire hns3_hw.lock again and thus lead to deadlock.
		 * Defer the call to hns3_service_handler to avoid the
		 * deadlock.
		 */
		rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
				  hns3_service_handler, eth_dev);

		/* Enable interrupt of all rx queues before enabling queues */
		hns3_dev_all_rx_queue_intr_enable(hw, true);
		/*
		 * The enable state of each rxq and txq will be recovered
		 * after reset, so they need to be restored before enabling
		 * all tqps.
		 */
		hns3_restore_tqp_enable_state(hw);
		/*
		 * When the initialization is finished, enable queues to
		 * receive and transmit packets.

static int
hns3_start_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	if (hw->reset.level == HNS3_IMP_RESET ||
	    hw->reset.level == HNS3_GLOBAL_RESET)
		hns3_set_rst_done(hw);
	eth_dev = &rte_eth_devices[hw->data->port_id];
	hns3_start_rxtx_datapath(eth_dev);
	if (hw->adapter_state == HNS3_NIC_STARTED) {
		/*
		 * The caller of this function already holds hns3_hw.lock.
		 * hns3_service_handler may report an LSC event; in a bonding
		 * application that event handler can call driver ops which
		 * acquire hns3_hw.lock again, leading to deadlock.
		 * Defer the call to hns3_service_handler to avoid the
		 * deadlock.
		 */
		rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
				  hns3_service_handler, eth_dev);

		/* Enable interrupts of all Rx queues before enabling the queues. */
		hns3_dev_all_rx_queue_intr_enable(hw, true);
		/*
		 * The enable state of each rxq and txq needs to be recovered
		 * after reset, so restore it before enabling all TQPs.
		 */
		hns3_restore_tqp_enable_state(hw);
		/*
		 * When initialization is finished, enable the queues to
		 * receive and transmit packets.
		 */
		hns3_enable_all_queues(hw, true);
	}

	return 0;
}

static int
hns3_restore_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_configure_all_mac_addr(hns, false);
	if (ret)
		return ret;

	ret = hns3_configure_all_mc_mac_addr(hns, false);
	if (ret)
		goto err_mc_mac;

	ret = hns3_dev_promisc_restore(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_table(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_conf(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_ptp(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_rx_interrupt(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_gro_conf(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_fec(hw);
	if (ret)
		goto err_promisc;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
		ret = hns3_do_start(hns, false);
		if (ret)
			goto err_promisc;
		hns3_info(hw, "hns3 dev restart successful!");
	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

err_promisc:
	hns3_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
	hns3_configure_all_mac_addr(hns, true);
	return ret;
}
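
/*
 * Descriptive note (added for clarity; the state names are those used in
 * this file, see hns3_reset_service() below and hns3_dev_init()):
 * hw->reset.schedule moves from SCHEDULE_NONE to SCHEDULE_DEFERRED or
 * SCHEDULE_PENDING when work is queued, to SCHEDULE_REQUESTED while a lost
 * interrupt is being replayed, and back to SCHEDULE_NONE once the delayed
 * task below has run.
 */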

static void
hns3_reset_service(void *param)
{
	struct hns3_adapter *hns = (struct hns3_adapter *)param;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level;
	struct timeval tv_delta;
	struct timeval tv_start;
	struct timeval tv;
	uint64_t msec;
	int ret;

	/*
	 * If the interrupt was not triggered within the delay time, it may
	 * have been lost. Handle the interrupt here to recover from the
	 * error.
	 */
	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
			    SCHEDULE_DEFERRED) {
		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
				 __ATOMIC_RELAXED);
		hns3_err(hw, "Handling interrupts in delayed tasks");
		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
		if (reset_level == HNS3_NONE_RESET) {
			hns3_err(hw, "No reset level is set, try IMP reset");
			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
		}
	}
	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);

	/*
	 * Check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is, we need to wait
	 * for the hardware to complete the reset.
	 * a. If we are able to figure out in reasonable time that the
	 *    hardware has fully reset, we can proceed with the driver and
	 *    client reset.
	 * b. Else, we can come back later to check the status, so
	 *    reschedule now.
	 */
	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
	if (reset_level != HNS3_NONE_RESET) {
		hns3_clock_gettime(&tv_start);
		ret = hns3_reset_process(hns, reset_level);
		hns3_clock_gettime(&tv);
		timersub(&tv, &tv_start, &tv_delta);
		msec = hns3_clock_calctime_ms(&tv_delta);
		if (msec > HNS3_RESET_PROCESS_MS)
			hns3_err(hw, "%d handle long time delta %" PRIu64 " ms time=%ld.%.6ld",
				 hw->reset.level, msec,
				 tv.tv_sec, tv.tv_usec);
		if (ret == -EAGAIN)
			return;
	}

	/* Check if we got any *new* reset requests to be honored */
	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
	if (reset_level != HNS3_NONE_RESET)
		hns3_msix_process(hns, reset_level);
}

static uint32_t
hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
			uint32_t speed_capa)
{
	uint32_t speed_bit;
	uint32_t num = 0;
	uint32_t i;

	for (i = 0; i < RTE_DIM(speed_fec_capa_tbl); i++) {
		speed_bit =
			rte_eth_speed_bitflag(speed_fec_capa_tbl[i].speed,
					      RTE_ETH_LINK_FULL_DUPLEX);
		if ((speed_capa & speed_bit) == 0)
			continue;

		speed_fec_capa[num].speed = speed_fec_capa_tbl[i].speed;
		speed_fec_capa[num].capa = speed_fec_capa_tbl[i].capa;
		num++;
	}

	return num;
}

static int
hns3_fec_get_capability(struct rte_eth_dev *dev,
			struct rte_eth_fec_capa *speed_fec_capa,
			unsigned int num)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int speed_num;
	uint32_t speed_capa;

	speed_capa = hns3_get_speed_capa(hw);
	/* speed_num counts the number of speed capabilities. */
	speed_num = rte_popcount32(speed_capa & HNS3_SPEEDS_SUPP_FEC);
	if (speed_num == 0)
		return -ENOTSUP;

	if (speed_fec_capa == NULL)
		return speed_num;

	if (num < speed_num) {
		hns3_err(hw, "array size (%u) is not enough to store FEC capabilities, should not be less than %u",
			 num, speed_num);
		return -EINVAL;
	}

	return hns3_get_speed_fec_capa(speed_fec_capa, speed_capa);
}
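
/*
 * Illustrative, application-side usage sketch (not part of the driver;
 * assumes a valid port and <stdio.h>/<stdlib.h>). The first call with a
 * NULL array returns the entry count, matching the speed_fec_capa == NULL
 * branch above; the second call fills the array.
 */
static __rte_unused int
app_query_fec_capa_example(uint16_t port_id)
{
	struct rte_eth_fec_capa *capa;
	int num, i;

	num = rte_eth_fec_get_capability(port_id, NULL, 0);
	if (num <= 0)
		return num;

	capa = calloc(num, sizeof(*capa));
	if (capa == NULL)
		return -ENOMEM;

	num = rte_eth_fec_get_capability(port_id, capa, num);
	for (i = 0; i < num; i++)
		printf("speed %u: FEC capa mask 0x%" PRIx32 "\n",
		       capa[i].speed, capa[i].capa);

	free(capa);
	return num < 0 ? num : 0;
}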

static int
get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * Reading CMD(HNS3_OPC_CONFIG_FEC_MODE) is not supported on devices
	 * with a link speed below 10 Gbps.
	 */
	if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
		*state = 0;
		return 0;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
	req = (struct hns3_config_fec_cmd *)desc.data;
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "get current fec auto state failed, ret = %d",
			 ret);
		return ret;
	}

	*state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
	return 0;
}

static int
hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
{
	struct hns3_sfp_info_cmd *resp;
	uint32_t tmp_fec_capa;
	uint8_t auto_state;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * If the link is down and AUTO is enabled, AUTO is returned;
	 * otherwise, the configured FEC mode is returned.
	 * If the link is up, the current FEC mode is returned.
	 */
	if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
		ret = get_current_fec_auto_state(hw, &auto_state);
		if (ret)
			return ret;

		if (auto_state == 0x1) {
			*fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
			return 0;
		}
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
	resp = (struct hns3_sfp_info_cmd *)desc.data;
	resp->query_type = HNS3_ACTIVE_QUERY;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		hns3_err(hw, "IMP does not support querying FEC, ret = %d", ret);
		return ret;
	} else if (ret) {
		hns3_err(hw, "get FEC failed, ret = %d", ret);
		return ret;
	}

	/*
	 * The FEC mode order defined in hns3 hardware differs from the one
	 * defined in the ethdev library, so the value needs to be converted.
	 */
	switch (resp->active_fec) {
	case HNS3_MAC_FEC_OFF:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	case HNS3_MAC_FEC_BASER:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
		break;
	case HNS3_MAC_FEC_RS:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		break;
	case HNS3_MAC_FEC_LLRS:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(LLRS);
		break;
	default:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	}

	*fec_capa = tmp_fec_capa;
	return 0;
}

static int
hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hns3_fec_get_internal(hw, fec_capa);
}

static int
hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);

	req = (struct hns3_config_fec_cmd *)desc.data;
	switch (mode) {
	case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(LLRS):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_LLRS);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
		hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
		break;
	default:
		return 0;
	}
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set fec mode failed, ret = %d", ret);

	return ret;
}
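
/*
 * Illustrative, application-side sketch (not part of the driver): request
 * RS-FEC on a port and read back the active mode. It assumes the port's
 * current speed supports RS-FEC; otherwise hns3_fec_mode_valid() below
 * rejects the request with -EINVAL.
 */
static __rte_unused int
app_set_and_check_rs_fec_example(uint16_t port_id)
{
	uint32_t fec_capa;
	int ret;

	ret = rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
	if (ret != 0)
		return ret;

	ret = rte_eth_fec_get(port_id, &fec_capa);
	if (ret != 0)
		return ret;

	/* A single RTE_ETH_FEC_MODE_CAPA_MASK() bit is reported here. */
	return fec_capa == RTE_ETH_FEC_MODE_CAPA_MASK(RS) ? 0 : -EIO;
}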

static uint32_t
hns3_parse_hw_fec_capa(uint8_t hw_fec_capa)
{
	const struct {
		uint32_t hw_fec_capa;
		uint32_t fec_capa;
	} fec_capa_map[] = {
		{ HNS3_FIBER_FEC_AUTO_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) },
		{ HNS3_FIBER_FEC_BASER_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
		{ HNS3_FIBER_FEC_RS_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
		{ HNS3_FIBER_FEC_LLRS_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(LLRS) },
		{ HNS3_FIBER_FEC_NOFEC_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) },
	};
	uint32_t capa = 0;
	uint32_t i;

	for (i = 0; i < RTE_DIM(fec_capa_map); i++) {
		if ((hw_fec_capa & fec_capa_map[i].hw_fec_capa) != 0)
			capa |= fec_capa_map[i].fec_capa;
	}

	return capa;
}

static uint32_t
hns3_get_current_speed_fec_cap(struct hns3_mac *mac)
{
	uint32_t i;

	if (mac->fec_capa != 0)
		return hns3_parse_hw_fec_capa(mac->fec_capa);

	for (i = 0; i < RTE_DIM(speed_fec_capa_tbl); i++) {
		if (mac->link_speed == speed_fec_capa_tbl[i].speed)
			return speed_fec_capa_tbl[i].capa;
	}

	return 0;
}

static int
hns3_fec_mode_valid(struct rte_eth_dev *dev, uint32_t mode)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	uint32_t cur_capa;

	if (rte_popcount32(mode) != 1) {
		hns3_err(hw, "FEC mode (0x%x) must have only one bit set", mode);
		return -EINVAL;
	}

	/*
	 * Check whether the configured mode is within the current FEC
	 * capability. If not, the configured mode is not supported.
	 */
	cur_capa = hns3_get_current_speed_fec_cap(&hw->mac);
	if ((cur_capa & mode) == 0) {
		hns3_err(hw, "unsupported FEC mode (0x%x)", mode);
		return -EINVAL;
	}

	return 0;
}

static int
hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	ret = hns3_fec_mode_valid(dev, mode);
	if (ret != 0)
		return ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_fec_hw(hw, mode);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	pf->fec_mode = mode;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_restore_fec(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t mode = pf->fec_mode;
	int ret;

	ret = hns3_set_fec_hw(hw, mode);
	if (ret)
		hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
			 mode, ret);

	return ret;
}

static int
hns3_query_dev_fec_info(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
	int ret;

	ret = hns3_fec_get_internal(hw, &pf->fec_mode);
	if (ret)
		hns3_err(hw, "query device FEC info failed, ret = %d", ret);

	return ret;
}

static bool
hns3_optical_module_existed(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	bool existed;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw,
			 "failed to get optical module presence state, ret = %d.",
			 ret);
		return false;
	}
	existed = !!desc.data[0];

	return existed;
}

static int
hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
			    uint32_t len, uint8_t *data)
{
#define HNS3_SFP_INFO_CMD_NUM 6
#define HNS3_SFP_INFO_MAX_LEN \
	(HNS3_SFP_INFO_BD0_LEN + \
	(HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
	struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
	struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
	uint16_t read_len;
	uint16_t copy_len;
	int ret;
	int i;

	for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
					  true);
		if (i < HNS3_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}

	sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
	read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);

	ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
	if (ret) {
		hns3_err(hw, "failed to get module EEPROM info, ret = %d.",
			 ret);
		return ret;
	}

	/* The data format in BD0 is different from the other BDs. */
	copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return (int)read_len;
}

static int
hns3_get_module_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *info)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	uint32_t offset = info->offset;
	uint32_t len = info->length;
	uint8_t *data = info->data;
	uint32_t read_len = 0;

	if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER)
		return -ENOTSUP;

	if (!hns3_optical_module_existed(hw)) {
		hns3_err(hw, "failed to read module EEPROM: no module is connected.");
		return -EIO;
	}

	while (read_len < len) {
		int ret;

		ret = hns3_get_module_eeprom_data(hw, offset + read_len,
						  len - read_len,
						  data + read_len);
		if (ret < 0)
			return -EIO;
		read_len += ret;
	}

	return 0;
}

static int
hns3_get_module_info(struct rte_eth_dev *dev,
		     struct rte_eth_dev_module_info *modinfo)
{
#define HNS3_SFF8024_ID_SFP 0x03
#define HNS3_SFF8024_ID_QSFP_8438 0x0c
#define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d
#define HNS3_SFF8024_ID_QSFP28_8636 0x11
#define HNS3_SFF_8636_V1_3 0x03
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct rte_dev_eeprom_info info;
	struct hns3_sfp_type sfp_type;
	int ret;

	memset(&sfp_type, 0, sizeof(sfp_type));
	memset(&info, 0, sizeof(info));
	info.data = (uint8_t *)&sfp_type;
	info.length = sizeof(sfp_type);
	ret = hns3_get_module_eeprom(dev, &info);
	if (ret)
		return ret;

	switch (sfp_type.type) {
	case HNS3_SFF8024_ID_SFP:
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
		break;
	case HNS3_SFF8024_ID_QSFP_8438:
		modinfo->type = RTE_ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
		break;
	case HNS3_SFF8024_ID_QSFP_8436_8636:
		if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
			modinfo->type = RTE_ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
		} else {
			modinfo->type = RTE_ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
		}
		break;
	case HNS3_SFF8024_ID_QSFP28_8636:
		modinfo->type = RTE_ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
		break;
	default:
		hns3_err(hw, "unknown module, type = %u, extra_type = %u.",
			 sfp_type.type, sfp_type.ext_type);
		return -EINVAL;
	}

	return 0;
}
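
/*
 * Illustrative, application-side sketch (not part of the driver; assumes
 * <stdlib.h>): the standard two-step flow that ends up in the two callbacks
 * above: query the module type and EEPROM length first, then read the data.
 */
static __rte_unused int
app_dump_module_eeprom_example(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info info;
	int ret;

	memset(&modinfo, 0, sizeof(modinfo));
	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	memset(&info, 0, sizeof(info));
	info.length = modinfo.eeprom_len;
	info.data = calloc(1, info.length);
	if (info.data == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_get_module_eeprom(port_id, &info);
	/* On success, info.data holds eeprom_len bytes of SFF-coded data. */
	free(info.data);
	return ret;
}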

static const struct eth_dev_ops hns3_eth_dev_ops = {
	.dev_configure = hns3_dev_configure,
	.dev_start = hns3_dev_start,
	.dev_stop = hns3_dev_stop,
	.dev_close = hns3_dev_close,
	.promiscuous_enable = hns3_dev_promiscuous_enable,
	.promiscuous_disable = hns3_dev_promiscuous_disable,
	.allmulticast_enable = hns3_dev_allmulticast_enable,
	.allmulticast_disable = hns3_dev_allmulticast_disable,
	.mtu_set = hns3_dev_mtu_set,
	.stats_get = hns3_stats_get,
	.stats_reset = hns3_stats_reset,
	.xstats_get = hns3_dev_xstats_get,
	.xstats_get_names = hns3_dev_xstats_get_names,
	.xstats_reset = hns3_dev_xstats_reset,
	.xstats_get_by_id = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get = hns3_dev_infos_get,
	.fw_version_get = hns3_fw_version_get,
	.rx_queue_setup = hns3_rx_queue_setup,
	.tx_queue_setup = hns3_tx_queue_setup,
	.rx_queue_release = hns3_dev_rx_queue_release,
	.tx_queue_release = hns3_dev_tx_queue_release,
	.rx_queue_start = hns3_dev_rx_queue_start,
	.rx_queue_stop = hns3_dev_rx_queue_stop,
	.tx_queue_start = hns3_dev_tx_queue_start,
	.tx_queue_stop = hns3_dev_tx_queue_stop,
	.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
	.rxq_info_get = hns3_rxq_info_get,
	.txq_info_get = hns3_txq_info_get,
	.rx_burst_mode_get = hns3_rx_burst_mode_get,
	.tx_burst_mode_get = hns3_tx_burst_mode_get,
	.flow_ctrl_get = hns3_flow_ctrl_get,
	.flow_ctrl_set = hns3_flow_ctrl_set,
	.priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
	.mac_addr_add = hns3_add_mac_addr,
	.mac_addr_remove = hns3_remove_mac_addr,
	.mac_addr_set = hns3_set_default_mac_addr,
	.set_mc_addr_list = hns3_set_mc_mac_addr_list,
	.link_update = hns3_dev_link_update,
	.dev_set_link_up = hns3_dev_set_link_up,
	.dev_set_link_down = hns3_dev_set_link_down,
	.rss_hash_update = hns3_dev_rss_hash_update,
	.rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
	.reta_update = hns3_dev_rss_reta_update,
	.reta_query = hns3_dev_rss_reta_query,
	.flow_ops_get = hns3_dev_flow_ops_get,
	.vlan_filter_set = hns3_vlan_filter_set,
	.vlan_tpid_set = hns3_vlan_tpid_set,
	.vlan_offload_set = hns3_vlan_offload_set,
	.vlan_pvid_set = hns3_vlan_pvid_set,
	.get_reg = hns3_get_regs,
	.get_module_info = hns3_get_module_info,
	.get_module_eeprom = hns3_get_module_eeprom,
	.get_dcb_info = hns3_get_dcb_info,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
	.fec_get_capability = hns3_fec_get_capability,
	.fec_get = hns3_fec_get,
	.fec_set = hns3_fec_set,
	.tm_ops_get = hns3_tm_ops_get,
	.tx_done_cleanup = hns3_tx_done_cleanup,
	.timesync_enable = hns3_timesync_enable,
	.timesync_disable = hns3_timesync_disable,
	.timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp,
	.timesync_adjust_time = hns3_timesync_adjust_time,
	.timesync_read_time = hns3_timesync_read_time,
	.timesync_write_time = hns3_timesync_write_time,
	.eth_dev_priv_dump = hns3_eth_dev_priv_dump,
	.eth_rx_descriptor_dump = hns3_rx_descriptor_dump,
	.eth_tx_descriptor_dump = hns3_tx_descriptor_dump,
	.get_monitor_addr = hns3_get_monitor_addr,
};

static const struct hns3_reset_ops hns3_reset_ops = {
	.reset_service = hns3_reset_service,
	.stop_service = hns3_stop_service,
	.prepare_reset = hns3_prepare_reset,
	.wait_hardware_ready = hns3_wait_hardware_ready,
	.reinit_dev = hns3_reinit_dev,
	.restore_conf = hns3_restore_conf,
	.start_service = hns3_start_service,
};

static void
hns3_init_hw_ops(struct hns3_hw *hw)
{
	hw->ops.add_mc_mac_addr = hns3_add_mc_mac_addr;
	hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr;
	hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr;
	hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr;
	hw->ops.bind_ring_with_vector = hns3_bind_ring_with_vector;
}

static int
hns3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	hns3_flow_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3_eth_dev_ops;
	eth_dev->rx_queue_count = hns3_rx_queue_count;
	ret = hns3_mp_init(eth_dev);
	if (ret)
		goto err_mp_init;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_tx_push_init(eth_dev);
		return 0;
	}

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = false;
	hw->data = eth_dev->data;
	hns3_parse_devargs(eth_dev);

	/*
	 * Set the default max packet size according to the default MTU
	 * value in the DPDK framework.
	 */
	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;

	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3_reset_ops;

	hns3_init_hw_ops(hw);
	ret = hns3_init_pf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
		goto err_init_pf;
	}

	ret = hns3_init_mac_addrs(eth_dev);
	if (ret != 0)
		goto err_init_mac_addrs;

	hw->adapter_state = HNS3_NIC_INITIALIZED;

	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
			    SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait for the ready flag before resetting. */
		hns3_notify_reset_ready(hw, false);
	}

	hns3_info(hw, "hns3 dev initialization successful!");
	return 0;

err_init_mac_addrs:
	hns3_uninit_pf(eth_dev);

err_init_pf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	hns3_mp_uninit(eth_dev);

err_mp_init:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->tx_descriptor_status = NULL;
	return ret;
}

static int
hns3_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_mp_uninit(eth_dev);
		return 0;
	}

	if (hw->adapter_state < HNS3_NIC_CLOSING)
		hns3_dev_close(eth_dev);

	hw->adapter_state = HNS3_NIC_REMOVED;
	return 0;
}
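
/*
 * Note (added for clarity): rte_eth_dev_pci_generic_probe() below allocates
 * an ethdev whose dev_private is sizeof(struct hns3_adapter) and then
 * invokes hns3_dev_init() on it; removal symmetrically calls
 * hns3_dev_uninit() through rte_eth_dev_pci_generic_remove().
 */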

static int
eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct hns3_adapter),
					     hns3_dev_init);
}

static int
eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
}

static const struct rte_pci_id pci_id_hns3_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
	{ .vendor_id = 0, }, /* sentinel */
};

static struct rte_pci_driver rte_hns3_pmd = {
	.id_table = pci_id_hns3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_hns3_pci_probe,
	.remove = eth_hns3_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_hns3,
	HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
	HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
	HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
	HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16> ");
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_tx, tx, DEBUG);
#endif
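
/*
 * Illustrative, application-side sketch (not part of the driver; assumes
 * <rte_eal.h> and a placeholder PCI address): the devargs registered above
 * are passed per device through the EAL -a option.
 */
static __rte_unused int
app_eal_init_with_hns3_devargs_example(void)
{
	char *eal_args[] = {
		"app", "-a",
		"0000:7d:00.0," HNS3_DEVARG_RX_FUNC_HINT "=vec,"
		HNS3_DEVARG_TX_FUNC_HINT "=common",
	};

	return rte_eal_init((int)RTE_DIM(eal_args), eal_args);
}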