/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <bus_pci_driver.h>
#include <ethdev_pci.h>

#include "hns3_common.h"
#include "hns3_dump.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"
#include "hns3_flow.h"
#include "hns3_ptp.h"
#include "hns3_ethdev.h"

#define HNS3_SERVICE_INTERVAL		1000000 /* us */
#define HNS3_SERVICE_QUICK_INTERVAL	10
#define HNS3_INVALID_PVID		0xFFFF

#define HNS3_FILTER_TYPE_VF		0
#define HNS3_FILTER_TYPE_PORT		1
#define HNS3_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HNS3_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HNS3_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HNS3_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HNS3_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HNS3_FILTER_FE_EGRESS		(HNS3_FILTER_FE_NIC_EGRESS_B \
					| HNS3_FILTER_FE_ROCE_EGRESS_B)
#define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
					| HNS3_FILTER_FE_ROCE_INGRESS_B)

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

#define HNS3_VECTOR0_IMP_RESET_INT_B		1
#define HNS3_VECTOR0_IMP_CMDQ_ERR_B		4U
#define HNS3_VECTOR0_IMP_RD_POISON_B		5U
#define HNS3_VECTOR0_ALL_MSIX_ERR_B		6U
#define HNS3_VECTOR0_TRIGGER_IMP_RESET_B	7U

#define HNS3_RESET_WAIT_MS	100
#define HNS3_RESET_WAIT_CNT	200

enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,
	HNS3_VECTOR0_EVENT_MBX,
	HNS3_VECTOR0_EVENT_ERR,
	HNS3_VECTOR0_EVENT_PTP,
	HNS3_VECTOR0_EVENT_OTHER,
};

struct hns3_intr_state {
	uint32_t vector0_state;
	uint32_t cmdq_state;
	uint32_t hw_err_state;
};

#define HNS3_SPEEDS_SUPP_FEC	(RTE_ETH_LINK_SPEED_10G | \
				 RTE_ETH_LINK_SPEED_25G | \
				 RTE_ETH_LINK_SPEED_40G | \
				 RTE_ETH_LINK_SPEED_50G | \
				 RTE_ETH_LINK_SPEED_100G | \
				 RTE_ETH_LINK_SPEED_200G)

static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			RTE_ETH_FEC_MODE_CAPA_MASK(RS) |
			RTE_ETH_FEC_MODE_CAPA_MASK(LLRS) }
};
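
/*
 * speed_fec_capa_tbl pairs each link speed with the bitmask of FEC modes
 * that may be reported for it. A sketch (hypothetical helper, not used by
 * the driver) of how such a table can be consulted:
 *
 *	static uint32_t
 *	lookup_fec_capa(uint32_t speed)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < RTE_DIM(speed_fec_capa_tbl); i++)
 *			if (speed_fec_capa_tbl[i].speed == speed)
 *				return speed_fec_capa_tbl[i].capa;
 *		return 0;
 *	}
 *
 * e.g. lookup_fec_capa(RTE_ETH_SPEED_NUM_25G) yields the NOFEC, AUTO,
 * BASER and RS capability bits.
 */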

static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
						  RTE_ATOMIC(uint64_t) *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
				    int on);
static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
static bool hns3_update_link_status(struct hns3_hw *hw);

static int hns3_add_mc_mac_addr(struct hns3_hw *hw,
				struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
				   struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct hns3_hw *hw);
static int hns3_do_stop(struct hns3_adapter *hns);
static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);


static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3_evt_cause
hns3_proc_imp_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
	hw->reset.stats.imp_cnt++;
	hns3_warn(hw, "IMP reset detected, clear reset status");

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_proc_global_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
	hw->reset.stats.global_cnt++;
	hns3_warn(hw, "Global reset detected, clear reset status");

	return HNS3_VECTOR0_EVENT_RST;
}

static void
hns3_query_intr_state(struct hns3_hw *hw, struct hns3_intr_state *state)
{
	state->vector0_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	state->cmdq_state = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	state->hw_err_state = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
}

static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_intr_state state;
	uint32_t val;
	enum hns3_evt_cause ret;

	hns3_query_intr_state(hw, &state);

	/*
	 * Assumption: if by any chance reset and mailbox events are reported
	 * together, we process only the reset event and defer the mailbox
	 * events. Since we would not have cleared the RX CMDQ event this
	 * time, we will receive another interrupt from H/W just for the
	 * mailbox.
	 */
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & state.vector0_state) { /* IMP */
		ret = hns3_proc_imp_reset_event(hns, &val);
		goto out;
	}

	/* Global reset */
	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & state.vector0_state) {
		ret = hns3_proc_global_reset_event(hns, &val);
		goto out;
	}

	/* Check for vector0 1588 event source */
	if (BIT(HNS3_VECTOR0_1588_INT_B) & state.vector0_state) {
		val = BIT(HNS3_VECTOR0_1588_INT_B);
		ret = HNS3_VECTOR0_EVENT_PTP;
		goto out;
	}

	/* check for vector0 msix event source */
	if (state.vector0_state & HNS3_VECTOR0_REG_MSIX_MASK ||
	    state.hw_err_state & HNS3_RAS_REG_NFE_MASK) {
		val = state.vector0_state | state.hw_err_state;
		ret = HNS3_VECTOR0_EVENT_ERR;
		goto out;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & state.cmdq_state) {
		state.cmdq_state &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		val = state.cmdq_state;
		ret = HNS3_VECTOR0_EVENT_MBX;
		goto out;
	}

	val = state.vector0_state;
	ret = HNS3_VECTOR0_EVENT_OTHER;

out:
	*clearval = val;
	return ret;
}
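
/*
 * The checks in hns3_check_event_cause() above impose a fixed priority on
 * the sources sharing vector 0: IMP reset, then global reset, then the
 * 1588 (PTP) event, then MSI-X/RAS errors, then the mailbox (CMDQ RX)
 * event, and finally "other". Only the highest-priority pending cause is
 * reported per call; a lower-priority cause stays uncleared, so the
 * hardware raises a fresh interrupt for it (see the mailbox note above).
 */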

void
hns3_clear_reset_event(struct hns3_hw *hw)
{
	uint32_t clearval = 0;

	switch (hw->reset.level) {
	case HNS3_IMP_RESET:
		clearval = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
		break;
	case HNS3_GLOBAL_RESET:
		clearval = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
		break;
	default:
		break;
	}

	if (clearval == 0)
		return;

	hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, clearval);

	hns3_pf_enable_irq0(hw);
}

static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
	if (event_type == HNS3_VECTOR0_EVENT_RST ||
	    event_type == HNS3_VECTOR0_EVENT_PTP)
		hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
	else if (event_type == HNS3_VECTOR0_EVENT_MBX)
		hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
	uint32_t vector0_int_stats;

	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during IMP reset interrupt");

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during Global reset interrupt");

	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
			       BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
			       BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
			       BIT(HNS3_VECTOR0_CORERESET_INT_B));
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
			       BIT(HNS3_VECTOR0_1588_INT_B));
}

static void
hns3_handle_mac_tnl(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	uint32_t status;
	int ret;

	/* query and clear mac tnl interrupt */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
		return;
	}

	status = rte_le_to_cpu_32(desc.data[0]);
	if (status) {
		hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
					  false);
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret)
			hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
				 ret);
	}
}

static void
hns3_delay_before_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
#define IMPRESET_WAIT_MS_TIME	5

	if (event_type == HNS3_VECTOR0_EVENT_RST &&
	    regclr & BIT(HNS3_VECTOR0_IMPRESET_INT_B) &&
	    hw->revision >= PCI_REVISION_ID_HIP09_A) {
		rte_delay_ms(IMPRESET_WAIT_MS_TIME);
		hns3_dbg(hw, "wait for firmware watchdog initialization to complete.");
	}
}

static bool
hns3_reset_event_valid(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	enum hns3_reset_level new_req = HNS3_NONE_RESET;
	enum hns3_reset_level last_req;
	uint32_t vector0_int;

	vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int)
		new_req = HNS3_IMP_RESET;
	else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int)
		new_req = HNS3_GLOBAL_RESET;
	if (new_req == HNS3_NONE_RESET)
		return true;

	last_req = hns3_get_reset_level(hns, &hw->reset.pending);
	if (last_req == HNS3_NONE_RESET)
		return true;

	if (new_req > last_req)
		return true;

	hns3_warn(hw, "new_req (%u) is not greater than last_req (%u), ignore",
		  new_req, last_req);
	return false;
}

static void
hns3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_evt_cause event_cause;
	struct hns3_intr_state state;
	uint32_t clearval = 0;

	if (!hns3_reset_event_valid(hw))
		return;

	/* Disable interrupt */
	hns3_pf_disable_irq0(hw);

	event_cause = hns3_check_event_cause(hns, &clearval);
	hns3_query_intr_state(hw, &state);
	hns3_delay_before_clear_event_cause(hw, event_cause, clearval);
	hns3_clear_event_cause(hw, event_cause, clearval);
	/* vector 0 interrupt is shared with reset and mailbox source events. */
	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
		hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  state.vector0_state, state.hw_err_state,
			  state.cmdq_state);
		hns3_handle_mac_tnl(hw);
		hns3_handle_error(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
		hns3_warn(hw, "received reset interrupt");
		hns3_schedule_reset(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
		hns3pf_handle_mbx_msg(hw);
	} else if (event_cause != HNS3_VECTOR0_EVENT_PTP) {
		hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  state.vector0_state, state.hw_err_state,
			  state.cmdq_state);
	}

	/* Enable interrupt if it is not caused by reset */
	if (event_cause == HNS3_VECTOR0_EVENT_ERR ||
	    event_cause == HNS3_VECTOR0_EVENT_MBX ||
	    event_cause == HNS3_VECTOR0_EVENT_PTP ||
	    event_cause == HNS3_VECTOR0_EVENT_OTHER)
		hns3_pf_enable_irq0(hw);
}

static int
hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3_VLAN_ID_OFFSET_STEP	160
#define HNS3_VLAN_BYTE_SIZE		8
	struct hns3_vlan_filter_pf_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	uint8_t vlan_offset_byte_val;
	struct hns3_cmd_desc desc;
	uint8_t vlan_offset_byte;
	uint8_t vlan_offset_base;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
			   HNS3_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);

	req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_base;
	req->vlan_cfg = on ? 0 : 1;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
			 vlan_id, ret);

	return ret;
}
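
/*
 * Worked example (illustrative values): the command addresses the filter
 * table in windows of 160 vlan ids, 8 ids per bitmap byte. For
 * vlan_id = 1500:
 *	vlan_offset_base     = 1500 / 160       = 9
 *	vlan_offset_byte     = (1500 % 160) / 8 = 7
 *	vlan_offset_byte_val = 1 << (1500 % 8)  = 0x10
 * i.e. window 9, byte 7, bit 4. Note that vlan_cfg appears to be
 * active-low here: 0 requests that the id be added, 1 that it be removed.
 */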

static void
hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id) {
			if (vlan_entry->hd_tbl_status)
				hns3_set_port_vlan_filter(hns, vlan_id, 0);
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			break;
		}
	}
}

static void
hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
			bool writen_to_tbl)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id)
			return;
	}

	vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
	if (vlan_entry == NULL) {
		hns3_err(hw, "Failed to malloc hns3 vlan table");
		return;
	}

	vlan_entry->hd_tbl_status = writen_to_tbl;
	vlan_entry->vlan_id = vlan_id;

	LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
}

static int
hns3_restore_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;
	uint16_t vlan_id;
	int ret = 0;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
		return hns3_vlan_pvid_configure(hns,
						hw->port_base_vlan_cfg.pvid, 1);

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			vlan_id = vlan_entry->vlan_id;
			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
			if (ret)
				break;
		}
	}

	return ret;
}

static int
hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
	struct hns3_hw *hw = &hns->hw;
	bool writen_to_tbl = false;
	int ret = 0;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. So, to receive packets without vlan, vlan id
	 * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
	 */
	if (on == 0 && vlan_id == 0)
		return 0;

	/*
	 * When port based vlan is enabled, we use the port based vlan as the
	 * vlan filter condition. In this case, we don't update the vlan
	 * filter table when the user adds a new vlan or removes an existing
	 * one; we only update the vlan list. The vlan ids in the vlan list
	 * are not written to the vlan filter table until port based vlan is
	 * disabled.
	 */
	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
		writen_to_tbl = true;
	}

	if (ret == 0) {
		if (on)
			hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
		else
			hns3_rm_dev_vlan_table(hns, vlan_id);
	}
	return ret;
}
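
/*
 * Consequence of the vlan-0 rule above: an application call such as
 * rte_eth_dev_vlan_filter(port_id, 0, 0) is silently accepted and returns
 * 0 without touching the hardware table, so untagged traffic keeps
 * flowing while the vlan filter offload is enabled.
 */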

static int
hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
			 uint16_t tpid)
{
	struct hns3_rx_vlan_type_cfg_cmd *rx_req;
	struct hns3_tx_vlan_type_cfg_cmd *tx_req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_VLAN) {
		hns3_err(hw, "Unsupported vlan tpid = 0x%x, vlan_type = %d",
			 tpid, vlan_type);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
			 ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
	tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
			 ret);
	return ret;
}

static int
hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}
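
/*
 * Only the standard 0x8100 TPID (RTE_ETHER_TYPE_VLAN) is accepted above,
 * for both inner and outer vlan types. For example (hypothetical
 * port_id), a QinQ-style request such as:
 *
 *	ret = rte_eth_dev_set_vlan_ether_type(port_id,
 *					      RTE_ETH_VLAN_TYPE_OUTER,
 *					      RTE_ETHER_TYPE_QINQ);
 *
 * fails with -EINVAL in this driver, while RTE_ETHER_TYPE_VLAN simply
 * re-programs the default TPID.
 */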

static int
hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_rx_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
		     vcfg->strip_tag1_discard_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
		     vcfg->strip_tag2_discard_en ? 1 : 0);
	/*
	 * In the current version, VF is not supported when PF is driven by
	 * the DPDK driver, so we just need to configure parameters for the
	 * PF vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
	return ret;
}

static int
hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
{
	struct hns3_rx_vtag_cfg rxvlan_cfg;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		rxvlan_cfg.strip_tag1_en = false;
		rxvlan_cfg.strip_tag2_en = enable;
		rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		rxvlan_cfg.strip_tag1_en = enable;
		rxvlan_cfg.strip_tag2_en = true;
		rxvlan_cfg.strip_tag2_discard_en = true;
	}

	rxvlan_cfg.strip_tag1_discard_en = false;
	rxvlan_cfg.vlan1_vlan_prionly = false;
	rxvlan_cfg.vlan2_vlan_prionly = false;
	rxvlan_cfg.rx_vlan_offload_en = enable;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
	if (ret) {
		hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
			 enable ? "enable" : "disable", ret);
		return ret;
	}

	memcpy(&hns->pf.vtag_config.rx_vcfg, &rxvlan_cfg,
	       sizeof(struct hns3_rx_vtag_cfg));

	return ret;
}

static int
hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
			  uint8_t fe_type, bool filter_en, uint8_t vf_id)
{
	struct hns3_vlan_filter_ctrl_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set vlan filter fail, ret =%d", ret);

	return ret;
}

static int
hns3_vlan_filter_init(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
					HNS3_FILTER_FE_EGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret) {
		hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);

	return ret;
}

static int
hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, enable,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to %s port vlan filter, ret = %d",
			 enable ? "enable" : "disable", ret);

	return ret;
}

static int
hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_rxmode *rxmode;
	unsigned int tmp_mask;
	bool enable;
	int ret = 0;

	rte_spinlock_lock(&hw->lock);
	rxmode = &dev->data->dev_conf.rxmode;
	tmp_mask = (unsigned int)mask;
	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
		/* ignore vlan filter configuration during promiscuous mode */
		if (!dev->data->promiscuous) {
			/* Enable or disable VLAN filter */
			enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
				 true : false;

			ret = hns3_enable_vlan_filter(hns, enable);
			if (ret) {
				rte_spinlock_unlock(&hw->lock);
				hns3_err(hw, "failed to %s rx filter, ret = %d",
					 enable ? "enable" : "disable", ret);
				return ret;
			}
		}
	}

	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
			 true : false;

		ret = hns3_en_hw_strip_rxvtag(hns, enable);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			hns3_err(hw, "failed to %s rx strip, ret = %d",
				 enable ? "enable" : "disable", ret);
			return ret;
		}
	}

	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_tx_cfg_cmd *req;
	struct hns3_cmd_desc desc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = vcfg->default_tag1;
	req->def_vlan_tag2 = vcfg->default_tag2;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
		     vcfg->accept_tag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
		     vcfg->accept_untag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
		     vcfg->accept_tag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
		     vcfg->accept_untag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
		     vcfg->insert_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
		     vcfg->insert_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
		     vcfg->tag_shift_mode_en ? 1 : 0);

	/*
	 * In the current version, VF is not supported when PF is driven by
	 * the DPDK driver, so we just need to configure parameters for the
	 * PF vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);

	return ret;
}
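
/*
 * With HNS3_PF_FUNC_ID = 0, the vf_offset/vf_bitmap arithmetic above
 * reduces to vf_offset = 0 and bitmap = 0x01: only the PF's own bit in
 * the first bitmap byte is set, matching the comment that no VF vports
 * are configured here.
 */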

static int
hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
		     uint16_t pvid)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_vtag_cfg txvlan_cfg;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
		txvlan_cfg.accept_tag1 = true;
		txvlan_cfg.insert_tag1_en = false;
		txvlan_cfg.default_tag1 = 0;
	} else {
		txvlan_cfg.accept_tag1 =
			hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
		txvlan_cfg.insert_tag1_en = true;
		txvlan_cfg.default_tag1 = pvid;
	}

	txvlan_cfg.accept_untag1 = true;
	txvlan_cfg.accept_tag2 = true;
	txvlan_cfg.accept_untag2 = true;
	txvlan_cfg.insert_tag2_en = false;
	txvlan_cfg.default_tag2 = 0;
	txvlan_cfg.tag_shift_mode_en = true;

	ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
	if (ret) {
		hns3_err(hw, "pf vlan set pvid failed, pvid = %u, ret = %d",
			 pvid, ret);
		return ret;
	}

	memcpy(&hns->pf.vtag_config.tx_vcfg, &txvlan_cfg,
	       sizeof(struct hns3_tx_vtag_cfg));

	return ret;
}


static void
hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
			vlan_entry->hd_tbl_status = false;
		}
	}

	if (is_del_list) {
		vlan_entry = LIST_FIRST(&pf->vlan_list);
		while (vlan_entry) {
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			vlan_entry = LIST_FIRST(&pf->vlan_list);
		}
	}
}

static void
hns3_add_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (!vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
			vlan_entry->hd_tbl_status = true;
		}
	}
}

static void
hns3_remove_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	hns3_rm_all_vlan_table(hns, true);
	if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
		ret = hns3_set_port_vlan_filter(hns,
						hw->port_base_vlan_cfg.pvid, 0);
		if (ret) {
			hns3_err(hw, "Failed to remove all vlan table, ret =%d",
				 ret);
			return;
		}
	}
}

static int
hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
				uint16_t port_base_vlan_state, uint16_t new_pvid)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t old_pvid;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
		old_pvid = hw->port_base_vlan_cfg.pvid;
		if (old_pvid != HNS3_INVALID_PVID) {
			ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
			if (ret) {
				hns3_err(hw, "failed to remove old pvid %u, "
					 "ret = %d", old_pvid, ret);
				return ret;
			}
		}

		hns3_rm_all_vlan_table(hns, false);
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
		if (ret) {
			hns3_err(hw, "failed to add new pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}
	} else {
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
		if (ret) {
			hns3_err(hw, "failed to remove pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}

		hns3_add_all_vlan_table(hns);
	}
	return 0;
}

static int
hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
{
	struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
	struct hns3_rx_vtag_cfg rx_vlan_cfg;
	bool rx_strip_en;
	int ret;

	rx_strip_en = old_cfg->rx_vlan_offload_en;
	if (on) {
		rx_vlan_cfg.strip_tag1_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_en = true;
		rx_vlan_cfg.strip_tag2_discard_en = true;
	} else {
		rx_vlan_cfg.strip_tag1_en = false;
		rx_vlan_cfg.strip_tag2_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_discard_en = false;
	}
	rx_vlan_cfg.strip_tag1_discard_en = false;
	rx_vlan_cfg.vlan1_vlan_prionly = false;
	rx_vlan_cfg.vlan2_vlan_prionly = false;
	rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
	if (ret)
		return ret;

	memcpy(&hns->pf.vtag_config.rx_vcfg, &rx_vlan_cfg,
	       sizeof(struct hns3_rx_vtag_cfg));

	return ret;
}

static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t port_base_vlan_state;
	int ret, err;

	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
		if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
			hns3_warn(hw, "Invalid operation! As current pvid set "
				  "is %u, disable pvid %u is invalid",
				  hw->port_base_vlan_cfg.pvid, pvid);
		return 0;
	}

	port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
				    HNS3_PORT_BASE_VLAN_DISABLE;
	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
			 ret);
		return ret;
	}

	ret = hns3_en_pvid_strip(hns, on);
	if (ret) {
		hns3_err(hw, "failed to config rx vlan strip for pvid, "
			 "ret = %d", ret);
		goto pvid_vlan_strip_fail;
	}

	if (pvid == HNS3_INVALID_PVID)
		goto out;
	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to update vlan filter entries, ret = %d",
			 ret);
		goto vlan_filter_set_fail;
	}

out:
	hw->port_base_vlan_cfg.state = port_base_vlan_state;
	hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
	return ret;

vlan_filter_set_fail:
	err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
				 HNS3_PORT_BASE_VLAN_ENABLE);
	if (err)
		hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
	err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
				   hw->port_base_vlan_cfg.pvid);
	if (err)
		hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

	return ret;
}

static int
hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool pvid_en_state_change;
	uint16_t pvid_state;
	int ret;

	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
		hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
			 RTE_ETHER_MAX_VLAN_ID);
		return -EINVAL;
	}

	/*
	 * If the PVID configuration state changes, the PVID state stored in
	 * struct hns3_tx_queue/hns3_rx_queue must be refreshed.
	 */
	pvid_state = hw->port_base_vlan_cfg.state;
	if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
	    (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
		pvid_en_state_change = false;
	else
		pvid_en_state_change = true;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_pvid_configure(hns, pvid, on);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		return ret;
	/*
	 * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE does the PVID-related
	 * handling in the Tx/Rx fast path need to be processed by the PMD.
	 */
	if (pvid_en_state_change &&
	    hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
		hns3_update_all_queues_pvid_proc_en(hw);

	return 0;
}
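
/*
 * Usage sketch (hypothetical port_id): the PVID path above implements
 * rte_eth_dev_set_vlan_pvid(), so inserting vlan 100 into all transmitted
 * packets, and later turning the feature off, looks like:
 *
 *	ret = rte_eth_dev_set_vlan_pvid(port_id, 100, 1);
 *	...
 *	ret = rte_eth_dev_set_vlan_pvid(port_id, 100, 0);
 *
 * Note the guard at the top of hns3_vlan_pvid_configure(): disabling with
 * a pvid that does not match the currently configured one only logs a
 * warning and returns 0.
 */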

static int
hns3_default_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
	 * table, packets without vlan won't be received. So, add vlan 0 as
	 * the default vlan.
	 */
	ret = hns3_vlan_filter_configure(hns, 0, 1);
	if (ret)
		hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
	return ret;
}

static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * This function can be called in the initialization and reset
	 * process. When called in the reset process, it means that the
	 * hardware has been reset successfully and we need to restore the
	 * hardware configuration to ensure that it remains unchanged before
	 * and after the reset.
	 */
	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
	}

	ret = hns3_vlan_filter_init(hns);
	if (ret) {
		hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
		return ret;
	}

	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
				       RTE_ETHER_TYPE_VLAN);
	if (ret) {
		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
		return ret;
	}

	/*
	 * When in the reinit dev stage of the reset process, the following
	 * vlan-related configurations may differ from those at
	 * initialization; they will be restored to hardware later, in
	 * hns3_restore_vlan_table and hns3_restore_vlan_conf.
	 */
	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
		if (ret) {
			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
			return ret;
		}

		ret = hns3_en_hw_strip_rxvtag(hns, false);
		if (ret) {
			hns3_err(hw, "rx strip configure fail in pf, ret =%d",
				 ret);
			return ret;
		}
	}

	return hns3_default_vlan_config(hns);
}

static int
hns3_restore_vlan_conf(struct hns3_adapter *hns)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	bool enable;
	int ret;

	if (!hw->data->promiscuous) {
		/* restore vlan filter states */
		offloads = hw->data->dev_conf.rxmode.offloads;
		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
		ret = hns3_enable_vlan_filter(hns, enable);
		if (ret) {
			hns3_err(hw, "failed to restore vlan rx filter conf, "
				 "ret = %d", ret);
			return ret;
		}
	}

	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
	if (ret) {
		hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
	if (ret)
		hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);

	return ret;
}

static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_txmode *txmode;
	struct hns3_hw *hw = &hns->hw;
	int mask;
	int ret;

	txmode = &data->dev_conf.txmode;
	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
		hns3_warn(hw,
			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
			  "configuration is not supported! Ignore these two "
			  "parameters: hw_vlan_reject_tagged(%u), "
			  "hw_vlan_reject_untagged(%u)",
			  txmode->hw_vlan_reject_tagged,
			  txmode->hw_vlan_reject_untagged);

	/* Apply vlan offload setting */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
	ret = hns3_vlan_offload_set(dev, mask);
	if (ret) {
		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * If pvid config is not set in rte_eth_conf, the driver need not
	 * write any PVID-related configuration to hardware.
	 */
	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
		return 0;

	/* Apply pvid setting */
	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
				 txmode->hw_vlan_insert_pvid);
	if (ret)
		hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
			 txmode->pvid, ret);

	return ret;
}

static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
		unsigned int tso_mss_max)
{
	struct hns3_cfg_tso_status_cmd *req;
	struct hns3_cmd_desc desc;
	uint16_t tso_mss;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hns3_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_min);
	req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_max);
	req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
		   uint16_t *allocated_size, bool is_alloc)
{
	struct hns3_umv_spc_alc_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_umv_spc_alc_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
	hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
	req->space_size = rte_cpu_to_le_32(space_size);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
			     is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = rte_le_to_cpu_32(desc.data[1]);

	return 0;
}

static int
hns3_init_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t allocated_size = 0;
	int ret;

	ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
				 true);
	if (ret)
		return ret;

	if (allocated_size < pf->wanted_umv_size)
		PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
			     pf->wanted_umv_size, allocated_size);

	pf->max_umv_size = (!!allocated_size) ? allocated_size :
			   pf->wanted_umv_size;
	pf->used_umv_size = 0;
	return 0;
}

static int
hns3_uninit_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	if (pf->max_umv_size == 0)
		return 0;

	ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
	if (ret)
		return ret;

	pf->max_umv_size = 0;

	return 0;
}

static bool
hns3_is_umv_space_full(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	bool is_full;

	is_full = (pf->used_umv_size >= pf->max_umv_size);

	return is_full;
}

static void
hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (is_free) {
		if (pf->used_umv_size > 0)
			pf->used_umv_size--;
	} else
		pf->used_umv_size++;
}

static void
hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
		      const uint8_t *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
			    ((uint32_t)mac_addr[2] << 16) |
			    ((uint32_t)mac_addr[1] << 8) |
			    (uint32_t)mac_addr[0];
	uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];

	hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
		hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
	new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
}
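
/*
 * Byte-order example: the table entry stores the MAC address in two
 * little-endian groups. For the (illustrative) address 00:01:02:03:04:05,
 * where addr[0] = 0x00 ... addr[5] = 0x05:
 *	high_val = 0x03020100   (bytes 3..0)
 *	low_val  = 0x00000504   (bytes 5..4)
 * so mac_addr_hi32 carries the first four octets and mac_addr_lo16 the
 * last two.
 */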

static int
hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
			     uint8_t resp_code,
			     enum hns3_mac_vlan_tbl_opcode op)
{
	if (cmdq_resp) {
		hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status, status = %u",
			 cmdq_resp);
		return -EIO;
	}

	if (op == HNS3_MAC_VLAN_ADD) {
		if (resp_code == 0 || resp_code == 1) {
			return 0;
		} else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for uc_overflow");
			return -ENOSPC;
		} else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for mc_overflow");
			return -ENOSPC;
		}

		hns3_err(hw, "add mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_REMOVE) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "remove mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "remove mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_LKUP) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "lookup mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	}

	hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
		 op);

	return -EINVAL;
}

static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req,
			 struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int ret;
	int i;

	if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) {
		for (i = 0; i < desc_num - 1; i++) {
			hns3_cmd_setup_basic_desc(&desc[i],
						  HNS3_OPC_MAC_VLAN_ADD, true);
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
			if (i == 0)
				memcpy(desc[i].data, req,
				       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		}
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD,
					  true);
	} else {
		hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
	}
	ret = hns3_cmd_send(hw, desc, desc_num);
	if (ret) {
		hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
			 ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc[0].retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_LKUP);
}

static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
		      struct hns3_mac_vlan_tbl_entry_cmd *req,
		      struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int cfg_status;
	int ret;
	int i;

	if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) {
		hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false);
		memcpy(desc->data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc->retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	} else {
		for (i = 0; i < desc_num; i++) {
			hns3_cmd_reuse_desc(&desc[i], false);
			if (i == desc_num - 1)
				desc[i].flag &=
					rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
			else
				desc[i].flag |=
					rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		}
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		desc[0].retval = 0;
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc[0].retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	}

	if (ret) {
		hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}

	return cfg_status;
}

static int
hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req)
{
	struct hns3_cmd_desc desc;
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc.retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_REMOVE);
}

static int
hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cmd_desc desc;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint16_t egress_port = 0;
	uint8_t vf_id;
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	/*
	 * In the current version, VF is not supported when PF is driven by
	 * the DPDK driver, so we just need to configure parameters for the
	 * PF vport.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
		       HNS3_MAC_EPORT_VFID_S, vf_id);

	req.egress_port = rte_cpu_to_le_16(egress_port);

	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

	/*
	 * Look up the mac address in the mac_vlan table, and add it if the
	 * entry does not exist. Repeated unicast entries are not allowed in
	 * the mac vlan table.
	 */
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc,
				       HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
	if (ret == -ENOENT) {
		if (!hns3_is_umv_space_full(hw)) {
			ret = hns3_add_mac_vlan_tbl(hw, &req, &desc,
						    HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
			if (!ret)
				hns3_update_umv_space(hw, false);
			return ret;
		}

		hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

		return -ENOSPC;
	}

	hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

	/* check if we just hit the duplicate */
	if (ret == 0) {
		hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
		return 0;
	}

	hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
		 mac_str);

	return ret;
}

static int
hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
	ret = hns3_remove_mac_vlan_tbl(hw, &req);
	if (ret == -ENOENT) /* mac addr is not in the mac vlan table. */
		return 0;
	else if (ret == 0)
		hns3_update_umv_space(hw, true);

	return ret;
}

static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mac_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *oaddr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret, ret_val;

	rte_spinlock_lock(&hw->lock);
	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
	ret = hw->ops.del_uc_mac_addr(hw, oaddr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       oaddr);
		hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
			  mac_str, ret);

		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
		goto err_add_uc_addr;
	}

	ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
	if (ret) {
		hns3_err(hw, "Failed to configure mac pause address: %d", ret);
		goto err_pause_addr_cfg;
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	rte_spinlock_unlock(&hw->lock);

	return 0;

err_pause_addr_cfg:
	ret_val = hw->ops.del_uc_mac_addr(hw, mac_addr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_warn(hw,
			  "Failed to roll back by removing the new mac addr(%s): %d",
			  mac_str, ret_val);
	}

err_add_uc_addr:
	ret_val = hw->ops.add_uc_mac_addr(hw, oaddr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
		hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
			  mac_str, ret_val);
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static void
hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
{
#define HNS3_VF_NUM_IN_FIRST_DESC 192
	uint8_t word_num;
	uint8_t bit_num;

	if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &=
				rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[1].data[word_num] |=
				rte_cpu_to_le_32(1UL << bit_num);
	} else {
		word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &=
				rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[2].data[word_num] |=
				rte_cpu_to_le_32(1UL << bit_num);
	}
}
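
/*
 * Mapping example: the multicast entry spreads its vfid bitmap over
 * desc[1] (vfids 0..191) and desc[2] (vfids 192 and up), 32 vfids per
 * data word. For instance, vfid 5 lands in desc[1].data[0] bit 5, while
 * vfid 200 lands in desc[2].data[0] bit 8 ((200 - 192) / 32 = 0,
 * 200 % 32 = 8).
 */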

static int
hns3_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint8_t vf_id;
	int ret;
	int idx;

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
				       HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret) {
		/* This mac addr does not exist, add a new entry for it */
		for (idx = 0; idx < HNS3_MC_MAC_VLAN_OPS_DESC_NUM; idx++)
			memset(desc[idx].data, 0, sizeof(desc[idx].data));
	}

	/*
	 * In the current version, VF is not supported when PF is driven by
	 * the DPDK driver, so we just need to configure parameters for the
	 * PF vport.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_update_desc_vfid(desc, vf_id, false);
	ret = hns3_add_mac_vlan_tbl(hw, &req, desc,
				    HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret) {
		if (ret == -ENOSPC)
			hns3_err(hw, "mc mac vlan table is full");
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
	}

	return ret;
}

static int
hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint8_t vf_id;
	int ret;

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
				       HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret == 0) {
		/*
		 * This mac addr exists, remove this handle's VFID for it.
		 * In the current version, VF is not supported when PF is
		 * driven by the DPDK driver, so we just need to configure
		 * parameters for the PF vport.
		 */
		vf_id = HNS3_PF_FUNC_ID;
		hns3_update_desc_vfid(desc, vf_id, true);

		/* All the vfids are now zero, so delete this entry */
		ret = hns3_remove_mac_vlan_tbl(hw, &req);
	} else if (ret == -ENOENT) {
		/* This mac addr does not exist. */
		return 0;
	}

	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
	}

	return ret;
}

static int
hns3_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct rte_eth_dcb_tx_conf *dcb_tx_conf;
	uint8_t num_tc;
	int max_tc = 0;
	int i;

	if (((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
	    (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
	     tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
		hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
			 rx_mq_mode, tx_mq_mode);
		return -EOPNOTSUPP;
	}

	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
	if ((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
				 dcb_rx_conf->nb_tcs, pf->tc_max);
			return -EINVAL;
		}

		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
			hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
				 "nb_tcs(%d) != %d or %d in rx direction.",
				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
			return -EINVAL;
		}

		if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
			hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
				 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}

		for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
			if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
				hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
					 "is not equal to one in tx direction.",
					 i, dcb_rx_conf->dcb_tc[i]);
				return -EINVAL;
			}
			if (dcb_rx_conf->dcb_tc[i] > max_tc)
				max_tc = dcb_rx_conf->dcb_tc[i];
		}

		num_tc = max_tc + 1;
		if (num_tc > dcb_rx_conf->nb_tcs) {
			hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
				 num_tc, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}
	}

	return 0;
}
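
/*
 * Configuration sketch (hypothetical values): a setup that passes the DCB
 * checks above could look like this before rte_eth_dev_configure():
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_DCB_RSS },
 *		.txmode = { .mq_mode = RTE_ETH_MQ_TX_DCB },
 *		.rx_adv_conf.dcb_rx_conf = {
 *			.nb_tcs = RTE_ETH_4_TCS,
 *			.dcb_tc = { 0, 1, 2, 3, 0, 1, 2, 3 },
 *		},
 *		.tx_adv_conf.dcb_tx_conf = {
 *			.nb_tcs = RTE_ETH_4_TCS,
 *			.dcb_tc = { 0, 1, 2, 3, 0, 1, 2, 3 },
 *		},
 *	};
 *
 * nb_tcs must be 4 or 8, Rx and Tx must agree, and every priority must
 * map to a TC below nb_tcs.
 */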
HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; 1953 hns3_cmd_setup_basic_desc(&desc, op, false); 1954 req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M, 1955 HNS3_TQP_INT_ID_L_S); 1956 req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M, 1957 HNS3_TQP_INT_ID_H_S); 1958 1959 if (queue_type == HNS3_RING_TYPE_RX) 1960 gl = HNS3_RING_GL_RX; 1961 else 1962 gl = HNS3_RING_GL_TX; 1963 1964 type = queue_type; 1965 1966 hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S, 1967 type); 1968 hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id); 1969 hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S, 1970 gl); 1971 req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); 1972 req->int_cause_num = 1; 1973 ret = hns3_cmd_send(hw, &desc, 1); 1974 if (ret) { 1975 hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.", 1976 en ? "Map" : "Unmap", queue_id, vector_id, ret); 1977 return ret; 1978 } 1979 1980 return 0; 1981 } 1982 1983 static int 1984 hns3_setup_dcb(struct rte_eth_dev *dev) 1985 { 1986 struct hns3_adapter *hns = dev->data->dev_private; 1987 struct hns3_hw *hw = &hns->hw; 1988 int ret; 1989 1990 if (!hns3_dev_get_support(hw, DCB)) { 1991 hns3_err(hw, "this port does not support dcb configurations."); 1992 return -EOPNOTSUPP; 1993 } 1994 1995 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) { 1996 hns3_err(hw, "MAC pause enabled, cannot config dcb info."); 1997 return -EOPNOTSUPP; 1998 } 1999 2000 ret = hns3_dcb_configure(hns); 2001 if (ret) 2002 hns3_err(hw, "failed to config dcb: %d", ret); 2003 2004 return ret; 2005 } 2006 2007 static int 2008 hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds) 2009 { 2010 int ret; 2011 2012 /* 2013 * Some hardware doesn't support auto-negotiation, but users may not 2014 * configure link_speeds (default 0), which means auto-negotiation. 2015 * In this case, it should return success. 2016 */ 2017 if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG && 2018 hw->mac.support_autoneg == 0) 2019 return 0; 2020 2021 if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) { 2022 ret = hns3_check_port_speed(hw, link_speeds); 2023 if (ret) 2024 return ret; 2025 } 2026 2027 return 0; 2028 } 2029 2030 static int 2031 hns3_check_dev_conf(struct rte_eth_dev *dev) 2032 { 2033 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2034 struct rte_eth_conf *conf = &dev->data->dev_conf; 2035 int ret; 2036 2037 ret = hns3_check_mq_mode(dev); 2038 if (ret) 2039 return ret; 2040 2041 return hns3_check_link_speed(hw, conf->link_speeds); 2042 } 2043 2044 static int 2045 hns3_dev_configure(struct rte_eth_dev *dev) 2046 { 2047 struct hns3_adapter *hns = dev->data->dev_private; 2048 struct rte_eth_conf *conf = &dev->data->dev_conf; 2049 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; 2050 struct hns3_hw *hw = &hns->hw; 2051 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2052 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2053 struct rte_eth_rss_conf rss_conf; 2054 bool gro_en; 2055 int ret; 2056 2057 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); 2058 2059 /* 2060 * Some versions of hardware network engine does not support 2061 * individually enable/disable/reset the Tx or Rx queue. These devices 2062 * must enable/disable/reset Tx and Rx queues at the same time. When the 2063 * numbers of Tx queues allocated by upper applications are not equal to 2064 * the numbers of Rx queues, driver needs to setup fake Tx or Rx queues 2065 * to adjust numbers of Tx/Rx queues. 
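	 * For example, if an application requests 8 Rx queues but only
	 * 4 Tx queues, 4 fake Tx queues are set up so that both
	 * directions operate on 8 queues;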
otherwise, network engine can not 2066 * work as usual. But these fake queues are imperceptible, and can not 2067 * be used by upper applications. 2068 */ 2069 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); 2070 if (ret) { 2071 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret); 2072 hw->cfg_max_queues = 0; 2073 return ret; 2074 } 2075 2076 hw->adapter_state = HNS3_NIC_CONFIGURING; 2077 ret = hns3_check_dev_conf(dev); 2078 if (ret) 2079 goto cfg_err; 2080 2081 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) { 2082 ret = hns3_setup_dcb(dev); 2083 if (ret) 2084 goto cfg_err; 2085 } 2086 2087 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 2088 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2089 rss_conf = conf->rx_adv_conf.rss_conf; 2090 ret = hns3_dev_rss_hash_update(dev, &rss_conf); 2091 if (ret) 2092 goto cfg_err; 2093 } 2094 2095 ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu); 2096 if (ret != 0) 2097 goto cfg_err; 2098 2099 ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf); 2100 if (ret) 2101 goto cfg_err; 2102 2103 ret = hns3_dev_configure_vlan(dev); 2104 if (ret) 2105 goto cfg_err; 2106 2107 /* config hardware GRO */ 2108 gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false; 2109 ret = hns3_config_gro(hw, gro_en); 2110 if (ret) 2111 goto cfg_err; 2112 2113 hns3_init_rx_ptype_tble(dev); 2114 hw->adapter_state = HNS3_NIC_CONFIGURED; 2115 2116 return 0; 2117 2118 cfg_err: 2119 hw->cfg_max_queues = 0; 2120 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); 2121 hw->adapter_state = HNS3_NIC_INITIALIZED; 2122 2123 return ret; 2124 } 2125 2126 static int 2127 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) 2128 { 2129 struct hns3_config_max_frm_size_cmd *req; 2130 struct hns3_cmd_desc desc; 2131 2132 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false); 2133 2134 req = (struct hns3_config_max_frm_size_cmd *)desc.data; 2135 req->max_frm_size = rte_cpu_to_le_16(new_mps); 2136 req->min_frm_size = RTE_ETHER_MIN_LEN; 2137 2138 return hns3_cmd_send(hw, &desc, 1); 2139 } 2140 2141 static int 2142 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) 2143 { 2144 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2145 int err; 2146 int ret; 2147 2148 ret = hns3_set_mac_mtu(hw, mps); 2149 if (ret) { 2150 hns3_err(hw, "failed to set mtu, ret = %d", ret); 2151 return ret; 2152 } 2153 2154 ret = hns3_buffer_alloc(hw); 2155 if (ret) { 2156 hns3_err(hw, "failed to allocate buffer, ret = %d", ret); 2157 goto rollback; 2158 } 2159 2160 hns->pf.mps = mps; 2161 2162 return 0; 2163 2164 rollback: 2165 err = hns3_set_mac_mtu(hw, hns->pf.mps); 2166 if (err) 2167 hns3_err(hw, "fail to rollback MTU, err = %d", err); 2168 2169 return ret; 2170 } 2171 2172 static int 2173 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 2174 { 2175 struct hns3_adapter *hns = dev->data->dev_private; 2176 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; 2177 struct hns3_hw *hw = &hns->hw; 2178 int ret; 2179 2180 if (dev->data->dev_started) { 2181 hns3_err(hw, "Failed to set mtu, port %u must be stopped " 2182 "before configuration", dev->data->port_id); 2183 return -EBUSY; 2184 } 2185 2186 rte_spinlock_lock(&hw->lock); 2187 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN); 2188 2189 /* 2190 * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely 2191 * assign to "uint16_t" type variable. 
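	 * For example, the default MTU of 1500 maps to a frame_size of
	 * 1526 here (assuming the 26-byte HNS3_ETH_OVERHEAD of Ethernet
	 * header, CRC and two VLAN tags), far below the 16-bit limit.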
2192 */ 2193 ret = hns3_config_mtu(hw, (uint16_t)frame_size); 2194 if (ret) { 2195 rte_spinlock_unlock(&hw->lock); 2196 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d", 2197 dev->data->port_id, mtu, ret); 2198 return ret; 2199 } 2200 2201 rte_spinlock_unlock(&hw->lock); 2202 2203 return 0; 2204 } 2205 2206 static uint32_t 2207 hns3_get_copper_port_speed_capa(uint32_t supported_speed) 2208 { 2209 uint32_t speed_capa = 0; 2210 2211 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT) 2212 speed_capa |= RTE_ETH_LINK_SPEED_10M_HD; 2213 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT) 2214 speed_capa |= RTE_ETH_LINK_SPEED_10M; 2215 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT) 2216 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD; 2217 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT) 2218 speed_capa |= RTE_ETH_LINK_SPEED_100M; 2219 if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT) 2220 speed_capa |= RTE_ETH_LINK_SPEED_1G; 2221 2222 return speed_capa; 2223 } 2224 2225 static uint32_t 2226 hns3_get_firber_port_speed_capa(uint32_t supported_speed) 2227 { 2228 uint32_t speed_capa = 0; 2229 2230 if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT) 2231 speed_capa |= RTE_ETH_LINK_SPEED_1G; 2232 if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT) 2233 speed_capa |= RTE_ETH_LINK_SPEED_10G; 2234 if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT) 2235 speed_capa |= RTE_ETH_LINK_SPEED_25G; 2236 if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT) 2237 speed_capa |= RTE_ETH_LINK_SPEED_40G; 2238 if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT) 2239 speed_capa |= RTE_ETH_LINK_SPEED_50G; 2240 if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT) 2241 speed_capa |= RTE_ETH_LINK_SPEED_100G; 2242 if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT) 2243 speed_capa |= RTE_ETH_LINK_SPEED_200G; 2244 2245 return speed_capa; 2246 } 2247 2248 uint32_t 2249 hns3_get_speed_capa(struct hns3_hw *hw) 2250 { 2251 struct hns3_mac *mac = &hw->mac; 2252 uint32_t speed_capa; 2253 2254 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 2255 speed_capa = 2256 hns3_get_copper_port_speed_capa(mac->supported_speed); 2257 else 2258 speed_capa = 2259 hns3_get_firber_port_speed_capa(mac->supported_speed); 2260 2261 if (mac->support_autoneg == 0) 2262 speed_capa |= RTE_ETH_LINK_SPEED_FIXED; 2263 2264 return speed_capa; 2265 } 2266 2267 static int 2268 hns3_update_port_link_info(struct rte_eth_dev *eth_dev) 2269 { 2270 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2271 int ret; 2272 2273 (void)hns3_update_link_status(hw); 2274 2275 ret = hns3_update_link_info(eth_dev); 2276 if (ret) 2277 hw->mac.link_status = RTE_ETH_LINK_DOWN; 2278 2279 return ret; 2280 } 2281 2282 static void 2283 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev, 2284 struct rte_eth_link *new_link) 2285 { 2286 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2287 struct hns3_mac *mac = &hw->mac; 2288 2289 switch (mac->link_speed) { 2290 case RTE_ETH_SPEED_NUM_10M: 2291 case RTE_ETH_SPEED_NUM_100M: 2292 case RTE_ETH_SPEED_NUM_1G: 2293 case RTE_ETH_SPEED_NUM_10G: 2294 case RTE_ETH_SPEED_NUM_25G: 2295 case RTE_ETH_SPEED_NUM_40G: 2296 case RTE_ETH_SPEED_NUM_50G: 2297 case RTE_ETH_SPEED_NUM_100G: 2298 case RTE_ETH_SPEED_NUM_200G: 2299 if (mac->link_status) 2300 new_link->link_speed = mac->link_speed; 2301 break; 2302 default: 2303 if (mac->link_status) 2304 new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; 2305 break; 2306 } 2307 2308 if (!mac->link_status) 2309 new_link->link_speed = 
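	/*
	 * Net effect for the application: a down link is reported as
	 * {RTE_ETH_SPEED_NUM_NONE, RTE_ETH_LINK_DOWN}, and an up link
	 * running at a rate outside the list above is reported as
	 * RTE_ETH_SPEED_NUM_UNKNOWN instead of a stale speed value.
	 */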
RTE_ETH_SPEED_NUM_NONE; 2310 2311 new_link->link_duplex = mac->link_duplex; 2312 new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; 2313 new_link->link_autoneg = mac->link_autoneg; 2314 } 2315 2316 static int 2317 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) 2318 { 2319 #define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */ 2320 #define HNS3_MAX_LINK_CHECK_TIMES 20 /* 2s (100 * 20ms) in total */ 2321 2322 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2323 uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES; 2324 struct hns3_mac *mac = &hw->mac; 2325 struct rte_eth_link new_link; 2326 int ret; 2327 2328 memset(&new_link, 0, sizeof(new_link)); 2329 /* When port is stopped, report link down. */ 2330 if (eth_dev->data->dev_started == 0) { 2331 new_link.link_autoneg = mac->link_autoneg; 2332 new_link.link_duplex = mac->link_duplex; 2333 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE; 2334 new_link.link_status = RTE_ETH_LINK_DOWN; 2335 goto out; 2336 } 2337 2338 do { 2339 ret = hns3_update_port_link_info(eth_dev); 2340 if (ret) { 2341 hns3_err(hw, "failed to get port link info, ret = %d.", 2342 ret); 2343 break; 2344 } 2345 2346 if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP) 2347 break; 2348 2349 rte_delay_ms(HNS3_LINK_CHECK_INTERVAL); 2350 } while (retry_cnt--); 2351 2352 hns3_setup_linkstatus(eth_dev, &new_link); 2353 2354 out: 2355 return rte_eth_linkstatus_set(eth_dev, &new_link); 2356 } 2357 2358 static int 2359 hns3_dev_set_link_up(struct rte_eth_dev *dev) 2360 { 2361 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2362 int ret; 2363 2364 /* 2365 * The "tx_pkt_burst" will be restored. But the secondary process does 2366 * not support the mechanism for notifying the primary process. 2367 */ 2368 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2369 hns3_err(hw, "secondary process does not support to set link up."); 2370 return -ENOTSUP; 2371 } 2372 2373 /* 2374 * If device isn't started Rx/Tx function is still disabled, setting 2375 * link up is not allowed. But it is probably better to return success 2376 * to reduce the impact on the upper layer. 2377 */ 2378 if (hw->adapter_state != HNS3_NIC_STARTED) { 2379 hns3_info(hw, "device isn't started, can't set link up."); 2380 return 0; 2381 } 2382 2383 if (!hw->set_link_down) 2384 return 0; 2385 2386 rte_spinlock_lock(&hw->lock); 2387 ret = hns3_cfg_mac_mode(hw, true); 2388 if (ret) { 2389 rte_spinlock_unlock(&hw->lock); 2390 hns3_err(hw, "failed to set link up, ret = %d", ret); 2391 return ret; 2392 } 2393 2394 hw->set_link_down = false; 2395 hns3_start_tx_datapath(dev); 2396 rte_spinlock_unlock(&hw->lock); 2397 2398 return 0; 2399 } 2400 2401 static int 2402 hns3_dev_set_link_down(struct rte_eth_dev *dev) 2403 { 2404 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2405 int ret; 2406 2407 /* 2408 * The "tx_pkt_burst" will be set to dummy function. But the secondary 2409 * process does not support the mechanism for notifying the primary 2410 * process. 2411 */ 2412 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2413 hns3_err(hw, "secondary process does not support to set link down."); 2414 return -ENOTSUP; 2415 } 2416 2417 /* 2418 * If device isn't started or the API has been called, link status is 2419 * down, return success. 
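	 * For example, a second consecutive rte_eth_dev_set_link_down()
	 * call finds hw->set_link_down already true and returns 0 without
	 * touching the MAC again.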
2420 */ 2421 if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down) 2422 return 0; 2423 2424 rte_spinlock_lock(&hw->lock); 2425 hns3_stop_tx_datapath(dev); 2426 ret = hns3_cfg_mac_mode(hw, false); 2427 if (ret) { 2428 hns3_start_tx_datapath(dev); 2429 rte_spinlock_unlock(&hw->lock); 2430 hns3_err(hw, "failed to set link down, ret = %d", ret); 2431 return ret; 2432 } 2433 2434 hw->set_link_down = true; 2435 rte_spinlock_unlock(&hw->lock); 2436 2437 return 0; 2438 } 2439 2440 static int 2441 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status) 2442 { 2443 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2444 struct hns3_pf *pf = &hns->pf; 2445 2446 if (!(status->pf_state & HNS3_PF_STATE_DONE)) 2447 return -EINVAL; 2448 2449 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false; 2450 2451 return 0; 2452 } 2453 2454 static int 2455 hns3_query_function_status(struct hns3_hw *hw) 2456 { 2457 #define HNS3_QUERY_MAX_CNT 10 2458 #define HNS3_QUERY_SLEEP_MSCOEND 1 2459 struct hns3_func_status_cmd *req; 2460 struct hns3_cmd_desc desc; 2461 int timeout = 0; 2462 int ret; 2463 2464 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true); 2465 req = (struct hns3_func_status_cmd *)desc.data; 2466 2467 do { 2468 ret = hns3_cmd_send(hw, &desc, 1); 2469 if (ret) { 2470 PMD_INIT_LOG(ERR, "query function status failed %d", 2471 ret); 2472 return ret; 2473 } 2474 2475 /* Check pf reset is done */ 2476 if (req->pf_state) 2477 break; 2478 2479 rte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND); 2480 } while (timeout++ < HNS3_QUERY_MAX_CNT); 2481 2482 return hns3_parse_func_status(hw, req); 2483 } 2484 2485 static int 2486 hns3_get_pf_max_tqp_num(struct hns3_hw *hw) 2487 { 2488 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2489 struct hns3_pf *pf = &hns->pf; 2490 2491 if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) { 2492 /* 2493 * The total_tqps_num obtained from firmware is maximum tqp 2494 * numbers of this port, which should be used for PF and VFs. 2495 * There is no need for pf to have so many tqp numbers in 2496 * most cases. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2497 * coming from config file, is assigned to maximum queue number 2498 * for the PF of this port by user. So users can modify the 2499 * maximum queue number of PF according to their own application 2500 * scenarios, which is more flexible to use. In addition, many 2501 * memories can be saved due to allocating queue statistics 2502 * room according to the actual number of queues required. The 2503 * maximum queue number of PF for network engine with 2504 * revision_id greater than 0x30 is assigned by config file. 2505 */ 2506 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) { 2507 hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) " 2508 "must be greater than 0.", 2509 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF); 2510 return -EINVAL; 2511 } 2512 2513 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2514 hw->total_tqps_num); 2515 } else { 2516 /* 2517 * Due to the limitation on the number of PF interrupts 2518 * available, the maximum queue number assigned to PF on 2519 * the network engine with revision_id 0x21 is 64. 
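		 * For example, even if firmware reports a larger
		 * total_tqps_num for the port, a revision 0x21 PF is still
		 * clamped to HNS3_MAX_TQP_NUM_HIP08_PF queues by the
		 * RTE_MIN() below.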
2520 */ 2521 hw->tqps_num = RTE_MIN(hw->total_tqps_num, 2522 HNS3_MAX_TQP_NUM_HIP08_PF); 2523 } 2524 2525 return 0; 2526 } 2527 2528 static int 2529 hns3_query_pf_resource(struct hns3_hw *hw) 2530 { 2531 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2532 struct hns3_pf *pf = &hns->pf; 2533 struct hns3_pf_res_cmd *req; 2534 struct hns3_cmd_desc desc; 2535 int ret; 2536 2537 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); 2538 ret = hns3_cmd_send(hw, &desc, 1); 2539 if (ret) { 2540 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); 2541 return ret; 2542 } 2543 2544 req = (struct hns3_pf_res_cmd *)desc.data; 2545 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) + 2546 rte_le_to_cpu_16(req->ext_tqp_num); 2547 ret = hns3_get_pf_max_tqp_num(hw); 2548 if (ret) 2549 return ret; 2550 2551 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; 2552 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); 2553 2554 if (req->tx_buf_size) 2555 pf->tx_buf_size = 2556 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; 2557 else 2558 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; 2559 2560 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); 2561 2562 if (req->dv_buf_size) 2563 pf->dv_buf_size = 2564 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; 2565 else 2566 pf->dv_buf_size = HNS3_DEFAULT_DV; 2567 2568 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); 2569 2570 hw->num_msi = 2571 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), 2572 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); 2573 2574 return 0; 2575 } 2576 2577 static void 2578 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) 2579 { 2580 struct hns3_cfg_param_cmd *req; 2581 uint64_t mac_addr_tmp_high; 2582 uint8_t ext_rss_size_max; 2583 uint64_t mac_addr_tmp; 2584 uint32_t i; 2585 2586 req = (struct hns3_cfg_param_cmd *)desc[0].data; 2587 2588 /* get the configuration */ 2589 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 2590 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); 2591 2592 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2593 HNS3_CFG_PHY_ADDR_M, 2594 HNS3_CFG_PHY_ADDR_S); 2595 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2596 HNS3_CFG_MEDIA_TP_M, 2597 HNS3_CFG_MEDIA_TP_S); 2598 /* get mac address */ 2599 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); 2600 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2601 HNS3_CFG_MAC_ADDR_H_M, 2602 HNS3_CFG_MAC_ADDR_H_S); 2603 2604 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 2605 2606 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2607 HNS3_CFG_DEFAULT_SPEED_M, 2608 HNS3_CFG_DEFAULT_SPEED_S); 2609 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2610 HNS3_CFG_RSS_SIZE_M, 2611 HNS3_CFG_RSS_SIZE_S); 2612 2613 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 2614 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 2615 2616 req = (struct hns3_cfg_param_cmd *)desc[1].data; 2617 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); 2618 2619 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2620 HNS3_CFG_SPEED_ABILITY_M, 2621 HNS3_CFG_SPEED_ABILITY_S); 2622 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2623 HNS3_CFG_UMV_TBL_SPACE_M, 2624 HNS3_CFG_UMV_TBL_SPACE_S); 2625 if (!cfg->umv_space) 2626 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF; 2627 2628 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]), 2629 HNS3_CFG_EXT_RSS_SIZE_M, 2630 
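/*
 * Note on the MAC address reassembly above: param[2] supplies the low
 * 32 bits and param[3] the high bits; since mac_addr_tmp_high is a
 * uint64_t, "(mac_addr_tmp_high << 31) << 1" is just a defensive way of
 * writing "mac_addr_tmp_high << 32".
 */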
					  HNS3_CFG_EXT_RSS_SIZE_S);
	/*
	 * The ext_rss_size_max field obtained from firmware is an exponent
	 * of 2 rather than a direct value, which leaves room for future
	 * changes and expansions. If this field is not zero, the hns3 PF PMD
	 * uses it as the rss_size_max under one TC. Devices whose revision
	 * id is greater than or equal to PCI_REVISION_ID_HIP09_A report the
	 * maximum number of queues supported under a TC through this field.
	 */
	if (ext_rss_size_max)
		cfg->rss_size_max = 1U << ext_rss_size_max;
}

/* hns3_get_board_cfg: query the static parameters from the NCL_config file
 * in flash
 * @hw: pointer to struct hns3_hw
 * @hcfg: the config structure to be filled in
 */
static int
hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
{
	struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
	struct hns3_cfg_param_cmd *req;
	uint32_t offset;
	uint32_t i;
	int ret;

	for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
		offset = 0;
		req = (struct hns3_cfg_param_cmd *)desc[i].data;
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
					  true);
		hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
			       i * HNS3_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 when sent to hardware */
		hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
			       HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
		req->offset = rte_cpu_to_le_32(offset);
	}

	ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
	if (ret) {
		PMD_INIT_LOG(ERR, "get config failed %d.", ret);
		return ret;
	}

	hns3_parse_cfg(hcfg, desc);

	return 0;
}

static int
hns3_parse_speed(int speed_cmd, uint32_t *speed)
{
	switch (speed_cmd) {
	case HNS3_CFG_SPEED_10M:
		*speed = RTE_ETH_SPEED_NUM_10M;
		break;
	case HNS3_CFG_SPEED_100M:
		*speed = RTE_ETH_SPEED_NUM_100M;
		break;
	case HNS3_CFG_SPEED_1G:
		*speed = RTE_ETH_SPEED_NUM_1G;
		break;
	case HNS3_CFG_SPEED_10G:
		*speed = RTE_ETH_SPEED_NUM_10G;
		break;
	case HNS3_CFG_SPEED_25G:
		*speed = RTE_ETH_SPEED_NUM_25G;
		break;
	case HNS3_CFG_SPEED_40G:
		*speed = RTE_ETH_SPEED_NUM_40G;
		break;
	case HNS3_CFG_SPEED_50G:
		*speed = RTE_ETH_SPEED_NUM_50G;
		break;
	case HNS3_CFG_SPEED_100G:
		*speed = RTE_ETH_SPEED_NUM_100G;
		break;
	case HNS3_CFG_SPEED_200G:
		*speed = RTE_ETH_SPEED_NUM_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int
hns3_get_capability(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	ret = hns3_query_mac_stats_reg_num(hw);
	if (ret)
		return ret;

	if (hw->revision < PCI_REVISION_ID_HIP09_A) {
		hns3_set_default_dev_specifications(hw);
		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
		hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
		hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
		hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
		pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
		hw->rss_info.ipv6_sctp_offload_supported = false;
		hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE;
		pf->support_multi_tc_pause = false;
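		/*
		 * All of the settings in this branch form the HIP08
		 * (revision < PCI_REVISION_ID_HIP09_A) profile; the field
		 * below, the 64-byte Rx DMA address alignment, completes it.
		 */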
hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64; 2742 return 0; 2743 } 2744 2745 ret = hns3_query_dev_specifications(hw); 2746 if (ret) { 2747 PMD_INIT_LOG(ERR, 2748 "failed to query dev specifications, ret = %d", 2749 ret); 2750 return ret; 2751 } 2752 2753 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 2754 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 2755 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 2756 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; 2757 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; 2758 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; 2759 hw->rss_info.ipv6_sctp_offload_supported = true; 2760 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; 2761 pf->support_multi_tc_pause = true; 2762 hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128; 2763 2764 return 0; 2765 } 2766 2767 static int 2768 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) 2769 { 2770 int ret; 2771 2772 switch (media_type) { 2773 case HNS3_MEDIA_TYPE_COPPER: 2774 if (!hns3_dev_get_support(hw, COPPER)) { 2775 PMD_INIT_LOG(ERR, 2776 "Media type is copper, not supported."); 2777 ret = -EOPNOTSUPP; 2778 } else { 2779 ret = 0; 2780 } 2781 break; 2782 case HNS3_MEDIA_TYPE_FIBER: 2783 case HNS3_MEDIA_TYPE_BACKPLANE: 2784 ret = 0; 2785 break; 2786 default: 2787 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); 2788 ret = -EINVAL; 2789 break; 2790 } 2791 2792 return ret; 2793 } 2794 2795 static int 2796 hns3_get_board_configuration(struct hns3_hw *hw) 2797 { 2798 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2799 struct hns3_pf *pf = &hns->pf; 2800 struct hns3_cfg cfg; 2801 int ret; 2802 2803 memset(&cfg, 0, sizeof(cfg)); 2804 ret = hns3_get_board_cfg(hw, &cfg); 2805 if (ret) { 2806 PMD_INIT_LOG(ERR, "get board config failed %d", ret); 2807 return ret; 2808 } 2809 2810 ret = hns3_check_media_type(hw, cfg.media_type); 2811 if (ret) 2812 return ret; 2813 2814 hw->mac.media_type = cfg.media_type; 2815 hw->rss_size_max = cfg.rss_size_max; 2816 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); 2817 hw->mac.phy_addr = cfg.phy_addr; 2818 hw->dcb_info.num_pg = 1; 2819 hw->dcb_info.hw_pfc_map = 0; 2820 2821 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); 2822 if (ret) { 2823 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d", 2824 cfg.default_speed, ret); 2825 return ret; 2826 } 2827 2828 pf->tc_max = cfg.tc_num; 2829 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { 2830 PMD_INIT_LOG(WARNING, 2831 "Get TC num(%u) from flash, set TC num to 1", 2832 pf->tc_max); 2833 pf->tc_max = 1; 2834 } 2835 2836 /* Dev does not support DCB */ 2837 if (!hns3_dev_get_support(hw, DCB)) { 2838 pf->tc_max = 1; 2839 pf->pfc_max = 0; 2840 } else 2841 pf->pfc_max = pf->tc_max; 2842 2843 hw->dcb_info.num_tc = 1; 2844 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, 2845 hw->tqps_num / hw->dcb_info.num_tc); 2846 hns3_set_bit(hw->hw_tc_map, 0, 1); 2847 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; 2848 2849 pf->wanted_umv_size = cfg.umv_space; 2850 2851 return ret; 2852 } 2853 2854 static int 2855 hns3_get_configuration(struct hns3_hw *hw) 2856 { 2857 int ret; 2858 2859 ret = hns3_query_function_status(hw); 2860 if (ret) { 2861 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); 2862 return ret; 2863 } 2864 2865 /* Get device capability */ 2866 ret = hns3_get_capability(hw); 2867 if (ret) { 2868 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 2869 return ret; 2870 } 2871 2872 /* Get pf resource */ 2873 ret = hns3_query_pf_resource(hw); 2874 
if (ret) { 2875 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); 2876 return ret; 2877 } 2878 2879 ret = hns3_get_board_configuration(hw); 2880 if (ret) { 2881 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); 2882 return ret; 2883 } 2884 2885 ret = hns3_query_dev_fec_info(hw); 2886 if (ret) 2887 PMD_INIT_LOG(ERR, 2888 "failed to query FEC information, ret = %d", ret); 2889 2890 return ret; 2891 } 2892 2893 static int 2894 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid, 2895 uint16_t tqp_vid, bool is_pf) 2896 { 2897 struct hns3_tqp_map_cmd *req; 2898 struct hns3_cmd_desc desc; 2899 int ret; 2900 2901 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); 2902 2903 req = (struct hns3_tqp_map_cmd *)desc.data; 2904 req->tqp_id = rte_cpu_to_le_16(tqp_pid); 2905 req->tqp_vf = func_id; 2906 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; 2907 if (!is_pf) 2908 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); 2909 req->tqp_vid = rte_cpu_to_le_16(tqp_vid); 2910 2911 ret = hns3_cmd_send(hw, &desc, 1); 2912 if (ret) 2913 PMD_INIT_LOG(ERR, "TQP map failed %d", ret); 2914 2915 return ret; 2916 } 2917 2918 static int 2919 hns3_map_tqp(struct hns3_hw *hw) 2920 { 2921 uint16_t i; 2922 int ret; 2923 2924 /* 2925 * In current version, VF is not supported when PF is driven by DPDK 2926 * driver, so we assign total tqps_num tqps allocated to this port 2927 * to PF. 2928 */ 2929 for (i = 0; i < hw->total_tqps_num; i++) { 2930 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true); 2931 if (ret) 2932 return ret; 2933 } 2934 2935 return 0; 2936 } 2937 2938 static int 2939 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 2940 { 2941 struct hns3_config_mac_speed_dup_cmd *req; 2942 struct hns3_cmd_desc desc; 2943 int ret; 2944 2945 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; 2946 2947 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); 2948 2949 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 
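/*
 * Illustrative call (sketch): configuring 25G full duplex,
 *   hns3_cfg_mac_speed_dup_hw(hw, RTE_ETH_SPEED_NUM_25G,
 *			       RTE_ETH_LINK_FULL_DUPLEX);
 * sets the duplex bit here and HNS3_CFG_SPEED_25G in the speed field
 * selected by the switch below.
 */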
1 : 0); 2950 2951 switch (speed) { 2952 case RTE_ETH_SPEED_NUM_10M: 2953 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2954 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); 2955 break; 2956 case RTE_ETH_SPEED_NUM_100M: 2957 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2958 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); 2959 break; 2960 case RTE_ETH_SPEED_NUM_1G: 2961 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2962 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); 2963 break; 2964 case RTE_ETH_SPEED_NUM_10G: 2965 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2966 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); 2967 break; 2968 case RTE_ETH_SPEED_NUM_25G: 2969 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2970 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); 2971 break; 2972 case RTE_ETH_SPEED_NUM_40G: 2973 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2974 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); 2975 break; 2976 case RTE_ETH_SPEED_NUM_50G: 2977 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2978 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); 2979 break; 2980 case RTE_ETH_SPEED_NUM_100G: 2981 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2982 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); 2983 break; 2984 case RTE_ETH_SPEED_NUM_200G: 2985 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2986 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); 2987 break; 2988 default: 2989 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); 2990 return -EINVAL; 2991 } 2992 2993 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); 2994 2995 ret = hns3_cmd_send(hw, &desc, 1); 2996 if (ret) 2997 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); 2998 2999 return ret; 3000 } 3001 3002 static int 3003 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3004 { 3005 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3006 struct hns3_pf *pf = &hns->pf; 3007 struct hns3_priv_buf *priv; 3008 uint32_t i, total_size; 3009 3010 total_size = pf->pkt_buf_size; 3011 3012 /* alloc tx buffer for all enabled tc */ 3013 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3014 priv = &buf_alloc->priv_buf[i]; 3015 3016 if (hw->hw_tc_map & BIT(i)) { 3017 if (total_size < pf->tx_buf_size) 3018 return -ENOMEM; 3019 3020 priv->tx_buf_size = pf->tx_buf_size; 3021 } else 3022 priv->tx_buf_size = 0; 3023 3024 total_size -= priv->tx_buf_size; 3025 } 3026 3027 return 0; 3028 } 3029 3030 static int 3031 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3032 { 3033 /* TX buffer size is unit by 128 byte */ 3034 #define HNS3_BUF_SIZE_UNIT_SHIFT 7 3035 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) 3036 struct hns3_tx_buff_alloc_cmd *req; 3037 struct hns3_cmd_desc desc; 3038 uint32_t buf_size; 3039 uint32_t i; 3040 int ret; 3041 3042 req = (struct hns3_tx_buff_alloc_cmd *)desc.data; 3043 3044 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); 3045 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3046 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 3047 3048 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; 3049 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | 3050 HNS3_BUF_SIZE_UPDATE_EN_MSK); 3051 } 3052 3053 ret = hns3_cmd_send(hw, &desc, 1); 3054 if (ret) 3055 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); 3056 3057 return ret; 3058 } 3059 3060 static int 3061 hns3_get_tc_num(struct hns3_hw *hw) 3062 { 3063 int cnt = 0; 3064 uint8_t i; 3065 3066 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3067 if (hw->hw_tc_map & BIT(i)) 3068 cnt++; 3069 return cnt; 3070 } 3071 3072 static uint32_t 3073 
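/*
 * Worked example for the Tx buffer command above: a 32KB (0x8000) per-TC
 * Tx buffer is encoded as 0x8000 >> 7 = 0x100 units of 128 bytes, OR'ed
 * with HNS3_BUF_SIZE_UPDATE_EN_MSK (bit 15) so hardware applies the new
 * value.
 */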
hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3074 { 3075 struct hns3_priv_buf *priv; 3076 uint32_t rx_priv = 0; 3077 int i; 3078 3079 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3080 priv = &buf_alloc->priv_buf[i]; 3081 if (priv->enable) 3082 rx_priv += priv->buf_size; 3083 } 3084 return rx_priv; 3085 } 3086 3087 static uint32_t 3088 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3089 { 3090 uint32_t total_tx_size = 0; 3091 uint32_t i; 3092 3093 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3094 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 3095 3096 return total_tx_size; 3097 } 3098 3099 /* Get the number of pfc enabled TCs, which have private buffer */ 3100 static int 3101 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3102 { 3103 struct hns3_priv_buf *priv; 3104 int cnt = 0; 3105 uint8_t i; 3106 3107 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3108 priv = &buf_alloc->priv_buf[i]; 3109 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3110 cnt++; 3111 } 3112 3113 return cnt; 3114 } 3115 3116 /* Get the number of pfc disabled TCs, which have private buffer */ 3117 static int 3118 hns3_get_no_pfc_priv_num(struct hns3_hw *hw, 3119 struct hns3_pkt_buf_alloc *buf_alloc) 3120 { 3121 struct hns3_priv_buf *priv; 3122 int cnt = 0; 3123 uint8_t i; 3124 3125 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3126 priv = &buf_alloc->priv_buf[i]; 3127 if (hw->hw_tc_map & BIT(i) && 3128 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3129 cnt++; 3130 } 3131 3132 return cnt; 3133 } 3134 3135 static bool 3136 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, 3137 uint32_t rx_all) 3138 { 3139 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 3140 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3141 struct hns3_pf *pf = &hns->pf; 3142 uint32_t shared_buf, aligned_mps; 3143 uint32_t rx_priv; 3144 uint8_t tc_num; 3145 uint8_t i; 3146 3147 tc_num = hns3_get_tc_num(hw); 3148 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3149 3150 if (hns3_dev_get_support(hw, DCB)) 3151 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + 3152 pf->dv_buf_size; 3153 else 3154 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF 3155 + pf->dv_buf_size; 3156 3157 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 3158 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), 3159 HNS3_BUF_SIZE_UNIT); 3160 3161 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); 3162 if (rx_all < rx_priv + shared_std) 3163 return false; 3164 3165 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); 3166 buf_alloc->s_buf.buf_size = shared_buf; 3167 if (hns3_dev_get_support(hw, DCB)) { 3168 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; 3169 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 3170 - roundup(aligned_mps / HNS3_BUF_DIV_BY, 3171 HNS3_BUF_SIZE_UNIT); 3172 } else { 3173 buf_alloc->s_buf.self.high = 3174 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3175 buf_alloc->s_buf.self.low = aligned_mps; 3176 } 3177 3178 if (hns3_dev_get_support(hw, DCB)) { 3179 hi_thrd = shared_buf - pf->dv_buf_size; 3180 3181 if (tc_num <= NEED_RESERVE_TC_NUM) 3182 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT / 3183 BUF_MAX_PERCENT; 3184 3185 if (tc_num) 3186 hi_thrd = hi_thrd / tc_num; 3187 3188 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); 3189 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); 3190 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; 3191 } else { 3192 hi_thrd = aligned_mps + 
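/*
 * Worked example (sketch, assuming a 256-byte HNS3_BUF_SIZE_UNIT): with
 * pf->mps = 1500, aligned_mps = roundup(1500, 256) = 1536, so the
 * non-DCB thresholds below come out as
 * hi_thrd = 1536 + HNS3_NON_DCB_ADDITIONAL_BUF and lo_thrd = 1536.
 */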
HNS3_NON_DCB_ADDITIONAL_BUF; 3193 lo_thrd = aligned_mps; 3194 } 3195 3196 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3197 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 3198 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 3199 } 3200 3201 return true; 3202 } 3203 3204 static bool 3205 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, 3206 struct hns3_pkt_buf_alloc *buf_alloc) 3207 { 3208 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3209 struct hns3_pf *pf = &hns->pf; 3210 struct hns3_priv_buf *priv; 3211 uint32_t aligned_mps; 3212 uint32_t rx_all; 3213 uint8_t i; 3214 3215 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3216 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3217 3218 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3219 priv = &buf_alloc->priv_buf[i]; 3220 3221 priv->enable = 0; 3222 priv->wl.low = 0; 3223 priv->wl.high = 0; 3224 priv->buf_size = 0; 3225 3226 if (!(hw->hw_tc_map & BIT(i))) 3227 continue; 3228 3229 priv->enable = 1; 3230 if (hw->dcb_info.hw_pfc_map & BIT(i)) { 3231 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT; 3232 priv->wl.high = roundup(priv->wl.low + aligned_mps, 3233 HNS3_BUF_SIZE_UNIT); 3234 } else { 3235 priv->wl.low = 0; 3236 priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) : 3237 aligned_mps; 3238 } 3239 3240 priv->buf_size = priv->wl.high + pf->dv_buf_size; 3241 } 3242 3243 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3244 } 3245 3246 static bool 3247 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw, 3248 struct hns3_pkt_buf_alloc *buf_alloc) 3249 { 3250 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3251 struct hns3_pf *pf = &hns->pf; 3252 struct hns3_priv_buf *priv; 3253 int no_pfc_priv_num; 3254 uint32_t rx_all; 3255 uint8_t mask; 3256 int i; 3257 3258 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3259 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc); 3260 3261 /* let the last to be cleared first */ 3262 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3263 priv = &buf_alloc->priv_buf[i]; 3264 mask = BIT((uint8_t)i); 3265 if (hw->hw_tc_map & mask && 3266 !(hw->dcb_info.hw_pfc_map & mask)) { 3267 /* Clear the no pfc TC private buffer */ 3268 priv->wl.low = 0; 3269 priv->wl.high = 0; 3270 priv->buf_size = 0; 3271 priv->enable = 0; 3272 no_pfc_priv_num--; 3273 } 3274 3275 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3276 no_pfc_priv_num == 0) 3277 break; 3278 } 3279 3280 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3281 } 3282 3283 static bool 3284 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, 3285 struct hns3_pkt_buf_alloc *buf_alloc) 3286 { 3287 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3288 struct hns3_pf *pf = &hns->pf; 3289 struct hns3_priv_buf *priv; 3290 uint32_t rx_all; 3291 int pfc_priv_num; 3292 uint8_t mask; 3293 int i; 3294 3295 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3296 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc); 3297 3298 /* let the last to be cleared first */ 3299 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3300 priv = &buf_alloc->priv_buf[i]; 3301 mask = BIT((uint8_t)i); 3302 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) { 3303 /* Reduce the number of pfc TC with private buffer */ 3304 priv->wl.low = 0; 3305 priv->enable = 0; 3306 priv->wl.high = 0; 3307 priv->buf_size = 0; 3308 pfc_priv_num--; 3309 } 3310 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3311 pfc_priv_num == 0) 3312 break; 3313 } 3314 3315 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3316 } 3317 3318 static bool 3319 
hns3_only_alloc_priv_buff(struct hns3_hw *hw,
			  struct hns3_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP	0x1800
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t tc_num = hns3_get_tc_num(hw);
	uint32_t half_mps = pf->mps >> 1;
	struct hns3_priv_buf *priv;
	uint32_t min_rx_priv;
	uint32_t rx_priv;
	uint8_t i;

	rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	/*
	 * The minimum value of the private buffer in the Rx direction
	 * (min_rx_priv) is "DV + 2.5 * MPS + 15KB". The driver only
	 * allocates Rx private buffers if rx_priv is greater than
	 * min_rx_priv.
	 */
	min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
	rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hw->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - pf->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/*
 * hns3_rx_buffer_calc: calculate the Rx private buffer size for all TCs
 * @hw: pointer to struct hns3_hw
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int
hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, Rx private buffers are not allocated. */
	if (!hns3_dev_get_support(hw, DCB)) {
		struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
		struct hns3_pf *pf = &hns->pf;
		uint32_t rx_all = pf->pkt_buf_size;

		rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
		if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/*
	 * Try to allocate private packet buffers for all TCs without a
	 * shared buffer.
	 */
	if (hns3_only_alloc_priv_buff(hw, buf_alloc))
		return 0;

	/*
	 * Try to allocate private packet buffers for all TCs with a shared
	 * buffer.
	 */
	if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
		return 0;

	/*
	 * The number of enabled ports, TCs and no-drop TCs differs between
	 * application scenarios. To obtain better performance, the driver
	 * shrinks the private buffer sizes step by step, decreasing the
	 * waterline in this order: valid TCs first, then PFC-disabled TCs,
	 * then PFC-enabled TCs.
3417 */ 3418 if (hns3_rx_buf_calc_all(hw, false, buf_alloc)) 3419 return 0; 3420 3421 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc)) 3422 return 0; 3423 3424 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc)) 3425 return 0; 3426 3427 return -ENOMEM; 3428 } 3429 3430 static int 3431 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3432 { 3433 struct hns3_rx_priv_buff_cmd *req; 3434 struct hns3_cmd_desc desc; 3435 uint32_t buf_size; 3436 int ret; 3437 int i; 3438 3439 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false); 3440 req = (struct hns3_rx_priv_buff_cmd *)desc.data; 3441 3442 /* Alloc private buffer TCs */ 3443 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3444 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i]; 3445 3446 req->buf_num[i] = 3447 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S); 3448 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B); 3449 } 3450 3451 buf_size = buf_alloc->s_buf.buf_size; 3452 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) | 3453 (1 << HNS3_TC0_PRI_BUF_EN_B)); 3454 3455 ret = hns3_cmd_send(hw, &desc, 1); 3456 if (ret) 3457 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret); 3458 3459 return ret; 3460 } 3461 3462 static int 3463 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3464 { 3465 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2 3466 struct hns3_rx_priv_wl_buf *req; 3467 struct hns3_priv_buf *priv; 3468 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM]; 3469 int i, j; 3470 int ret; 3471 3472 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) { 3473 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC, 3474 false); 3475 req = (struct hns3_rx_priv_wl_buf *)desc[i].data; 3476 3477 /* The first descriptor set the NEXT bit to 1 */ 3478 if (i == 0) 3479 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3480 else 3481 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3482 3483 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 3484 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j; 3485 3486 priv = &buf_alloc->priv_buf[idx]; 3487 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >> 3488 HNS3_BUF_UNIT_S); 3489 req->tc_wl[j].high |= 3490 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3491 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >> 3492 HNS3_BUF_UNIT_S); 3493 req->tc_wl[j].low |= 3494 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3495 } 3496 } 3497 3498 /* Send 2 descriptor at one time */ 3499 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM); 3500 if (ret) 3501 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d", 3502 ret); 3503 return ret; 3504 } 3505 3506 static int 3507 hns3_common_thrd_config(struct hns3_hw *hw, 3508 struct hns3_pkt_buf_alloc *buf_alloc) 3509 { 3510 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2 3511 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf; 3512 struct hns3_rx_com_thrd *req; 3513 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM]; 3514 struct hns3_tc_thrd *tc; 3515 int tc_idx; 3516 int i, j; 3517 int ret; 3518 3519 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) { 3520 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC, 3521 false); 3522 req = (struct hns3_rx_com_thrd *)&desc[i].data; 3523 3524 /* The first descriptor set the NEXT bit to 1 */ 3525 if (i == 0) 3526 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3527 else 3528 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3529 3530 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 3531 tc_idx 
= i * HNS3_TC_NUM_ONE_DESC + j; 3532 tc = &s_buf->tc_thrd[tc_idx]; 3533 3534 req->com_thrd[j].high = 3535 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); 3536 req->com_thrd[j].high |= 3537 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3538 req->com_thrd[j].low = 3539 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); 3540 req->com_thrd[j].low |= 3541 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3542 } 3543 } 3544 3545 /* Send 2 descriptors at one time */ 3546 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); 3547 if (ret) 3548 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); 3549 3550 return ret; 3551 } 3552 3553 static int 3554 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3555 { 3556 struct hns3_shared_buf *buf = &buf_alloc->s_buf; 3557 struct hns3_rx_com_wl *req; 3558 struct hns3_cmd_desc desc; 3559 int ret; 3560 3561 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); 3562 3563 req = (struct hns3_rx_com_wl *)desc.data; 3564 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); 3565 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3566 3567 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); 3568 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3569 3570 ret = hns3_cmd_send(hw, &desc, 1); 3571 if (ret) 3572 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); 3573 3574 return ret; 3575 } 3576 3577 int 3578 hns3_buffer_alloc(struct hns3_hw *hw) 3579 { 3580 struct hns3_pkt_buf_alloc pkt_buf; 3581 int ret; 3582 3583 memset(&pkt_buf, 0, sizeof(pkt_buf)); 3584 ret = hns3_tx_buffer_calc(hw, &pkt_buf); 3585 if (ret) { 3586 PMD_INIT_LOG(ERR, 3587 "could not calc tx buffer size for all TCs %d", 3588 ret); 3589 return ret; 3590 } 3591 3592 ret = hns3_tx_buffer_alloc(hw, &pkt_buf); 3593 if (ret) { 3594 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); 3595 return ret; 3596 } 3597 3598 ret = hns3_rx_buffer_calc(hw, &pkt_buf); 3599 if (ret) { 3600 PMD_INIT_LOG(ERR, 3601 "could not calc rx priv buffer size for all TCs %d", 3602 ret); 3603 return ret; 3604 } 3605 3606 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); 3607 if (ret) { 3608 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); 3609 return ret; 3610 } 3611 3612 if (hns3_dev_get_support(hw, DCB)) { 3613 ret = hns3_rx_priv_wl_config(hw, &pkt_buf); 3614 if (ret) { 3615 PMD_INIT_LOG(ERR, 3616 "could not configure rx private waterline %d", 3617 ret); 3618 return ret; 3619 } 3620 3621 ret = hns3_common_thrd_config(hw, &pkt_buf); 3622 if (ret) { 3623 PMD_INIT_LOG(ERR, 3624 "could not configure common threshold %d", 3625 ret); 3626 return ret; 3627 } 3628 } 3629 3630 ret = hns3_common_wl_config(hw, &pkt_buf); 3631 if (ret) 3632 PMD_INIT_LOG(ERR, "could not configure common waterline %d", 3633 ret); 3634 3635 return ret; 3636 } 3637 3638 static int 3639 hns3_mac_init(struct hns3_hw *hw) 3640 { 3641 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3642 struct hns3_mac *mac = &hw->mac; 3643 struct hns3_pf *pf = &hns->pf; 3644 int ret; 3645 3646 pf->support_sfp_query = true; 3647 mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 3648 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); 3649 if (ret) { 3650 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); 3651 return ret; 3652 } 3653 3654 mac->link_status = RTE_ETH_LINK_DOWN; 3655 3656 return hns3_config_mtu(hw, pf->mps); 3657 } 3658 3659 static int 3660 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t 
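/*
 * resp_code here is byte 1 of desc.data[0], extracted by the caller;
 * note that HNS3_ETHERTYPE_ALREADY_ADD is mapped to success below, so
 * re-adding the LLDP manager-table entry is harmless.
 */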
resp_code) 3661 { 3662 #define HNS3_ETHERTYPE_SUCCESS_ADD 0 3663 #define HNS3_ETHERTYPE_ALREADY_ADD 1 3664 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 3665 #define HNS3_ETHERTYPE_KEY_CONFLICT 3 3666 int return_status; 3667 3668 if (cmdq_resp) { 3669 PMD_INIT_LOG(ERR, 3670 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.", 3671 cmdq_resp); 3672 return -EIO; 3673 } 3674 3675 switch (resp_code) { 3676 case HNS3_ETHERTYPE_SUCCESS_ADD: 3677 case HNS3_ETHERTYPE_ALREADY_ADD: 3678 return_status = 0; 3679 break; 3680 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: 3681 PMD_INIT_LOG(ERR, 3682 "add mac ethertype failed for manager table overflow."); 3683 return_status = -EIO; 3684 break; 3685 case HNS3_ETHERTYPE_KEY_CONFLICT: 3686 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); 3687 return_status = -EIO; 3688 break; 3689 default: 3690 PMD_INIT_LOG(ERR, 3691 "add mac ethertype failed for undefined, code=%u.", 3692 resp_code); 3693 return_status = -EIO; 3694 break; 3695 } 3696 3697 return return_status; 3698 } 3699 3700 static int 3701 hns3_add_mgr_tbl(struct hns3_hw *hw, 3702 const struct hns3_mac_mgr_tbl_entry_cmd *req) 3703 { 3704 struct hns3_cmd_desc desc; 3705 uint8_t resp_code; 3706 uint16_t retval; 3707 int ret; 3708 3709 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); 3710 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); 3711 3712 ret = hns3_cmd_send(hw, &desc, 1); 3713 if (ret) { 3714 PMD_INIT_LOG(ERR, 3715 "add mac ethertype failed for cmd_send, ret =%d.", 3716 ret); 3717 return ret; 3718 } 3719 3720 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 3721 retval = rte_le_to_cpu_16(desc.retval); 3722 3723 return hns3_get_mac_ethertype_cmd_status(retval, resp_code); 3724 } 3725 3726 static void 3727 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, 3728 int *table_item_num) 3729 { 3730 struct hns3_mac_mgr_tbl_entry_cmd *tbl; 3731 3732 /* 3733 * In current version, we add one item in management table as below: 3734 * 0x0180C200000E -- LLDP MC address 3735 */ 3736 tbl = mgr_table; 3737 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; 3738 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); 3739 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); 3740 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); 3741 tbl->i_port_bitmap = 0x1; 3742 *table_item_num = 1; 3743 } 3744 3745 static int 3746 hns3_init_mgr_tbl(struct hns3_hw *hw) 3747 { 3748 #define HNS_MAC_MGR_TBL_MAX_SIZE 16 3749 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; 3750 int table_item_num; 3751 int ret; 3752 int i; 3753 3754 memset(mgr_table, 0, sizeof(mgr_table)); 3755 hns3_prepare_mgr_tbl(mgr_table, &table_item_num); 3756 for (i = 0; i < table_item_num; i++) { 3757 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); 3758 if (ret) { 3759 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d", 3760 ret); 3761 return ret; 3762 } 3763 } 3764 3765 return 0; 3766 } 3767 3768 static void 3769 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, 3770 bool en_mc, bool en_bc, int vport_id) 3771 { 3772 if (!param) 3773 return; 3774 3775 memset(param, 0, sizeof(struct hns3_promisc_param)); 3776 if (en_uc) 3777 param->enable = HNS3_PROMISC_EN_UC; 3778 if (en_mc) 3779 param->enable |= HNS3_PROMISC_EN_MC; 3780 if (en_bc) 3781 param->enable |= HNS3_PROMISC_EN_BC; 3782 param->vf_id = vport_id; 3783 } 3784 3785 static int 3786 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) 3787 { 3788 struct 
hns3_promisc_cfg_cmd *req; 3789 struct hns3_cmd_desc desc; 3790 int ret; 3791 3792 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false); 3793 3794 req = (struct hns3_promisc_cfg_cmd *)desc.data; 3795 req->vf_id = param->vf_id; 3796 req->flag = (param->enable << HNS3_PROMISC_EN_B) | 3797 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B; 3798 3799 ret = hns3_cmd_send(hw, &desc, 1); 3800 if (ret) 3801 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret); 3802 3803 return ret; 3804 } 3805 3806 static int 3807 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) 3808 { 3809 struct hns3_promisc_param param; 3810 bool en_bc_pmc = true; 3811 uint8_t vf_id; 3812 3813 /* 3814 * In current version VF is not supported when PF is driven by DPDK 3815 * driver, just need to configure parameters for PF vport. 3816 */ 3817 vf_id = HNS3_PF_FUNC_ID; 3818 3819 hns3_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); 3820 return hns3_cmd_set_promisc_mode(hw, ¶m); 3821 } 3822 3823 static int 3824 hns3_promisc_init(struct hns3_hw *hw) 3825 { 3826 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3827 struct hns3_pf *pf = &hns->pf; 3828 struct hns3_promisc_param param; 3829 uint16_t func_id; 3830 int ret; 3831 3832 ret = hns3_set_promisc_mode(hw, false, false); 3833 if (ret) { 3834 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret); 3835 return ret; 3836 } 3837 3838 /* 3839 * In current version VFs are not supported when PF is driven by DPDK 3840 * driver. After PF has been taken over by DPDK, the original VF will 3841 * be invalid. So, there is a possibility of entry residues. It should 3842 * clear VFs's promisc mode to avoid unnecessary bandwidth usage 3843 * during init. 3844 */ 3845 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) { 3846 hns3_promisc_param_init(¶m, false, false, false, func_id); 3847 ret = hns3_cmd_set_promisc_mode(hw, ¶m); 3848 if (ret) { 3849 PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode," 3850 " ret = %d", func_id, ret); 3851 return ret; 3852 } 3853 } 3854 3855 return 0; 3856 } 3857 3858 static void 3859 hns3_promisc_uninit(struct hns3_hw *hw) 3860 { 3861 struct hns3_promisc_param param; 3862 uint16_t func_id; 3863 int ret; 3864 3865 func_id = HNS3_PF_FUNC_ID; 3866 3867 /* 3868 * In current version VFs are not supported when PF is driven by 3869 * DPDK driver, and VFs' promisc mode status has been cleared during 3870 * init and their status will not change. So just clear PF's promisc 3871 * mode status during uninit. 3872 */ 3873 hns3_promisc_param_init(¶m, false, false, false, func_id); 3874 ret = hns3_cmd_set_promisc_mode(hw, ¶m); 3875 if (ret) 3876 PMD_INIT_LOG(ERR, "failed to clear promisc status during" 3877 " uninit, ret = %d", ret); 3878 } 3879 3880 static int 3881 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) 3882 { 3883 bool allmulti = dev->data->all_multicast ? true : false; 3884 struct hns3_adapter *hns = dev->data->dev_private; 3885 struct hns3_hw *hw = &hns->hw; 3886 uint64_t offloads; 3887 int err; 3888 int ret; 3889 3890 rte_spinlock_lock(&hw->lock); 3891 ret = hns3_set_promisc_mode(hw, true, true); 3892 if (ret) { 3893 rte_spinlock_unlock(&hw->lock); 3894 hns3_err(hw, "failed to enable promiscuous mode, ret = %d", 3895 ret); 3896 return ret; 3897 } 3898 3899 /* 3900 * When promiscuous mode was enabled, disable the vlan filter to let 3901 * all packets coming in the receiving direction. 
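	 * For example, with RTE_ETH_RX_OFFLOAD_VLAN_FILTER configured,
	 * frames from VLAN IDs that were never added with
	 * rte_eth_dev_vlan_filter() are still delivered while promiscuous
	 * mode stays on.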
3902 */ 3903 offloads = dev->data->dev_conf.rxmode.offloads; 3904 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 3905 ret = hns3_enable_vlan_filter(hns, false); 3906 if (ret) { 3907 hns3_err(hw, "failed to enable promiscuous mode due to " 3908 "failure to disable vlan filter, ret = %d", 3909 ret); 3910 err = hns3_set_promisc_mode(hw, false, allmulti); 3911 if (err) 3912 hns3_err(hw, "failed to restore promiscuous " 3913 "status after disable vlan filter " 3914 "failed during enabling promiscuous " 3915 "mode, ret = %d", ret); 3916 } 3917 } 3918 3919 rte_spinlock_unlock(&hw->lock); 3920 3921 return ret; 3922 } 3923 3924 static int 3925 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) 3926 { 3927 bool allmulti = dev->data->all_multicast ? true : false; 3928 struct hns3_adapter *hns = dev->data->dev_private; 3929 struct hns3_hw *hw = &hns->hw; 3930 uint64_t offloads; 3931 int err; 3932 int ret; 3933 3934 /* If now in all_multicast mode, must remain in all_multicast mode. */ 3935 rte_spinlock_lock(&hw->lock); 3936 ret = hns3_set_promisc_mode(hw, false, allmulti); 3937 if (ret) { 3938 rte_spinlock_unlock(&hw->lock); 3939 hns3_err(hw, "failed to disable promiscuous mode, ret = %d", 3940 ret); 3941 return ret; 3942 } 3943 /* when promiscuous mode was disabled, restore the vlan filter status */ 3944 offloads = dev->data->dev_conf.rxmode.offloads; 3945 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 3946 ret = hns3_enable_vlan_filter(hns, true); 3947 if (ret) { 3948 hns3_err(hw, "failed to disable promiscuous mode due to" 3949 " failure to restore vlan filter, ret = %d", 3950 ret); 3951 err = hns3_set_promisc_mode(hw, true, true); 3952 if (err) 3953 hns3_err(hw, "failed to restore promiscuous " 3954 "status after enabling vlan filter " 3955 "failed during disabling promiscuous " 3956 "mode, ret = %d", ret); 3957 } 3958 } 3959 rte_spinlock_unlock(&hw->lock); 3960 3961 return ret; 3962 } 3963 3964 static int 3965 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) 3966 { 3967 struct hns3_adapter *hns = dev->data->dev_private; 3968 struct hns3_hw *hw = &hns->hw; 3969 int ret; 3970 3971 if (dev->data->promiscuous) 3972 return 0; 3973 3974 rte_spinlock_lock(&hw->lock); 3975 ret = hns3_set_promisc_mode(hw, false, true); 3976 rte_spinlock_unlock(&hw->lock); 3977 if (ret) 3978 hns3_err(hw, "failed to enable allmulticast mode, ret = %d", 3979 ret); 3980 3981 return ret; 3982 } 3983 3984 static int 3985 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) 3986 { 3987 struct hns3_adapter *hns = dev->data->dev_private; 3988 struct hns3_hw *hw = &hns->hw; 3989 int ret; 3990 3991 /* If now in promiscuous mode, must remain in all_multicast mode. */ 3992 if (dev->data->promiscuous) 3993 return 0; 3994 3995 rte_spinlock_lock(&hw->lock); 3996 ret = hns3_set_promisc_mode(hw, false, false); 3997 rte_spinlock_unlock(&hw->lock); 3998 if (ret) 3999 hns3_err(hw, "failed to disable allmulticast mode, ret = %d", 4000 ret); 4001 4002 return ret; 4003 } 4004 4005 static int 4006 hns3_dev_promisc_restore(struct hns3_adapter *hns) 4007 { 4008 struct hns3_hw *hw = &hns->hw; 4009 bool allmulti = hw->data->all_multicast ? 
true : false;
4010 int ret;
4011
4012 if (hw->data->promiscuous) {
4013 ret = hns3_set_promisc_mode(hw, true, true);
4014 if (ret)
4015 hns3_err(hw, "failed to restore promiscuous mode, "
4016 "ret = %d", ret);
4017 return ret;
4018 }
4019
4020 ret = hns3_set_promisc_mode(hw, false, allmulti);
4021 if (ret)
4022 hns3_err(hw, "failed to restore allmulticast mode, ret = %d",
4023 ret);
4024 return ret;
4025 }
4026
4027 static int
4028 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
4029 {
4030 struct hns3_sfp_info_cmd *resp;
4031 uint32_t local_pause, lp_pause;
4032 struct hns3_cmd_desc desc;
4033 int ret;
4034
4035 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
4036 resp = (struct hns3_sfp_info_cmd *)desc.data;
4037 resp->query_type = HNS3_ACTIVE_QUERY;
4038
4039 ret = hns3_cmd_send(hw, &desc, 1);
4040 if (ret == -EOPNOTSUPP) {
4041 hns3_warn(hw, "firmware does not support getting SFP info,"
4042 " ret = %d.", ret);
4043 return ret;
4044 } else if (ret) {
4045 hns3_err(hw, "get sfp info failed, ret = %d.", ret);
4046 return ret;
4047 }
4048
4049 /*
4050 * In some cases the MAC speed obtained from the firmware may be 0;
4051 * it must not be written to mac->speed.
4052 */
4053 if (!rte_le_to_cpu_32(resp->sfp_speed))
4054 return 0;
4055
4056 mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed);
4057 /*
4058 * If resp->supported_speed is 0, the firmware is an old version,
4059 * so do not update these params.
4060 */
4061 if (resp->supported_speed) {
4062 mac_info->query_type = HNS3_ACTIVE_QUERY;
4063 mac_info->supported_speed =
4064 rte_le_to_cpu_32(resp->supported_speed);
4065 mac_info->support_autoneg = resp->autoneg_ability;
4066 mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
4067 : RTE_ETH_LINK_AUTONEG;
4068 mac_info->fec_capa = resp->fec_ability;
4069 local_pause = resp->pause_status & HNS3_FIBER_LOCAL_PAUSE_MASK;
4070 lp_pause = (resp->pause_status & HNS3_FIBER_LP_PAUSE_MASK) >>
4071 HNS3_FIBER_LP_PAUSE_S;
4072 mac_info->advertising =
4073 local_pause << HNS3_PHY_LINK_MODE_PAUSE_S;
4074 mac_info->lp_advertising =
4075 lp_pause << HNS3_PHY_LINK_MODE_PAUSE_S;
4076 } else {
4077 mac_info->query_type = HNS3_DEFAULT_QUERY;
4078 }
4079
4080 return 0;
4081 }
4082
4083 static uint8_t
4084 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
4085 {
4086 if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
4087 duplex = RTE_ETH_LINK_FULL_DUPLEX;
4088
4089 return duplex;
4090 }
4091
4092 static int
4093 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
4094 {
4095 struct hns3_mac *mac = &hw->mac;
4096 int ret;
4097
4098 duplex = hns3_check_speed_dup(duplex, speed);
4099 if (mac->link_speed == speed && mac->link_duplex == duplex)
4100 return 0;
4101
4102 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
4103 if (ret)
4104 return ret;
4105
4106 ret = hns3_port_shaper_update(hw, speed);
4107 if (ret)
4108 return ret;
4109
4110 mac->link_speed = speed;
4111 mac->link_duplex = duplex;
4112
4113 return 0;
4114 }
4115
4116 static int
4117 hns3_update_fiber_link_info(struct hns3_hw *hw)
4118 {
4119 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
4120 struct hns3_mac *mac = &hw->mac;
4121 struct hns3_mac mac_info;
4122 int ret;
4123
4124 /* If the firmware does not support getting the SFP/qSFP speed, return directly */
4125 if (!pf->support_sfp_query)
4126 return 0;
4127
4128 memset(&mac_info, 0, sizeof(struct hns3_mac));
4129 ret = hns3_get_sfp_info(hw, &mac_info);
4130 if (ret == -EOPNOTSUPP) {
4131 pf->support_sfp_query =
false;
4132 return ret;
4133 } else if (ret)
4134 return ret;
4135
4136 /* Do nothing if no SFP */
4137 if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
4138 return 0;
4139
4140 /*
4141 * If query_type is HNS3_ACTIVE_QUERY, there is no need to
4142 * reconfigure the MAC speed. Otherwise, the current firmware
4143 * only supports obtaining the SFP speed, and the MAC speed
4144 * has to be reconfigured.
4145 */
4146 mac->query_type = mac_info.query_type;
4147 if (mac->query_type == HNS3_ACTIVE_QUERY) {
4148 if (mac_info.link_speed != mac->link_speed) {
4149 ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4150 if (ret)
4151 return ret;
4152 }
4153
4154 mac->link_speed = mac_info.link_speed;
4155 mac->supported_speed = mac_info.supported_speed;
4156 mac->support_autoneg = mac_info.support_autoneg;
4157 mac->link_autoneg = mac_info.link_autoneg;
4158 mac->fec_capa = mac_info.fec_capa;
4159 mac->advertising = mac_info.advertising;
4160 mac->lp_advertising = mac_info.lp_advertising;
4161
4162 return 0;
4163 }
4164
4165 /* Configure full duplex for the SFP */
4166 return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
4167 RTE_ETH_LINK_FULL_DUPLEX);
4168 }
4169
4170 static void
4171 hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
4172 {
4173 #define HNS3_PHY_SUPPORTED_SPEED_MASK 0x2f
4174
4175 struct hns3_phy_params_bd0_cmd *req;
4176 uint32_t supported;
4177
4178 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
4179 mac->link_speed = rte_le_to_cpu_32(req->speed);
4180 mac->link_duplex = hns3_get_bit(req->duplex,
4181 HNS3_PHY_DUPLEX_CFG_B);
4182 mac->link_autoneg = hns3_get_bit(req->autoneg,
4183 HNS3_PHY_AUTONEG_CFG_B);
4184 mac->advertising = rte_le_to_cpu_32(req->advertising);
4185 mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
4186 supported = rte_le_to_cpu_32(req->supported);
4187 mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK;
4188 mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT);
4189 }
4190
4191 static int
4192 hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
4193 {
4194 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
4195 uint16_t i;
4196 int ret;
4197
4198 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
4199 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
4200 true);
4201 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4202 }
4203 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);
4204
4205 ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
4206 if (ret) {
4207 hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
4208 return ret;
4209 }
4210
4211 hns3_parse_copper_phy_params(desc, mac);
4212
4213 return 0;
4214 }
4215
4216 static int
4217 hns3_update_copper_link_info(struct hns3_hw *hw)
4218 {
4219 struct hns3_mac *mac = &hw->mac;
4220 struct hns3_mac mac_info;
4221 int ret;
4222
4223 memset(&mac_info, 0, sizeof(struct hns3_mac));
4224 ret = hns3_get_copper_phy_params(hw, &mac_info);
4225 if (ret)
4226 return ret;
4227
4228 if (mac_info.link_speed != mac->link_speed) {
4229 ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4230 if (ret)
4231 return ret;
4232 }
4233
4234 mac->link_speed = mac_info.link_speed;
4235 mac->link_duplex = mac_info.link_duplex;
4236 mac->link_autoneg = mac_info.link_autoneg;
4237 mac->supported_speed = mac_info.supported_speed;
4238 mac->advertising = mac_info.advertising;
4239 mac->lp_advertising = mac_info.lp_advertising;
4240 mac->support_autoneg =
mac_info.support_autoneg; 4241 4242 return 0; 4243 } 4244 4245 static int 4246 hns3_update_link_info(struct rte_eth_dev *eth_dev) 4247 { 4248 struct hns3_adapter *hns = eth_dev->data->dev_private; 4249 struct hns3_hw *hw = &hns->hw; 4250 4251 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) 4252 return hns3_update_copper_link_info(hw); 4253 4254 return hns3_update_fiber_link_info(hw); 4255 } 4256 4257 static int 4258 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) 4259 { 4260 struct hns3_config_mac_mode_cmd *req; 4261 struct hns3_cmd_desc desc; 4262 uint32_t loop_en = 0; 4263 uint8_t val = 0; 4264 int ret; 4265 4266 req = (struct hns3_config_mac_mode_cmd *)desc.data; 4267 4268 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); 4269 if (enable) 4270 val = 1; 4271 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); 4272 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); 4273 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); 4274 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); 4275 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); 4276 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); 4277 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); 4278 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); 4279 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); 4280 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); 4281 4282 /* 4283 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC 4284 * when receiving frames. Otherwise, CRC will be stripped. 4285 */ 4286 if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) 4287 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); 4288 else 4289 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); 4290 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); 4291 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); 4292 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); 4293 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); 4294 4295 ret = hns3_cmd_send(hw, &desc, 1); 4296 if (ret) 4297 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret); 4298 4299 return ret; 4300 } 4301 4302 static int 4303 hns3_get_mac_link_status(struct hns3_hw *hw) 4304 { 4305 struct hns3_link_status_cmd *req; 4306 struct hns3_cmd_desc desc; 4307 int link_status; 4308 int ret; 4309 4310 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); 4311 ret = hns3_cmd_send(hw, &desc, 1); 4312 if (ret) { 4313 hns3_err(hw, "get link status cmd failed %d", ret); 4314 return RTE_ETH_LINK_DOWN; 4315 } 4316 4317 req = (struct hns3_link_status_cmd *)desc.data; 4318 link_status = req->status & HNS3_LINK_STATUS_UP_M; 4319 4320 return !!link_status; 4321 } 4322 4323 static bool 4324 hns3_update_link_status(struct hns3_hw *hw) 4325 { 4326 int state; 4327 4328 state = hns3_get_mac_link_status(hw); 4329 if (state != hw->mac.link_status) { 4330 hw->mac.link_status = state; 4331 hns3_warn(hw, "Link status change to %s!", state ? 
"up" : "down"); 4332 return true; 4333 } 4334 4335 return false; 4336 } 4337 4338 void 4339 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query) 4340 { 4341 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 4342 struct rte_eth_link new_link; 4343 int ret; 4344 4345 if (query) 4346 hns3_update_port_link_info(dev); 4347 4348 memset(&new_link, 0, sizeof(new_link)); 4349 hns3_setup_linkstatus(dev, &new_link); 4350 4351 ret = rte_eth_linkstatus_set(dev, &new_link); 4352 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0) 4353 hns3_start_report_lse(dev); 4354 } 4355 4356 static void 4357 hns3_service_handler(void *param) 4358 { 4359 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 4360 struct hns3_adapter *hns = eth_dev->data->dev_private; 4361 struct hns3_hw *hw = &hns->hw; 4362 4363 if (!hns3_is_reset_pending(hns)) { 4364 hns3_update_linkstatus_and_event(hw, true); 4365 hns3_update_hw_stats(hw); 4366 } else { 4367 hns3_warn(hw, "Cancel the query when reset is pending"); 4368 } 4369 4370 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); 4371 } 4372 4373 static int 4374 hns3_init_hardware(struct hns3_adapter *hns) 4375 { 4376 struct hns3_hw *hw = &hns->hw; 4377 int ret; 4378 4379 /* 4380 * All queue-related HW operations must be performed after the TCAM 4381 * table is configured. 4382 */ 4383 ret = hns3_map_tqp(hw); 4384 if (ret) { 4385 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); 4386 return ret; 4387 } 4388 4389 ret = hns3_init_umv_space(hw); 4390 if (ret) { 4391 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); 4392 return ret; 4393 } 4394 4395 ret = hns3_mac_init(hw); 4396 if (ret) { 4397 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); 4398 goto err_mac_init; 4399 } 4400 4401 ret = hns3_init_mgr_tbl(hw); 4402 if (ret) { 4403 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); 4404 goto err_mac_init; 4405 } 4406 4407 ret = hns3_promisc_init(hw); 4408 if (ret) { 4409 PMD_INIT_LOG(ERR, "Failed to init promisc: %d", 4410 ret); 4411 goto err_mac_init; 4412 } 4413 4414 ret = hns3_init_vlan_config(hns); 4415 if (ret) { 4416 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); 4417 goto err_mac_init; 4418 } 4419 4420 ret = hns3_dcb_init(hw); 4421 if (ret) { 4422 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); 4423 goto err_mac_init; 4424 } 4425 4426 ret = hns3_init_fd_config(hns); 4427 if (ret) { 4428 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); 4429 goto err_mac_init; 4430 } 4431 4432 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); 4433 if (ret) { 4434 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); 4435 goto err_mac_init; 4436 } 4437 4438 ret = hns3_config_gro(hw, false); 4439 if (ret) { 4440 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 4441 goto err_mac_init; 4442 } 4443 4444 /* 4445 * In the initialization clearing the all hardware mapping relationship 4446 * configurations between queues and interrupt vectors is needed, so 4447 * some error caused by the residual configurations, such as the 4448 * unexpected interrupt, can be avoid. 
4449 */
4450 ret = hns3_init_ring_with_vector(hw);
4451 if (ret) {
4452 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
4453 goto err_mac_init;
4454 }
4455
4456 ret = hns3_ptp_init(hw);
4457 if (ret) {
4458 PMD_INIT_LOG(ERR, "Failed to init PTP, ret = %d", ret);
4459 goto err_mac_init;
4460 }
4461
4462 return 0;
4463
4464 err_mac_init:
4465 hns3_uninit_umv_space(hw);
4466 return ret;
4467 }
4468
4469 static int
4470 hns3_clear_hw(struct hns3_hw *hw)
4471 {
4472 struct hns3_cmd_desc desc;
4473 int ret;
4474
4475 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false);
4476
4477 ret = hns3_cmd_send(hw, &desc, 1);
4478 if (ret && ret != -EOPNOTSUPP)
4479 return ret;
4480
4481 return 0;
4482 }
4483
4484 static void
4485 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)
4486 {
4487 uint32_t val;
4488
4489 /*
4490 * New firmware can report more hardware error types through MSI-X.
4491 * These errors are defined as RAS errors in hardware and belong to
4492 * a different type from the MSI-X errors processed by the network
4493 * driver.
4494 *
4495 * The network driver should enable this error reporting at initialization.
4496 */
4497 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
4498 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0);
4499 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
4500 }
4501
4502 static uint32_t
4503 hns3_set_fiber_default_support_speed(struct hns3_hw *hw)
4504 {
4505 struct hns3_mac *mac = &hw->mac;
4506
4507 switch (mac->link_speed) {
4508 case RTE_ETH_SPEED_NUM_1G:
4509 return HNS3_FIBER_LINK_SPEED_1G_BIT;
4510 case RTE_ETH_SPEED_NUM_10G:
4511 return HNS3_FIBER_LINK_SPEED_10G_BIT;
4512 case RTE_ETH_SPEED_NUM_25G:
4513 return HNS3_FIBER_LINK_SPEED_25G_BIT;
4514 case RTE_ETH_SPEED_NUM_40G:
4515 return HNS3_FIBER_LINK_SPEED_40G_BIT;
4516 case RTE_ETH_SPEED_NUM_50G:
4517 return HNS3_FIBER_LINK_SPEED_50G_BIT;
4518 case RTE_ETH_SPEED_NUM_100G:
4519 return HNS3_FIBER_LINK_SPEED_100G_BIT;
4520 case RTE_ETH_SPEED_NUM_200G:
4521 return HNS3_FIBER_LINK_SPEED_200G_BIT;
4522 default:
4523 hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
4524 return 0;
4525 }
4526 }
4527
4528 /*
4529 * Validity of supported_speed for fiber and copper media types can be
4530 * guaranteed by the following policy:
4531 * Copper:
4532 * Although the initialization of the PHY in the firmware may not be
4533 * completed, the firmware guarantees that supported_speed is a
4534 * valid value.
4535 * Fiber:
4536 * If the firmware version supports the active query way of the
4537 * HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained
4538 * through it. If unsupported, use the SFP's speed as the value of the
4539 * supported_speed.
4540 */
4541 static int
4542 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev)
4543 {
4544 struct hns3_adapter *hns = eth_dev->data->dev_private;
4545 struct hns3_hw *hw = &hns->hw;
4546 struct hns3_mac *mac = &hw->mac;
4547 int ret;
4548
4549 ret = hns3_update_link_info(eth_dev);
4550 if (ret)
4551 return ret;
4552
4553 if (mac->media_type == HNS3_MEDIA_TYPE_FIBER ||
4554 mac->media_type == HNS3_MEDIA_TYPE_BACKPLANE) {
4555 /*
4556 * Some firmware does not report supported_speed and only
4557 * reports the effective speed of the SFP/backplane. In this
4558 * case, it is necessary to use the SFP/backplane's speed as
4559 * the supported_speed.
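 *
 * For example, if an older firmware reports only an effective
 * speed of 25G, supported_speed falls back to
 * HNS3_FIBER_LINK_SPEED_25G_BIT via
 * hns3_set_fiber_default_support_speed() below.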
4560 */
4561 if (mac->supported_speed == 0)
4562 mac->supported_speed =
4563 hns3_set_fiber_default_support_speed(hw);
4564 }
4565
4566 return 0;
4567 }
4568
4569 static int
4570 hns3_init_pf(struct rte_eth_dev *eth_dev)
4571 {
4572 struct rte_device *dev = eth_dev->device;
4573 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4574 struct hns3_adapter *hns = eth_dev->data->dev_private;
4575 struct hns3_hw *hw = &hns->hw;
4576 int ret;
4577
4578 PMD_INIT_FUNC_TRACE();
4579
4580 /* Get hardware io base address from pcie BAR2 IO space */
4581 hw->io_base = pci_dev->mem_resource[2].addr;
4582
4583 ret = hns3_get_pci_revision_id(hw, &hw->revision);
4584 if (ret)
4585 return ret;
4586
4587 /* Firmware command queue initialize */
4588 ret = hns3_cmd_init_queue(hw);
4589 if (ret) {
4590 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
4591 goto err_cmd_init_queue;
4592 }
4593
4594 hns3_clear_all_event_cause(hw);
4595
4596 /* Firmware command initialize */
4597 ret = hns3_cmd_init(hw);
4598 if (ret) {
4599 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
4600 goto err_cmd_init;
4601 }
4602
4603 hns3_tx_push_init(eth_dev);
4604
4605 /*
4606 * To ensure that the hardware environment is clean during
4607 * initialization, the driver actively clears the hardware state,
4608 * including the PF's and corresponding VFs' vlan, mac and
4609 * flow table configurations, etc.
4610 */
4611 ret = hns3_clear_hw(hw);
4612 if (ret) {
4613 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret);
4614 goto err_cmd_init;
4615 }
4616
4617 hns3_config_all_msix_error(hw, true);
4618
4619 ret = rte_intr_callback_register(pci_dev->intr_handle,
4620 hns3_interrupt_handler,
4621 eth_dev);
4622 if (ret) {
4623 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
4624 goto err_intr_callback_register;
4625 }
4626
4627 /* Enable interrupt */
4628 rte_intr_enable(pci_dev->intr_handle);
4629 hns3_pf_enable_irq0(hw);
4630
4631 /* Get configuration */
4632 ret = hns3_get_configuration(hw);
4633 if (ret) {
4634 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
4635 goto err_get_config;
4636 }
4637
4638 ret = hns3_stats_init(hw);
4639 if (ret)
4640 goto err_get_config;
4641
4642 ret = hns3_init_hardware(hns);
4643 if (ret) {
4644 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
4645 goto err_init_hw;
4646 }
4647
4648 /* Initialize flow director filter list & hash */
4649 ret = hns3_fdir_filter_init(hns);
4650 if (ret) {
4651 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
4652 goto err_fdir;
4653 }
4654
4655 hns3_rss_set_default_args(hw);
4656
4657 ret = hns3_enable_hw_error_intr(hns, true);
4658 if (ret) {
4659 PMD_INIT_LOG(ERR, "failed to enable hw error interrupts: %d",
4660 ret);
4661 goto err_enable_intr;
4662 }
4663
4664 ret = hns3_get_port_supported_speed(eth_dev);
4665 if (ret) {
4666 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported "
4667 "by device, ret = %d.", ret);
4668 goto err_supported_speed;
4669 }
4670
4671 hns3_tm_conf_init(eth_dev);
4672
4673 return 0;
4674
4675 err_supported_speed:
4676 (void)hns3_enable_hw_error_intr(hns, false);
4677 err_enable_intr:
4678 hns3_fdir_filter_uninit(hns);
4679 err_fdir:
4680 hns3_uninit_umv_space(hw);
4681 hns3_ptp_uninit(hw);
4682 err_init_hw:
4683 hns3_stats_uninit(hw);
4684 err_get_config:
4685 hns3_pf_disable_irq0(hw);
4686 rte_intr_disable(pci_dev->intr_handle);
4687 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler,
4688 eth_dev);
4689 err_intr_callback_register:
4690 err_cmd_init:
4691 hns3_cmd_uninit(hw); 4692 hns3_cmd_destroy_queue(hw); 4693 err_cmd_init_queue: 4694 hw->io_base = NULL; 4695 4696 return ret; 4697 } 4698 4699 static void 4700 hns3_uninit_pf(struct rte_eth_dev *eth_dev) 4701 { 4702 struct hns3_adapter *hns = eth_dev->data->dev_private; 4703 struct rte_device *dev = eth_dev->device; 4704 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 4705 struct hns3_hw *hw = &hns->hw; 4706 4707 PMD_INIT_FUNC_TRACE(); 4708 4709 hns3_tm_conf_uninit(eth_dev); 4710 hns3_enable_hw_error_intr(hns, false); 4711 hns3_rss_uninit(hns); 4712 (void)hns3_config_gro(hw, false); 4713 hns3_promisc_uninit(hw); 4714 hns3_flow_uninit(eth_dev); 4715 hns3_fdir_filter_uninit(hns); 4716 hns3_uninit_umv_space(hw); 4717 hns3_ptp_uninit(hw); 4718 hns3_stats_uninit(hw); 4719 hns3_config_mac_tnl_int(hw, false); 4720 hns3_pf_disable_irq0(hw); 4721 rte_intr_disable(pci_dev->intr_handle); 4722 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler, 4723 eth_dev); 4724 hns3_config_all_msix_error(hw, false); 4725 hns3_cmd_uninit(hw); 4726 hns3_cmd_destroy_queue(hw); 4727 hw->io_base = NULL; 4728 } 4729 4730 static uint32_t 4731 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds) 4732 { 4733 uint32_t speed_bit; 4734 4735 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) { 4736 case RTE_ETH_LINK_SPEED_10M: 4737 speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT; 4738 break; 4739 case RTE_ETH_LINK_SPEED_10M_HD: 4740 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT; 4741 break; 4742 case RTE_ETH_LINK_SPEED_100M: 4743 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT; 4744 break; 4745 case RTE_ETH_LINK_SPEED_100M_HD: 4746 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT; 4747 break; 4748 case RTE_ETH_LINK_SPEED_1G: 4749 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT; 4750 break; 4751 default: 4752 speed_bit = 0; 4753 break; 4754 } 4755 4756 return speed_bit; 4757 } 4758 4759 static uint32_t 4760 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds) 4761 { 4762 uint32_t speed_bit; 4763 4764 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) { 4765 case RTE_ETH_LINK_SPEED_1G: 4766 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT; 4767 break; 4768 case RTE_ETH_LINK_SPEED_10G: 4769 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT; 4770 break; 4771 case RTE_ETH_LINK_SPEED_25G: 4772 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT; 4773 break; 4774 case RTE_ETH_LINK_SPEED_40G: 4775 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT; 4776 break; 4777 case RTE_ETH_LINK_SPEED_50G: 4778 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT; 4779 break; 4780 case RTE_ETH_LINK_SPEED_100G: 4781 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT; 4782 break; 4783 case RTE_ETH_LINK_SPEED_200G: 4784 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT; 4785 break; 4786 default: 4787 speed_bit = 0; 4788 break; 4789 } 4790 4791 return speed_bit; 4792 } 4793 4794 static int 4795 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) 4796 { 4797 struct hns3_mac *mac = &hw->mac; 4798 uint32_t supported_speed = mac->supported_speed; 4799 uint32_t speed_bit = 0; 4800 4801 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 4802 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds); 4803 else 4804 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds); 4805 4806 if (!(speed_bit & supported_speed)) { 4807 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.", 4808 link_speeds); 4809 return -EINVAL; 4810 } 4811 4812 return 0; 4813 } 4814 4815 static uint32_t 4816 hns3_get_link_speed(uint32_t link_speeds) 4817 { 4818 uint32_t speed = 
RTE_ETH_SPEED_NUM_NONE; 4819 4820 if (link_speeds & RTE_ETH_LINK_SPEED_10M || 4821 link_speeds & RTE_ETH_LINK_SPEED_10M_HD) 4822 speed = RTE_ETH_SPEED_NUM_10M; 4823 if (link_speeds & RTE_ETH_LINK_SPEED_100M || 4824 link_speeds & RTE_ETH_LINK_SPEED_100M_HD) 4825 speed = RTE_ETH_SPEED_NUM_100M; 4826 if (link_speeds & RTE_ETH_LINK_SPEED_1G) 4827 speed = RTE_ETH_SPEED_NUM_1G; 4828 if (link_speeds & RTE_ETH_LINK_SPEED_10G) 4829 speed = RTE_ETH_SPEED_NUM_10G; 4830 if (link_speeds & RTE_ETH_LINK_SPEED_25G) 4831 speed = RTE_ETH_SPEED_NUM_25G; 4832 if (link_speeds & RTE_ETH_LINK_SPEED_40G) 4833 speed = RTE_ETH_SPEED_NUM_40G; 4834 if (link_speeds & RTE_ETH_LINK_SPEED_50G) 4835 speed = RTE_ETH_SPEED_NUM_50G; 4836 if (link_speeds & RTE_ETH_LINK_SPEED_100G) 4837 speed = RTE_ETH_SPEED_NUM_100G; 4838 if (link_speeds & RTE_ETH_LINK_SPEED_200G) 4839 speed = RTE_ETH_SPEED_NUM_200G; 4840 4841 return speed; 4842 } 4843 4844 static uint8_t 4845 hns3_get_link_duplex(uint32_t link_speeds) 4846 { 4847 if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) || 4848 (link_speeds & RTE_ETH_LINK_SPEED_100M_HD)) 4849 return RTE_ETH_LINK_HALF_DUPLEX; 4850 else 4851 return RTE_ETH_LINK_FULL_DUPLEX; 4852 } 4853 4854 static int 4855 hns3_set_copper_port_link_speed(struct hns3_hw *hw, 4856 struct hns3_set_link_speed_cfg *cfg) 4857 { 4858 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 4859 struct hns3_phy_params_bd0_cmd *req; 4860 uint16_t i; 4861 4862 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 4863 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 4864 false); 4865 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4866 } 4867 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false); 4868 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 4869 req->autoneg = cfg->autoneg; 4870 4871 /* 4872 * The full speed capability is used to negotiate when 4873 * auto-negotiation is enabled. 4874 */ 4875 if (cfg->autoneg) { 4876 req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT | 4877 HNS3_PHY_LINK_SPEED_10M_HD_BIT | 4878 HNS3_PHY_LINK_SPEED_100M_BIT | 4879 HNS3_PHY_LINK_SPEED_100M_HD_BIT | 4880 HNS3_PHY_LINK_SPEED_1000M_BIT; 4881 } else { 4882 req->speed = cfg->speed; 4883 req->duplex = cfg->duplex; 4884 } 4885 4886 return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 4887 } 4888 4889 static int 4890 hns3_set_autoneg(struct hns3_hw *hw, bool enable) 4891 { 4892 struct hns3_config_auto_neg_cmd *req; 4893 struct hns3_cmd_desc desc; 4894 uint32_t flag = 0; 4895 int ret; 4896 4897 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false); 4898 4899 req = (struct hns3_config_auto_neg_cmd *)desc.data; 4900 if (enable) 4901 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1); 4902 req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag); 4903 4904 ret = hns3_cmd_send(hw, &desc, 1); 4905 if (ret) 4906 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret); 4907 4908 return ret; 4909 } 4910 4911 static int 4912 hns3_set_fiber_port_link_speed(struct hns3_hw *hw, 4913 struct hns3_set_link_speed_cfg *cfg) 4914 { 4915 int ret; 4916 4917 if (hw->mac.support_autoneg) { 4918 ret = hns3_set_autoneg(hw, cfg->autoneg); 4919 if (ret) { 4920 hns3_err(hw, "failed to configure auto-negotiation."); 4921 return ret; 4922 } 4923 4924 /* 4925 * To enable auto-negotiation, we only need to open the switch 4926 * of auto-negotiation, then firmware sets all speed 4927 * capabilities. 
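 * Hence a fixed speed/duplex pair only needs to be programmed when
 * auto-negotiation is disabled, which hns3_cfg_mac_speed_dup() does
 * at the end of this function.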
4928 */
4929 if (cfg->autoneg)
4930 return 0;
4931 }
4932
4933 /*
4934 * Some hardware doesn't support auto-negotiation, but users may not
4935 * have configured link_speeds (default 0), which means
4936 * auto-negotiation. In this case, a warning message needs to be
4937 * printed instead of an error.
4938 */
4939 if (cfg->autoneg) {
4940 hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
4941 return 0;
4942 }
4943
4944 return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
4945 }
4946
4947 const char *
4948 hns3_get_media_type_name(uint8_t media_type)
4949 {
4950 if (media_type == HNS3_MEDIA_TYPE_FIBER)
4951 return "fiber";
4952 else if (media_type == HNS3_MEDIA_TYPE_COPPER)
4953 return "copper";
4954 else if (media_type == HNS3_MEDIA_TYPE_BACKPLANE)
4955 return "backplane";
4956 else
4957 return "unknown";
4958 }
4959
4960 static int
4961 hns3_set_port_link_speed(struct hns3_hw *hw,
4962 struct hns3_set_link_speed_cfg *cfg)
4963 {
4964 int ret;
4965
4966 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
4967 ret = hns3_set_copper_port_link_speed(hw, cfg);
4968 else
4969 ret = hns3_set_fiber_port_link_speed(hw, cfg);
4970
4971 if (ret) {
4972 hns3_err(hw, "failed to set %s port link speed, ret = %d.",
4973 hns3_get_media_type_name(hw->mac.media_type),
4974 ret);
4975 return ret;
4976 }
4977
4978 return 0;
4979 }
4980
4981 static int
4982 hns3_apply_link_speed(struct hns3_hw *hw)
4983 {
4984 struct rte_eth_conf *conf = &hw->data->dev_conf;
4985 struct hns3_set_link_speed_cfg cfg;
4986
4987 memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
4988 cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
4989 RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
4990 if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
4991 cfg.speed = hns3_get_link_speed(conf->link_speeds);
4992 cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
4993 }
4994
4995 return hns3_set_port_link_speed(hw, &cfg);
4996 }
4997
4998 static int
4999 hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
5000 {
5001 struct hns3_hw *hw = &hns->hw;
5002 bool link_en;
5003 int ret;
5004
5005 ret = hns3_update_queue_map_configure(hns);
5006 if (ret) {
5007 hns3_err(hw, "failed to update queue mapping configuration, ret = %d",
5008 ret);
5009 return ret;
5010 }
5011
5012 /* Note: hns3_tm_conf_update must be called after configuring DCB. */
5013 ret = hns3_tm_conf_update(hw);
5014 if (ret) {
5015 PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
5016 return ret;
5017 }
5018
5019 hns3_enable_rxd_adv_layout(hw);
5020
5021 ret = hns3_init_queues(hns, reset_queue);
5022 if (ret) {
5023 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
5024 return ret;
5025 }
5026
5027 link_en = hw->set_link_down ? false : true;
5028 ret = hns3_cfg_mac_mode(hw, link_en);
5029 if (ret) {
5030 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
5031 goto err_config_mac_mode;
5032 }
5033
5034 ret = hns3_apply_link_speed(hw);
5035 if (ret)
5036 goto err_set_link_speed;
5037
5038 return hns3_restore_filter(hns);
5039
5040 err_set_link_speed:
5041 (void)hns3_cfg_mac_mode(hw, false);
5042
5043 err_config_mac_mode:
5044 hns3_dev_release_mbufs(hns);
5045 /*
5046 * This is exception handling. hns3_reset_all_tqps logs its own
5047 * error message if it fails, so there is no need to check its
5048 * return value here. Keep ret as the error code that caused
5049 * the exception.
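 *
 * For example, if hns3_apply_link_speed() fails with -EINVAL, the
 * caller still sees -EINVAL even if the roll-back below fails too.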
5050 */ 5051 (void)hns3_reset_all_tqps(hns); 5052 return ret; 5053 } 5054 5055 static int 5056 hns3_dev_start(struct rte_eth_dev *dev) 5057 { 5058 struct hns3_adapter *hns = dev->data->dev_private; 5059 struct hns3_hw *hw = &hns->hw; 5060 bool old_state = hw->set_link_down; 5061 int ret; 5062 5063 PMD_INIT_FUNC_TRACE(); 5064 if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) 5065 return -EBUSY; 5066 5067 rte_spinlock_lock(&hw->lock); 5068 hw->adapter_state = HNS3_NIC_STARTING; 5069 5070 /* 5071 * If the dev_set_link_down() API has been called, the "set_link_down" 5072 * flag can be cleared by dev_start() API. In addition, the flag should 5073 * also be cleared before calling hns3_do_start() so that MAC can be 5074 * enabled in dev_start stage. 5075 */ 5076 hw->set_link_down = false; 5077 ret = hns3_do_start(hns, true); 5078 if (ret) 5079 goto do_start_fail; 5080 5081 ret = hns3_map_rx_interrupt(dev); 5082 if (ret) 5083 goto map_rx_inter_err; 5084 5085 /* 5086 * There are three register used to control the status of a TQP 5087 * (contains a pair of Tx queue and Rx queue) in the new version network 5088 * engine. One is used to control the enabling of Tx queue, the other is 5089 * used to control the enabling of Rx queue, and the last is the master 5090 * switch used to control the enabling of the tqp. The Tx register and 5091 * TQP register must be enabled at the same time to enable a Tx queue. 5092 * The same applies to the Rx queue. For the older network engine, this 5093 * function only refresh the enabled flag, and it is used to update the 5094 * status of queue in the dpdk framework. 5095 */ 5096 ret = hns3_start_all_txqs(dev); 5097 if (ret) 5098 goto map_rx_inter_err; 5099 5100 ret = hns3_start_all_rxqs(dev); 5101 if (ret) 5102 goto start_all_rxqs_fail; 5103 5104 hw->adapter_state = HNS3_NIC_STARTED; 5105 rte_spinlock_unlock(&hw->lock); 5106 5107 hns3_rx_scattered_calc(dev); 5108 hns3_start_rxtx_datapath(dev); 5109 5110 /* Enable interrupt of all rx queues before enabling queues */ 5111 hns3_dev_all_rx_queue_intr_enable(hw, true); 5112 5113 /* 5114 * After finished the initialization, enable tqps to receive/transmit 5115 * packets and refresh all queue status. 5116 */ 5117 hns3_start_tqps(hw); 5118 5119 hns3_tm_dev_start_proc(hw); 5120 5121 if (dev->data->dev_conf.intr_conf.lsc != 0) 5122 hns3_dev_link_update(dev, 0); 5123 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); 5124 5125 hns3_info(hw, "hns3 dev start successful!"); 5126 5127 return 0; 5128 5129 start_all_rxqs_fail: 5130 hns3_stop_all_txqs(dev); 5131 map_rx_inter_err: 5132 (void)hns3_do_stop(hns); 5133 do_start_fail: 5134 hw->set_link_down = old_state; 5135 hw->adapter_state = HNS3_NIC_CONFIGURED; 5136 rte_spinlock_unlock(&hw->lock); 5137 5138 return ret; 5139 } 5140 5141 static int 5142 hns3_do_stop(struct hns3_adapter *hns) 5143 { 5144 struct hns3_hw *hw = &hns->hw; 5145 int ret; 5146 5147 /* 5148 * The "hns3_do_stop" function will also be called by .stop_service to 5149 * prepare reset. At the time of global or IMP reset, the command cannot 5150 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be 5151 * accessed during the reset process. So the mbuf can not be released 5152 * during reset and is required to be released after the reset is 5153 * completed. 
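 * The deferred release is tracked through hw->reset.mbuf_deferred_free,
 * which hns3_stop_service() sets when it stops the queues ahead of a
 * reset.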
5154 */ 5155 if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) 5156 hns3_dev_release_mbufs(hns); 5157 5158 ret = hns3_cfg_mac_mode(hw, false); 5159 if (ret) 5160 return ret; 5161 hw->mac.link_status = RTE_ETH_LINK_DOWN; 5162 5163 if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) { 5164 hns3_configure_all_mac_addr(hns, true); 5165 ret = hns3_reset_all_tqps(hns); 5166 if (ret) { 5167 hns3_err(hw, "failed to reset all queues ret = %d.", 5168 ret); 5169 return ret; 5170 } 5171 } 5172 5173 return 0; 5174 } 5175 5176 static int 5177 hns3_dev_stop(struct rte_eth_dev *dev) 5178 { 5179 struct hns3_adapter *hns = dev->data->dev_private; 5180 struct hns3_hw *hw = &hns->hw; 5181 5182 PMD_INIT_FUNC_TRACE(); 5183 dev->data->dev_started = 0; 5184 5185 hw->adapter_state = HNS3_NIC_STOPPING; 5186 hns3_stop_rxtx_datapath(dev); 5187 5188 rte_spinlock_lock(&hw->lock); 5189 if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) { 5190 hns3_tm_dev_stop_proc(hw); 5191 hns3_config_mac_tnl_int(hw, false); 5192 hns3_stop_tqps(hw); 5193 hns3_do_stop(hns); 5194 hns3_unmap_rx_interrupt(dev); 5195 hw->adapter_state = HNS3_NIC_CONFIGURED; 5196 } 5197 hns3_rx_scattered_reset(dev); 5198 rte_eal_alarm_cancel(hns3_service_handler, dev); 5199 hns3_stop_report_lse(dev); 5200 rte_spinlock_unlock(&hw->lock); 5201 5202 return 0; 5203 } 5204 5205 static int 5206 hns3_dev_close(struct rte_eth_dev *eth_dev) 5207 { 5208 struct hns3_adapter *hns = eth_dev->data->dev_private; 5209 struct hns3_hw *hw = &hns->hw; 5210 int ret = 0; 5211 5212 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 5213 hns3_mp_uninit(eth_dev); 5214 return 0; 5215 } 5216 5217 if (hw->adapter_state == HNS3_NIC_STARTED) 5218 ret = hns3_dev_stop(eth_dev); 5219 5220 hw->adapter_state = HNS3_NIC_CLOSING; 5221 hns3_reset_abort(hns); 5222 hw->adapter_state = HNS3_NIC_CLOSED; 5223 5224 hns3_configure_all_mc_mac_addr(hns, true); 5225 hns3_remove_all_vlan_table(hns); 5226 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0); 5227 hns3_uninit_pf(eth_dev); 5228 hns3_free_all_queues(eth_dev); 5229 rte_free(hw->reset.wait_data); 5230 hns3_mp_uninit(eth_dev); 5231 hns3_warn(hw, "Close port %u finished", hw->data->port_id); 5232 5233 return ret; 5234 } 5235 5236 static void 5237 hns3_get_autoneg_rxtx_pause(struct hns3_hw *hw, bool *rx_pause, bool *tx_pause) 5238 { 5239 struct hns3_mac *mac = &hw->mac; 5240 uint32_t advertising = mac->advertising; 5241 uint32_t lp_advertising = mac->lp_advertising; 5242 *rx_pause = false; 5243 *tx_pause = false; 5244 5245 if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) { 5246 *rx_pause = true; 5247 *tx_pause = true; 5248 } else if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) { 5249 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5250 *rx_pause = true; 5251 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5252 *tx_pause = true; 5253 } 5254 } 5255 5256 static enum hns3_fc_mode 5257 hns3_get_autoneg_fc_mode(struct hns3_hw *hw) 5258 { 5259 enum hns3_fc_mode current_mode; 5260 bool rx_pause = false; 5261 bool tx_pause = false; 5262 5263 hns3_get_autoneg_rxtx_pause(hw, &rx_pause, &tx_pause); 5264 5265 if (rx_pause && tx_pause) 5266 current_mode = HNS3_FC_FULL; 5267 else if (rx_pause) 5268 current_mode = HNS3_FC_RX_PAUSE; 5269 else if (tx_pause) 5270 current_mode = HNS3_FC_TX_PAUSE; 5271 else 5272 current_mode = HNS3_FC_NONE; 5273 5274 return current_mode; 5275 } 5276 5277 static enum hns3_fc_mode 5278 
hns3_get_current_fc_mode(struct rte_eth_dev *dev) 5279 { 5280 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5281 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5282 struct hns3_mac *mac = &hw->mac; 5283 5284 /* 5285 * When the flow control mode is obtained, the device may not complete 5286 * auto-negotiation. It is necessary to wait for link establishment. 5287 */ 5288 (void)hns3_dev_link_update(dev, 1); 5289 5290 /* 5291 * If the link auto-negotiation of the nic is disabled, or the flow 5292 * control auto-negotiation is not supported, the forced flow control 5293 * mode is used. 5294 */ 5295 if (mac->link_autoneg == 0 || !pf->support_fc_autoneg) 5296 return hw->requested_fc_mode; 5297 5298 return hns3_get_autoneg_fc_mode(hw); 5299 } 5300 5301 int 5302 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5303 { 5304 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5305 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5306 enum hns3_fc_mode current_mode; 5307 5308 current_mode = hns3_get_current_fc_mode(dev); 5309 switch (current_mode) { 5310 case HNS3_FC_FULL: 5311 fc_conf->mode = RTE_ETH_FC_FULL; 5312 break; 5313 case HNS3_FC_TX_PAUSE: 5314 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 5315 break; 5316 case HNS3_FC_RX_PAUSE: 5317 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 5318 break; 5319 case HNS3_FC_NONE: 5320 default: 5321 fc_conf->mode = RTE_ETH_FC_NONE; 5322 break; 5323 } 5324 5325 fc_conf->pause_time = pf->pause_time; 5326 fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0; 5327 5328 return 0; 5329 } 5330 5331 static int 5332 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) 5333 { 5334 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 5335 5336 if (!pf->support_fc_autoneg) { 5337 if (autoneg != 0) { 5338 hns3_err(hw, "unsupported fc auto-negotiation."); 5339 return -EOPNOTSUPP; 5340 } 5341 5342 return 0; 5343 } 5344 5345 /* 5346 * If flow control auto-negotiation of the NIC is supported, all 5347 * auto-negotiation features are supported. 5348 */ 5349 if (autoneg != hw->mac.link_autoneg) { 5350 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!"); 5351 return -EOPNOTSUPP; 5352 } 5353 5354 return 0; 5355 } 5356 5357 static int 5358 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5359 { 5360 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5361 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5362 int ret; 5363 5364 if (fc_conf->high_water || fc_conf->low_water || 5365 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) { 5366 hns3_err(hw, "Unsupported flow control settings specified, " 5367 "high_water(%u), low_water(%u), send_xon(%u) and " 5368 "mac_ctrl_frame_fwd(%u) must be set to '0'", 5369 fc_conf->high_water, fc_conf->low_water, 5370 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd); 5371 return -EINVAL; 5372 } 5373 5374 ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg); 5375 if (ret) 5376 return ret; 5377 5378 if (!fc_conf->pause_time) { 5379 hns3_err(hw, "Invalid pause time %u setting.", 5380 fc_conf->pause_time); 5381 return -EINVAL; 5382 } 5383 5384 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 5385 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) { 5386 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. 
" 5387 "current_fc_status = %d", hw->current_fc_status); 5388 return -EOPNOTSUPP; 5389 } 5390 5391 if (hw->num_tc > 1 && !pf->support_multi_tc_pause) { 5392 hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported."); 5393 return -EOPNOTSUPP; 5394 } 5395 5396 rte_spinlock_lock(&hw->lock); 5397 ret = hns3_fc_enable(dev, fc_conf); 5398 rte_spinlock_unlock(&hw->lock); 5399 5400 return ret; 5401 } 5402 5403 static int 5404 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, 5405 struct rte_eth_pfc_conf *pfc_conf) 5406 { 5407 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5408 int ret; 5409 5410 if (!hns3_dev_get_support(hw, DCB)) { 5411 hns3_err(hw, "This port does not support dcb configurations."); 5412 return -EOPNOTSUPP; 5413 } 5414 5415 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water || 5416 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) { 5417 hns3_err(hw, "Unsupported flow control settings specified, " 5418 "high_water(%u), low_water(%u), send_xon(%u) and " 5419 "mac_ctrl_frame_fwd(%u) must be set to '0'", 5420 pfc_conf->fc.high_water, pfc_conf->fc.low_water, 5421 pfc_conf->fc.send_xon, 5422 pfc_conf->fc.mac_ctrl_frame_fwd); 5423 return -EINVAL; 5424 } 5425 if (pfc_conf->fc.autoneg) { 5426 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 5427 return -EINVAL; 5428 } 5429 if (pfc_conf->fc.pause_time == 0) { 5430 hns3_err(hw, "Invalid pause time %u setting.", 5431 pfc_conf->fc.pause_time); 5432 return -EINVAL; 5433 } 5434 5435 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 5436 hw->current_fc_status == HNS3_FC_STATUS_PFC)) { 5437 hns3_err(hw, "MAC pause is enabled. Cannot set PFC." 5438 "current_fc_status = %d", hw->current_fc_status); 5439 return -EOPNOTSUPP; 5440 } 5441 5442 rte_spinlock_lock(&hw->lock); 5443 ret = hns3_dcb_pfc_enable(dev, pfc_conf); 5444 rte_spinlock_unlock(&hw->lock); 5445 5446 return ret; 5447 } 5448 5449 static int 5450 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) 5451 { 5452 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5453 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5454 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; 5455 int i; 5456 5457 rte_spinlock_lock(&hw->lock); 5458 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) 5459 dcb_info->nb_tcs = pf->local_max_tc; 5460 else 5461 dcb_info->nb_tcs = 1; 5462 5463 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) 5464 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; 5465 for (i = 0; i < dcb_info->nb_tcs; i++) 5466 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; 5467 5468 for (i = 0; i < hw->num_tc; i++) { 5469 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; 5470 dcb_info->tc_queue.tc_txq[0][i].base = 5471 hw->tc_queue[i].tqp_offset; 5472 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; 5473 dcb_info->tc_queue.tc_txq[0][i].nb_queue = 5474 hw->tc_queue[i].tqp_count; 5475 } 5476 rte_spinlock_unlock(&hw->lock); 5477 5478 return 0; 5479 } 5480 5481 static int 5482 hns3_reinit_dev(struct hns3_adapter *hns) 5483 { 5484 struct hns3_hw *hw = &hns->hw; 5485 int ret; 5486 5487 ret = hns3_cmd_init(hw); 5488 if (ret) { 5489 hns3_err(hw, "Failed to init cmd: %d", ret); 5490 return ret; 5491 } 5492 5493 ret = hns3_init_hardware(hns); 5494 if (ret) { 5495 hns3_err(hw, "Failed to init hardware: %d", ret); 5496 return ret; 5497 } 5498 5499 ret = hns3_reset_all_tqps(hns); 5500 if (ret) { 5501 hns3_err(hw, "Failed to reset all queues: %d", ret); 
5502 return ret;
5503 }
5504
5505 ret = hns3_enable_hw_error_intr(hns, true);
5506 if (ret) {
5507 hns3_err(hw, "failed to enable hw error interrupts: %d",
5508 ret);
5509 return ret;
5510 }
5511 hns3_info(hw, "Reset done, driver initialization finished.");
5512
5513 return 0;
5514 }
5515
5516 static bool
5517 is_pf_reset_done(struct hns3_hw *hw)
5518 {
5519 uint32_t val, reg, reg_bit;
5520
5521 switch (hw->reset.level) {
5522 case HNS3_IMP_RESET:
5523 reg = HNS3_GLOBAL_RESET_REG;
5524 reg_bit = HNS3_IMP_RESET_BIT;
5525 break;
5526 case HNS3_GLOBAL_RESET:
5527 reg = HNS3_GLOBAL_RESET_REG;
5528 reg_bit = HNS3_GLOBAL_RESET_BIT;
5529 break;
5530 case HNS3_FUNC_RESET:
5531 reg = HNS3_FUN_RST_ING;
5532 reg_bit = HNS3_FUN_RST_ING_B;
5533 break;
5534 case HNS3_FLR_RESET:
5535 default:
5536 hns3_err(hw, "Wait for unsupported reset level: %d",
5537 hw->reset.level);
5538 return true;
5539 }
5540 val = hns3_read_dev(hw, reg);
5541 if (hns3_get_bit(val, reg_bit))
5542 return false;
5543 else
5544 return true;
5545 }
5546
5547 static enum hns3_reset_level
5548 hns3_detect_reset_event(struct hns3_hw *hw)
5549 {
5550 enum hns3_reset_level new_req = HNS3_NONE_RESET;
5551 uint32_t vector0_intr_state;
5552
5553 vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
5554 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state)
5555 new_req = HNS3_IMP_RESET;
5556 else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state)
5557 new_req = HNS3_GLOBAL_RESET;
5558
5559 return new_req;
5560 }
5561
5562 bool
5563 hns3_is_reset_pending(struct hns3_adapter *hns)
5564 {
5565 enum hns3_reset_level new_req;
5566 struct hns3_hw *hw = &hns->hw;
5567 enum hns3_reset_level last_req;
5568
5569 /*
5570 * Only the primary process can handle the reset event,
5571 * so don't check for reset events in secondary processes.
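 * A secondary process does not own the interrupt handler or the reset
 * service, so a reset detected there could not be serviced anyway.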
5572 */
5573 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5574 return false;
5575
5576 new_req = hns3_detect_reset_event(hw);
5577 if (new_req == HNS3_NONE_RESET)
5578 return false;
5579
5580 last_req = hns3_get_reset_level(hns, &hw->reset.pending);
5581 if (last_req == HNS3_NONE_RESET || last_req < new_req) {
5582 rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
5583 hns3_schedule_delayed_reset(hns);
5584 hns3_warn(hw, "High level reset detected, delay the reset processing");
5585 return true;
5586 }
5587 last_req = hns3_get_reset_level(hns, &hw->reset.request);
5588 if (last_req != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
5589 hw->reset.level < last_req) {
5590 hns3_warn(hw, "High level reset %d is requested", last_req);
5591 return true;
5592 }
5593 return false;
5594 }
5595
5596 static int
5597 hns3_wait_hardware_ready(struct hns3_adapter *hns)
5598 {
5599 struct hns3_hw *hw = &hns->hw;
5600 struct hns3_wait_data *wait_data = hw->reset.wait_data;
5601 struct timeval tv;
5602
5603 if (wait_data->result == HNS3_WAIT_SUCCESS)
5604 return 0;
5605 else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
5606 hns3_clock_gettime(&tv);
5607 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
5608 tv.tv_sec, tv.tv_usec);
5609 return -ETIME;
5610 } else if (wait_data->result == HNS3_WAIT_REQUEST)
5611 return -EAGAIN;
5612
5613 wait_data->hns = hns;
5614 wait_data->check_completion = is_pf_reset_done;
5615 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
5616 HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms();
5617 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
5618 wait_data->count = HNS3_RESET_WAIT_CNT;
5619 wait_data->result = HNS3_WAIT_REQUEST;
5620 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
5621 return -EAGAIN;
5622 }
5623
5624 static int
5625 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
5626 {
5627 struct hns3_cmd_desc desc;
5628 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;
5629
5630 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
5631 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
5632 req->fun_reset_vfid = func_id;
5633
5634 return hns3_cmd_send(hw, &desc, 1);
5635 }
5636
5637 static void
5638 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
5639 {
5640 struct hns3_hw *hw = &hns->hw;
5641 struct timeval tv;
5642 uint32_t val;
5643
5644 hns3_clock_gettime(&tv);
5645 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
5646 hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
5647 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
5648 tv.tv_sec, tv.tv_usec);
5649 return;
5650 }
5651
5652 switch (reset_level) {
5653 case HNS3_IMP_RESET:
5654 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
5655 hns3_set_bit(val, HNS3_VECTOR0_TRIGGER_IMP_RESET_B, 1);
5656 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
5657 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
5658 tv.tv_sec, tv.tv_usec);
5659 break;
5660 case HNS3_GLOBAL_RESET:
5661 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
5662 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
5663 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
5664 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
5665 tv.tv_sec, tv.tv_usec);
5666 break;
5667 case HNS3_FUNC_RESET:
5668 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
5669 tv.tv_sec, tv.tv_usec);
5670 /* schedule again to check later */
5671 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
5672
hns3_schedule_reset(hns);
5673 break;
5674 default:
5675 hns3_warn(hw, "Unsupported reset level: %d", reset_level);
5676 return;
5677 }
5678 hns3_atomic_clear_bit(reset_level, &hw->reset.request);
5679 }
5680
5681 static enum hns3_reset_level
5682 hns3_get_reset_level(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
5683 {
5684 struct hns3_hw *hw = &hns->hw;
5685 enum hns3_reset_level reset_level = HNS3_NONE_RESET;
5686
5687 /* Return the highest priority reset level amongst all */
5688 if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
5689 reset_level = HNS3_IMP_RESET;
5690 else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
5691 reset_level = HNS3_GLOBAL_RESET;
5692 else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
5693 reset_level = HNS3_FUNC_RESET;
5694 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
5695 reset_level = HNS3_FLR_RESET;
5696
5697 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
5698 return HNS3_NONE_RESET;
5699
5700 return reset_level;
5701 }
5702
5703 static void
5704 hns3_record_imp_error(struct hns3_adapter *hns)
5705 {
5706 struct hns3_hw *hw = &hns->hw;
5707 uint32_t reg_val;
5708
5709 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
5710 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
5711 hns3_warn(hw, "Detected IMP RD poison!");
5712 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
5713 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
5714 }
5715
5716 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
5717 hns3_warn(hw, "Detected IMP CMDQ error!");
5718 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
5719 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
5720 }
5721 }
5722
5723 static int
5724 hns3_prepare_reset(struct hns3_adapter *hns)
5725 {
5726 struct hns3_hw *hw = &hns->hw;
5727 uint32_t reg_val;
5728 int ret;
5729
5730 switch (hw->reset.level) {
5731 case HNS3_FUNC_RESET:
5732 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
5733 if (ret)
5734 return ret;
5735
5736 /*
5737 * After performing a PF reset, it is not necessary to handle
5738 * the mailbox or send any command to the firmware, because
5739 * any mailbox handling or command to the firmware is only
5740 * valid after hns3_cmd_init is called.
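 * That is why reset.disable_cmd is set right below: no commands are
 * issued until hns3_cmd_init() brings the command queue back up.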
5741 */
5742 rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
5743 hw->reset.stats.request_cnt++;
5744 break;
5745 case HNS3_IMP_RESET:
5746 hns3_record_imp_error(hns);
5747 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
5748 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
5749 BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
5750 break;
5751 default:
5752 break;
5753 }
5754 return 0;
5755 }
5756
5757 static int
5758 hns3_set_rst_done(struct hns3_hw *hw)
5759 {
5760 struct hns3_pf_rst_done_cmd *req;
5761 struct hns3_cmd_desc desc;
5762
5763 req = (struct hns3_pf_rst_done_cmd *)desc.data;
5764 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
5765 req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
5766 return hns3_cmd_send(hw, &desc, 1);
5767 }
5768
5769 static int
5770 hns3_stop_service(struct hns3_adapter *hns)
5771 {
5772 struct hns3_hw *hw = &hns->hw;
5773 struct rte_eth_dev *eth_dev;
5774
5775 eth_dev = &rte_eth_devices[hw->data->port_id];
5776 hw->mac.link_status = RTE_ETH_LINK_DOWN;
5777 if (hw->adapter_state == HNS3_NIC_STARTED) {
5778 rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
5779 hns3_update_linkstatus_and_event(hw, false);
5780 }
5781 hns3_stop_rxtx_datapath(eth_dev);
5782
5783 rte_spinlock_lock(&hw->lock);
5784 if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
5785 hw->adapter_state == HNS3_NIC_STOPPING) {
5786 hns3_enable_all_queues(hw, false);
5787 hns3_do_stop(hns);
5788 hw->reset.mbuf_deferred_free = true;
5789 } else
5790 hw->reset.mbuf_deferred_free = false;
5791
5792 /*
5793 * It is cumbersome for hardware to pick-and-choose entries for
5794 * deletion from table space. Hence, for a function reset, software
5795 * intervention is required to delete the entries.
5796 */
5797 if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
5798 hns3_configure_all_mc_mac_addr(hns, true);
5799 rte_spinlock_unlock(&hw->lock);
5800
5801 return 0;
5802 }
5803
5804 static int
5805 hns3_start_service(struct hns3_adapter *hns)
5806 {
5807 struct hns3_hw *hw = &hns->hw;
5808 struct rte_eth_dev *eth_dev;
5809
5810 if (hw->reset.level == HNS3_IMP_RESET ||
5811 hw->reset.level == HNS3_GLOBAL_RESET)
5812 hns3_set_rst_done(hw);
5813 eth_dev = &rte_eth_devices[hw->data->port_id];
5814 hns3_start_rxtx_datapath(eth_dev);
5815 if (hw->adapter_state == HNS3_NIC_STARTED) {
5816 /*
5817 * The caller of this function already holds the hns3_hw.lock,
5818 * and hns3_service_handler may report an LSC event. In a
5819 * bonding application the event callback may invoke driver
5820 * ops that acquire hns3_hw.lock again, leading to a deadlock.
5821 * Defer the call to hns3_service_handler to avoid this.
5822 */
5823 rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
5824 hns3_service_handler, eth_dev);
5825
5826 /* Enable interrupt of all rx queues before enabling queues */
5827 hns3_dev_all_rx_queue_intr_enable(hw, true);
5828 /*
5829 * The enable state of each rxq and txq must be recovered
5830 * after a reset, so restore it before enabling all tqps.
5831 */
5832 hns3_restore_tqp_enable_state(hw);
5833 /*
5834 * When the initialization is finished, enable the queues to
5835 * receive and transmit packets.
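 * Note the ordering above: Rx queue interrupts and the per-queue
 * enable state are restored first, and the global queue enable below
 * comes last.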
static int
hns3_restore_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_configure_all_mac_addr(hns, false);
	if (ret)
		return ret;

	ret = hns3_configure_all_mc_mac_addr(hns, false);
	if (ret)
		goto err_mc_mac;

	ret = hns3_dev_promisc_restore(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_table(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_conf(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_ptp(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_rx_interrupt(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_gro_conf(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_fec(hw);
	if (ret)
		goto err_promisc;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
		ret = hns3_do_start(hns, false);
		if (ret)
			goto err_promisc;
		hns3_info(hw, "hns3 dev restart successful!");
	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

err_promisc:
	hns3_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
	hns3_configure_all_mac_addr(hns, true);
	return ret;
}

static void
hns3_reset_service(void *param)
{
	struct hns3_adapter *hns = (struct hns3_adapter *)param;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level;
	struct timeval tv_delta;
	struct timeval tv_start;
	struct timeval tv;
	uint64_t msec;
	int ret;

	/*
	 * If the interrupt was not triggered within the delay time, it may
	 * have been lost. Handle the interrupt here to recover from the
	 * error.
	 */
	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
			    SCHEDULE_DEFERRED) {
		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
					  rte_memory_order_relaxed);
		hns3_err(hw, "Handling interrupts in delayed tasks");
		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
		if (reset_level == HNS3_NONE_RESET) {
			hns3_err(hw, "No reset level is set, try IMP reset");
			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
		}
	}
	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);

	/*
	 * Check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is one, we need to
	 * wait for the hardware to complete the reset.
	 * a. If we are able to figure out in reasonable time that the
	 *    hardware has fully reset, then we can proceed with the
	 *    driver/client reset.
	 * b. Else, we can come back later to check this status, so
	 *    re-schedule now.
	 */
	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
	if (reset_level != HNS3_NONE_RESET) {
		hns3_clock_gettime(&tv_start);
		ret = hns3_reset_process(hns, reset_level);
		hns3_clock_gettime(&tv);
		timersub(&tv, &tv_start, &tv_delta);
		msec = hns3_clock_calctime_ms(&tv_delta);
		if (msec > HNS3_RESET_PROCESS_MS)
			hns3_err(hw, "%d handle long time delta %" PRIu64 " ms time=%ld.%.6ld",
				 hw->reset.level, msec,
				 tv.tv_sec, tv.tv_usec);
		if (ret == -EAGAIN)
			return;
	}

	/* Check if we got any *new* reset requests to be honored */
	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
	if (reset_level != HNS3_NONE_RESET)
		hns3_msix_process(hns, reset_level);
}
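
/*
 * hns3_reset_service() is the delayed-work entry point registered through
 * hw->reset.ops. Judging from its uses in this file, hw->reset.schedule acts
 * as a small state machine: SCHEDULE_PENDING and SCHEDULE_REQUESTED mark a
 * queued service run, SCHEDULE_DEFERRED marks a run queued while interrupt
 * handling was delayed (in which case the interrupt handler is invoked here
 * first), and SCHEDULE_NONE means no run is queued.
 */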
static uint32_t
hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
			uint32_t speed_capa)
{
	uint32_t speed_bit;
	uint32_t num = 0;
	uint32_t i;

	for (i = 0; i < RTE_DIM(speed_fec_capa_tbl); i++) {
		speed_bit =
			rte_eth_speed_bitflag(speed_fec_capa_tbl[i].speed,
					      RTE_ETH_LINK_FULL_DUPLEX);
		if ((speed_capa & speed_bit) == 0)
			continue;

		speed_fec_capa[num].speed = speed_fec_capa_tbl[i].speed;
		speed_fec_capa[num].capa = speed_fec_capa_tbl[i].capa;
		num++;
	}

	return num;
}

static int
hns3_fec_get_capability(struct rte_eth_dev *dev,
			struct rte_eth_fec_capa *speed_fec_capa,
			unsigned int num)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int speed_num;
	uint32_t speed_capa;

	speed_capa = hns3_get_speed_capa(hw);
	/* speed_num counts the number of speed capabilities. */
	speed_num = rte_popcount32(speed_capa & HNS3_SPEEDS_SUPP_FEC);
	if (speed_num == 0)
		return -ENOTSUP;

	if (speed_fec_capa == NULL)
		return speed_num;

	if (num < speed_num) {
		hns3_err(hw, "not enough array size(%u) to store FEC capabilities, should not be less than %u",
			 num, speed_num);
		return -EINVAL;
	}

	return hns3_get_speed_fec_capa(speed_fec_capa, speed_capa);
}
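
/*
 * A minimal application-side sketch (not part of the driver) of the two-step
 * query protocol implemented by hns3_fec_get_capability() via the ethdev
 * API: a first call with a NULL array returns the number of entries, a
 * second call fills a caller-allocated array.
 *
 *	int n = rte_eth_fec_get_capability(port_id, NULL, 0);
 *	if (n > 0) {
 *		struct rte_eth_fec_capa *capa = calloc(n, sizeof(*capa));
 *		if (capa != NULL &&
 *		    rte_eth_fec_get_capability(port_id, capa, n) == n) {
 *			// inspect capa[i].speed / capa[i].capa
 *		}
 *		free(capa);
 *	}
 */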
static int
get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * Reading CMD(HNS3_OPC_CONFIG_FEC_MODE) is not supported on devices
	 * whose link speed is below 10 Gbps.
	 */
	if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
		*state = 0;
		return 0;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
	req = (struct hns3_config_fec_cmd *)desc.data;
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "get current fec auto state failed, ret = %d",
			 ret);
		return ret;
	}

	*state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
	return 0;
}

static int
hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
{
	struct hns3_sfp_info_cmd *resp;
	uint32_t tmp_fec_capa;
	uint8_t auto_state = 0;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * If the link is down and AUTO is enabled, AUTO is returned;
	 * otherwise, the configured FEC mode is returned.
	 * If the link is up, the current FEC mode is returned.
	 */
	if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
		ret = get_current_fec_auto_state(hw, &auto_state);
		if (ret)
			return ret;

		if (auto_state == 0x1) {
			*fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
			return 0;
		}
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
	resp = (struct hns3_sfp_info_cmd *)desc.data;
	resp->query_type = HNS3_ACTIVE_QUERY;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		hns3_err(hw, "IMP does not support getting FEC, ret = %d", ret);
		return ret;
	} else if (ret) {
		hns3_err(hw, "get FEC failed, ret = %d", ret);
		return ret;
	}

	/*
	 * The FEC mode order defined in hns3 hardware differs from the one
	 * defined in the ethdev library, so the value needs to be converted.
	 */
	switch (resp->active_fec) {
	case HNS3_MAC_FEC_OFF:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	case HNS3_MAC_FEC_BASER:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
		break;
	case HNS3_MAC_FEC_RS:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		break;
	case HNS3_MAC_FEC_LLRS:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(LLRS);
		break;
	default:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	}

	*fec_capa = tmp_fec_capa;
	return 0;
}

static int
hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hns3_fec_get_internal(hw, fec_capa);
}

static int
hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);

	req = (struct hns3_config_fec_cmd *)desc.data;
	switch (mode) {
	case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(LLRS):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_LLRS);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
		hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
		break;
	default:
		return 0;
	}
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set fec mode failed, ret = %d", ret);

	return ret;
}

static uint32_t
hns3_parse_hw_fec_capa(uint8_t hw_fec_capa)
{
	const struct {
		uint32_t hw_fec_capa;
		uint32_t fec_capa;
	} fec_capa_map[] = {
		{ HNS3_FIBER_FEC_AUTO_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) },
		{ HNS3_FIBER_FEC_BASER_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
		{ HNS3_FIBER_FEC_RS_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
		{ HNS3_FIBER_FEC_LLRS_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(LLRS) },
		{ HNS3_FIBER_FEC_NOFEC_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) },
	};
	uint32_t capa = 0;
	uint32_t i;

	for (i = 0; i < RTE_DIM(fec_capa_map); i++) {
		if ((hw_fec_capa & fec_capa_map[i].hw_fec_capa) != 0)
			capa |= fec_capa_map[i].fec_capa;
	}

	return capa;
}
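
/*
 * A minimal application-side sketch (not part of the driver) of how the
 * fec_get/fec_set callbacks above are reached. RTE_ETH_FEC_MODE_CAPA_MASK()
 * takes the mode name, so a single-bit capability mask is what ends up being
 * passed down to hns3_fec_set().
 *
 *	uint32_t fec_capa;
 *
 *	if (rte_eth_fec_get(port_id, &fec_capa) == 0)
 *		printf("current FEC capa mask: 0x%x\n", fec_capa);
 *	rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
 */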
static uint32_t
hns3_get_current_speed_fec_cap(struct hns3_mac *mac)
{
	uint32_t i;

	if (mac->fec_capa != 0)
		return hns3_parse_hw_fec_capa(mac->fec_capa);

	for (i = 0; i < RTE_DIM(speed_fec_capa_tbl); i++) {
		if (mac->link_speed == speed_fec_capa_tbl[i].speed)
			return speed_fec_capa_tbl[i].capa;
	}

	return 0;
}

static int
hns3_fec_mode_valid(struct rte_eth_dev *dev, uint32_t mode)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	uint32_t cur_capa;

	if (rte_popcount32(mode) != 1) {
		hns3_err(hw, "FEC mode(0x%x) must have only one bit set", mode);
		return -EINVAL;
	}

	/*
	 * Check whether the configured mode is within the current FEC
	 * capability. If not, the configured mode is not supported.
	 */
	cur_capa = hns3_get_current_speed_fec_cap(&hw->mac);
	if ((cur_capa & mode) == 0) {
		hns3_err(hw, "unsupported FEC mode(0x%x)", mode);
		return -EINVAL;
	}

	return 0;
}

static int
hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	ret = hns3_fec_mode_valid(dev, mode);
	if (ret != 0)
		return ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_fec_hw(hw, mode);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	pf->fec_mode = mode;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_restore_fec(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t mode = pf->fec_mode;
	int ret;

	ret = hns3_set_fec_hw(hw, mode);
	if (ret)
		hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
			 mode, ret);

	return ret;
}

static int
hns3_query_dev_fec_info(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
	int ret;

	ret = hns3_fec_get_internal(hw, &pf->fec_mode);
	if (ret)
		hns3_err(hw, "query device FEC info failed, ret = %d", ret);

	return ret;
}

static bool
hns3_optical_module_existed(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	bool existed;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw,
			 "fail to get optical module exist state, ret = %d.",
			 ret);
		return false;
	}
	existed = !!desc.data[0];

	return existed;
}

static int
hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
			    uint32_t len, uint8_t *data)
{
#define HNS3_SFP_INFO_CMD_NUM 6
#define HNS3_SFP_INFO_MAX_LEN \
	(HNS3_SFP_INFO_BD0_LEN + \
	(HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
	struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
	struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
	uint16_t read_len;
	uint16_t copy_len;
	int ret;
	int i;

	for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
					  true);
		if (i < HNS3_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}

	sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
	read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);

	ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
	if (ret) {
		hns3_err(hw, "fail to get module EEPROM info, ret = %d.",
			 ret);
		return ret;
	}

	/* The data format in BD0 is different from that of the other BDs. */
	copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return (int)read_len;
}
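
/*
 * Each HNS3_OPC_GET_SFP_EEPROM batch above reads at most
 * HNS3_SFP_INFO_MAX_LEN bytes: HNS3_SFP_INFO_BD0_LEN bytes carried in BD0
 * plus HNS3_SFP_INFO_BDX_LEN bytes in each of the remaining
 * HNS3_SFP_INFO_CMD_NUM - 1 descriptors. hns3_get_module_eeprom() below
 * therefore loops, advancing the offset by the returned length, until the
 * requested length has been read.
 */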
static int
hns3_get_module_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *info)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	uint32_t offset = info->offset;
	uint32_t len = info->length;
	uint8_t *data = info->data;
	uint32_t read_len = 0;

	if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER)
		return -ENOTSUP;

	if (!hns3_optical_module_existed(hw)) {
		hns3_err(hw, "fail to read module EEPROM: no module is connected.");
		return -EIO;
	}

	while (read_len < len) {
		int ret;
		ret = hns3_get_module_eeprom_data(hw, offset + read_len,
						  len - read_len,
						  data + read_len);
		if (ret < 0)
			return -EIO;
		read_len += ret;
	}

	return 0;
}

static int
hns3_get_module_info(struct rte_eth_dev *dev,
		     struct rte_eth_dev_module_info *modinfo)
{
#define HNS3_SFF8024_ID_SFP 0x03
#define HNS3_SFF8024_ID_QSFP_8438 0x0c
#define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d
#define HNS3_SFF8024_ID_QSFP28_8636 0x11
#define HNS3_SFF_8636_V1_3 0x03
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct rte_dev_eeprom_info info;
	struct hns3_sfp_type sfp_type;
	int ret;

	memset(&sfp_type, 0, sizeof(sfp_type));
	memset(&info, 0, sizeof(info));
	info.data = (uint8_t *)&sfp_type;
	info.length = sizeof(sfp_type);
	ret = hns3_get_module_eeprom(dev, &info);
	if (ret)
		return ret;

	switch (sfp_type.type) {
	case HNS3_SFF8024_ID_SFP:
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
		break;
	case HNS3_SFF8024_ID_QSFP_8438:
		modinfo->type = RTE_ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
		break;
	case HNS3_SFF8024_ID_QSFP_8436_8636:
		if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
			modinfo->type = RTE_ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
		} else {
			modinfo->type = RTE_ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
		}
		break;
	case HNS3_SFF8024_ID_QSFP28_8636:
		modinfo->type = RTE_ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
		break;
	default:
		hns3_err(hw, "unknown module, type = %u, extra_type = %u.",
			 sfp_type.type, sfp_type.ext_type);
		return -EINVAL;
	}

	return 0;
}
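
/*
 * A minimal application-side sketch (not part of the driver) of the usual
 * calling sequence for the two callbacks above: query the module type and
 * EEPROM size first, then read the EEPROM contents.
 *
 *	struct rte_eth_dev_module_info modinfo = { 0 };
 *	struct rte_dev_eeprom_info info = { 0 };
 *	uint8_t buf[RTE_ETH_MODULE_SFF_8636_MAX_LEN];
 *
 *	if (rte_eth_dev_get_module_info(port_id, &modinfo) == 0) {
 *		info.offset = 0;
 *		info.length = RTE_MIN(modinfo.eeprom_len,
 *				      (uint32_t)sizeof(buf));
 *		info.data = buf;
 *		rte_eth_dev_get_module_eeprom(port_id, &info);
 *	}
 */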
static const struct eth_dev_ops hns3_eth_dev_ops = {
	.dev_configure = hns3_dev_configure,
	.dev_start = hns3_dev_start,
	.dev_stop = hns3_dev_stop,
	.dev_close = hns3_dev_close,
	.promiscuous_enable = hns3_dev_promiscuous_enable,
	.promiscuous_disable = hns3_dev_promiscuous_disable,
	.allmulticast_enable = hns3_dev_allmulticast_enable,
	.allmulticast_disable = hns3_dev_allmulticast_disable,
	.mtu_set = hns3_dev_mtu_set,
	.stats_get = hns3_stats_get,
	.stats_reset = hns3_stats_reset,
	.xstats_get = hns3_dev_xstats_get,
	.xstats_get_names = hns3_dev_xstats_get_names,
	.xstats_reset = hns3_dev_xstats_reset,
	.xstats_get_by_id = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get = hns3_dev_infos_get,
	.fw_version_get = hns3_fw_version_get,
	.rx_queue_setup = hns3_rx_queue_setup,
	.tx_queue_setup = hns3_tx_queue_setup,
	.rx_queue_release = hns3_dev_rx_queue_release,
	.tx_queue_release = hns3_dev_tx_queue_release,
	.rx_queue_start = hns3_dev_rx_queue_start,
	.rx_queue_stop = hns3_dev_rx_queue_stop,
	.tx_queue_start = hns3_dev_tx_queue_start,
	.tx_queue_stop = hns3_dev_tx_queue_stop,
	.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
	.rxq_info_get = hns3_rxq_info_get,
	.txq_info_get = hns3_txq_info_get,
	.rx_burst_mode_get = hns3_rx_burst_mode_get,
	.tx_burst_mode_get = hns3_tx_burst_mode_get,
	.flow_ctrl_get = hns3_flow_ctrl_get,
	.flow_ctrl_set = hns3_flow_ctrl_set,
	.priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
	.mac_addr_add = hns3_add_mac_addr,
	.mac_addr_remove = hns3_remove_mac_addr,
	.mac_addr_set = hns3_set_default_mac_addr,
	.set_mc_addr_list = hns3_set_mc_mac_addr_list,
	.link_update = hns3_dev_link_update,
	.dev_set_link_up = hns3_dev_set_link_up,
	.dev_set_link_down = hns3_dev_set_link_down,
	.rss_hash_update = hns3_dev_rss_hash_update,
	.rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
	.reta_update = hns3_dev_rss_reta_update,
	.reta_query = hns3_dev_rss_reta_query,
	.flow_ops_get = hns3_dev_flow_ops_get,
	.vlan_filter_set = hns3_vlan_filter_set,
	.vlan_tpid_set = hns3_vlan_tpid_set,
	.vlan_offload_set = hns3_vlan_offload_set,
	.vlan_pvid_set = hns3_vlan_pvid_set,
	.get_reg = hns3_get_regs,
	.get_module_info = hns3_get_module_info,
	.get_module_eeprom = hns3_get_module_eeprom,
	.get_dcb_info = hns3_get_dcb_info,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
	.fec_get_capability = hns3_fec_get_capability,
	.fec_get = hns3_fec_get,
	.fec_set = hns3_fec_set,
	.tm_ops_get = hns3_tm_ops_get,
	.tx_done_cleanup = hns3_tx_done_cleanup,
	.timesync_enable = hns3_timesync_enable,
	.timesync_disable = hns3_timesync_disable,
	.timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp,
	.timesync_adjust_time = hns3_timesync_adjust_time,
	.timesync_read_time = hns3_timesync_read_time,
	.timesync_write_time = hns3_timesync_write_time,
	.eth_dev_priv_dump = hns3_eth_dev_priv_dump,
	.eth_rx_descriptor_dump = hns3_rx_descriptor_dump,
	.eth_tx_descriptor_dump = hns3_tx_descriptor_dump,
	.get_monitor_addr = hns3_get_monitor_addr,
};
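
/*
 * These callbacks are dispatched by the ethdev layer; for example,
 * rte_eth_dev_set_mtu() ends up in .mtu_set (hns3_dev_mtu_set) and
 * rte_eth_fec_set() in .fec_set (hns3_fec_set).
 */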
static const struct hns3_reset_ops hns3_reset_ops = {
	.reset_service = hns3_reset_service,
	.stop_service = hns3_stop_service,
	.prepare_reset = hns3_prepare_reset,
	.wait_hardware_ready = hns3_wait_hardware_ready,
	.reinit_dev = hns3_reinit_dev,
	.restore_conf = hns3_restore_conf,
	.start_service = hns3_start_service,
};

static void
hns3_init_hw_ops(struct hns3_hw *hw)
{
	hw->ops.add_mc_mac_addr = hns3_add_mc_mac_addr;
	hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr;
	hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr;
	hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr;
	hw->ops.bind_ring_with_vector = hns3_bind_ring_with_vector;
}

static int
hns3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	hns3_flow_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3_eth_dev_ops;
	eth_dev->rx_queue_count = hns3_rx_queue_count;
	ret = hns3_mp_init(eth_dev);
	if (ret)
		goto err_mp_init;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_tx_push_init(eth_dev);
		return 0;
	}

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = false;
	hw->data = eth_dev->data;
	hns3_parse_devargs(eth_dev);

	/*
	 * Set the default max packet size according to the default MTU
	 * value in the DPDK framework.
	 */
	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;

	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3_reset_ops;

	hns3_init_hw_ops(hw);
	ret = hns3_init_pf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
		goto err_init_pf;
	}

	ret = hns3_init_mac_addrs(eth_dev);
	if (ret != 0)
		goto err_init_mac_addrs;

	hw->adapter_state = HNS3_NIC_INITIALIZED;

	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
	    SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait for the ready flag before reset. */
		hns3_notify_reset_ready(hw, false);
	}

	hns3_info(hw, "hns3 dev initialization successful!");
	return 0;

err_init_mac_addrs:
	hns3_uninit_pf(eth_dev);

err_init_pf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	hns3_mp_uninit(eth_dev);

err_mp_init:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->tx_descriptor_status = NULL;
	return ret;
}

static int
hns3_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_mp_uninit(eth_dev);
		return 0;
	}

	if (hw->adapter_state < HNS3_NIC_CLOSING)
		hns3_dev_close(eth_dev);

	hw->adapter_state = HNS3_NIC_REMOVED;
	return 0;
}

static int
eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct hns3_adapter),
					     hns3_dev_init);
}
static int
eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
}

static const struct rte_pci_id pci_id_hns3_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
	{ .vendor_id = 0, }, /* sentinel */
};

static struct rte_pci_driver rte_hns3_pmd = {
	.id_table = pci_id_hns3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_hns3_pci_probe,
	.remove = eth_hns3_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_hns3,
		HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
		HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
		HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
		HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16> "
		HNS3_DEVARG_FDIR_VLAN_MATCH_MODE "=strict|nostrict "
		HNS3_DEVARG_FDIR_TUPLE_CONFIG "=+outvlan-insmac|+outvlan-indmac|"
					      "+outvlan-insip|+outvlan-indip|"
					      "+outvlan-sctptag|+outvlan-tunvni "
		HNS3_DEVARG_FDIR_INDEX_CONFIG "=hash|priority ");
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_tx, tx, DEBUG);
#endif
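
/*
 * A hypothetical EAL invocation (not part of the driver) showing how the
 * devargs registered above are passed; the PCI address is made up, and the
 * key names are the string values of the HNS3_DEVARG_* macros:
 *
 *	dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common -- -i
 */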