/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>

#include "hns3_ethdev.h"
#include "hns3_common.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"
#include "hns3_flow.h"

#define HNS3_SERVICE_INTERVAL		1000000 /* us */
#define HNS3_SERVICE_QUICK_INTERVAL	10
#define HNS3_INVALID_PVID		0xFFFF

#define HNS3_FILTER_TYPE_VF		0
#define HNS3_FILTER_TYPE_PORT		1
#define HNS3_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HNS3_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HNS3_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HNS3_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HNS3_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HNS3_FILTER_FE_EGRESS		(HNS3_FILTER_FE_NIC_EGRESS_B \
					| HNS3_FILTER_FE_ROCE_EGRESS_B)
#define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
					| HNS3_FILTER_FE_ROCE_INGRESS_B)

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

#define HNS3_VECTOR0_IMP_RESET_INT_B	1
#define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
#define HNS3_VECTOR0_IMP_RD_POISON_B	5U
#define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U

#define HNS3_RESET_WAIT_MS	100
#define HNS3_RESET_WAIT_CNT	200

/* FEC mode order defined in HNS3 hardware */
#define HNS3_HW_FEC_MODE_NOFEC	0
#define HNS3_HW_FEC_MODE_BASER	1
#define HNS3_HW_FEC_MODE_RS	2

enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,
	HNS3_VECTOR0_EVENT_MBX,
	HNS3_VECTOR0_EVENT_ERR,
	HNS3_VECTOR0_EVENT_PTP,
	HNS3_VECTOR0_EVENT_OTHER,
};

static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};

static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
						  uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
				    int on);
static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
static bool hns3_update_link_status(struct hns3_hw *hw);

static int hns3_add_mc_mac_addr(struct hns3_hw *hw,
				struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
				   struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct hns3_hw *hw);
static int hns3_do_stop(struct hns3_adapter *hns);
static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);

static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3_evt_cause
hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
			  uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.imp_cnt++;
		hns3_warn(hw, "IMP reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw, "IMP reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
			     uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.global_cnt++;
		hns3_warn(hw, "Global reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw,
			  "Global reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t vector0_int_stats;
	uint32_t cmdq_src_val;
	uint32_t hw_err_src_reg;
	uint32_t val;
	enum hns3_evt_cause ret;
	bool is_delay;

	/* fetch the events from their corresponding regs */
	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);

	is_delay = clearval == NULL ? true : false;
	/*
	 * Assumption: if reset and mailbox events are reported together,
	 * only the reset event is processed and the mailbox event is
	 * deferred. Since the RX CMDQ event is not cleared this time, the
	 * hardware will raise another interrupt just for the mailbox.
	 */
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
		ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Global reset */
	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
		ret = hns3_proc_global_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Check for vector0 1588 event source */
	if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
		val = BIT(HNS3_VECTOR0_1588_INT_B);
		ret = HNS3_VECTOR0_EVENT_PTP;
		goto out;
	}

	/* check for vector0 msix event source */
	if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
		val = vector0_int_stats | hw_err_src_reg;
		ret = HNS3_VECTOR0_EVENT_ERR;
		goto out;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
		cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		val = cmdq_src_val;
		ret = HNS3_VECTOR0_EVENT_MBX;
		goto out;
	}

	val = vector0_int_stats;
	ret = HNS3_VECTOR0_EVENT_OTHER;

out:
	if (clearval)
		*clearval = val;
	return ret;
}

static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
	if (event_type == HNS3_VECTOR0_EVENT_RST ||
	    event_type == HNS3_VECTOR0_EVENT_PTP)
		hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
	else if (event_type == HNS3_VECTOR0_EVENT_MBX)
		hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
	uint32_t vector0_int_stats;

	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during IMP reset interrupt");

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during Global reset interrupt");

	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
			       BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
			       BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
			       BIT(HNS3_VECTOR0_CORERESET_INT_B));
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
			       BIT(HNS3_VECTOR0_1588_INT_B));
}

static void
hns3_handle_mac_tnl(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	uint32_t status;
	int ret;

	/* query and clear mac tnl interrupt */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
		return;
	}

	status = rte_le_to_cpu_32(desc.data[0]);
	if (status) {
		hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
					  false);
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret)
			hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
				 ret);
	}
}
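/*
 * Note: hns3_check_event_cause() above classifies vector0 sources in a fixed
 * priority order: IMP reset, then global reset, then 1588 (PTP), then
 * MSI-X/RAS errors, then mailbox (CMDQ RX); anything left is reported as
 * "other". The interrupt handler below dispatches on that result.
 */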
static void
hns3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_evt_cause event_cause;
	uint32_t clearval = 0;
	uint32_t vector0_int;
	uint32_t ras_int;
	uint32_t cmdq_int;

	/* Disable interrupt */
	hns3_pf_disable_irq0(hw);

	event_cause = hns3_check_event_cause(hns, &clearval);
	vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hns3_clear_event_cause(hw, event_cause, clearval);
	/* vector 0 interrupt is shared with reset and mailbox source events. */
	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
		hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
		hns3_handle_mac_tnl(hw);
		hns3_handle_error(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
		hns3_warn(hw, "received reset interrupt");
		hns3_schedule_reset(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
		hns3_dev_handle_mbx_msg(hw);
	} else {
		hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
	}

	/* Enable interrupt if it is not caused by reset */
	hns3_pf_enable_irq0(hw);
}
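/*
 * The PF VLAN filter table below is programmed 160 VLAN IDs at a time: each
 * command selects a 160-ID window (vlan_offset) and carries a bitmap with one
 * bit per VLAN ID in that window. Worked example for vlan_id 1000:
 *	vlan_offset_base     = 1000 / 160       = 6
 *	vlan_offset_byte     = (1000 % 160) / 8 = 5
 *	vlan_offset_byte_val = 1 << (1000 % 8)  = 0x01
 */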
static int
hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3_VLAN_ID_OFFSET_STEP	160
#define HNS3_VLAN_BYTE_SIZE		8
	struct hns3_vlan_filter_pf_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	uint8_t vlan_offset_byte_val;
	struct hns3_cmd_desc desc;
	uint8_t vlan_offset_byte;
	uint8_t vlan_offset_base;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
			   HNS3_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);

	req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_base;
	req->vlan_cfg = on ? 0 : 1;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
			 vlan_id, ret);

	return ret;
}

static void
hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id) {
			if (vlan_entry->hd_tbl_status)
				hns3_set_port_vlan_filter(hns, vlan_id, 0);
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			break;
		}
	}
}

static void
hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
			bool writen_to_tbl)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id)
			return;
	}

	vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
	if (vlan_entry == NULL) {
		hns3_err(hw, "Failed to malloc hns3 vlan table");
		return;
	}

	vlan_entry->hd_tbl_status = writen_to_tbl;
	vlan_entry->vlan_id = vlan_id;

	LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
}

static int
hns3_restore_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;
	uint16_t vlan_id;
	int ret = 0;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
		return hns3_vlan_pvid_configure(hns,
						hw->port_base_vlan_cfg.pvid, 1);

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			vlan_id = vlan_entry->vlan_id;
			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
			if (ret)
				break;
		}
	}

	return ret;
}
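/*
 * hns3_vlan_filter_configure() below backs the rte_eth_dev_vlan_filter()
 * ethdev API (via the .vlan_filter_set dev op). A minimal application-side
 * sketch, assuming "port_id" is an initialized hns3 port with
 * RTE_ETH_RX_OFFLOAD_VLAN_FILTER enabled:
 *
 *	int ret = rte_eth_dev_vlan_filter(port_id, 10, 1); // accept VLAN 10
 *	if (ret == 0)
 *		ret = rte_eth_dev_vlan_filter(port_id, 10, 0); // remove it
 */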
static int
hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
	struct hns3_hw *hw = &hns->hw;
	bool writen_to_tbl = false;
	int ret = 0;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. So, to receive packets without vlan, vlan id
	 * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
	 */
	if (on == 0 && vlan_id == 0)
		return 0;

	/*
	 * When port based vlan is enabled, we use the port based vlan as the
	 * vlan filter condition. In this case, we don't update the vlan
	 * filter table when the user adds a new vlan or removes an existing
	 * vlan; we just update the vlan list. The vlan ids in the vlan list
	 * are written into the vlan filter table once port based vlan is
	 * disabled.
	 */
	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
		writen_to_tbl = true;
	}

	if (ret == 0) {
		if (on)
			hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
		else
			hns3_rm_dev_vlan_table(hns, vlan_id);
	}
	return ret;
}

static int
hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
			 uint16_t tpid)
{
	struct hns3_rx_vlan_type_cfg_cmd *rx_req;
	struct hns3_tx_vlan_type_cfg_cmd *tx_req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_VLAN) {
		hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
			 ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
	tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
			 ret);
	return ret;
}

static int
hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}
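/*
 * Since only TPID 0x8100 (RTE_ETHER_TYPE_VLAN) is accepted above, an
 * application call is effectively limited to restating the default, e.g.
 * (a sketch, assuming "port_id" is a valid hns3 port):
 *
 *	rte_eth_dev_set_vlan_ether_type(port_id, RTE_ETH_VLAN_TYPE_OUTER,
 *					RTE_ETHER_TYPE_VLAN);
 */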
static int
hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_rx_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
		     vcfg->strip_tag1_discard_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
		     vcfg->strip_tag2_discard_en ? 1 : 0);
	/*
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver, so just configure parameters for the PF vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
	return ret;
}

static int
hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
{
	struct hns3_rx_vtag_cfg rxvlan_cfg;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		rxvlan_cfg.strip_tag1_en = false;
		rxvlan_cfg.strip_tag2_en = enable;
		rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		rxvlan_cfg.strip_tag1_en = enable;
		rxvlan_cfg.strip_tag2_en = true;
		rxvlan_cfg.strip_tag2_discard_en = true;
	}

	rxvlan_cfg.strip_tag1_discard_en = false;
	rxvlan_cfg.vlan1_vlan_prionly = false;
	rxvlan_cfg.vlan2_vlan_prionly = false;
	rxvlan_cfg.rx_vlan_offload_en = enable;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
	if (ret) {
		hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
			 enable ? "enable" : "disable", ret);
		return ret;
	}

	memcpy(&hns->pf.vtag_config.rx_vcfg, &rxvlan_cfg,
	       sizeof(struct hns3_rx_vtag_cfg));

	return ret;
}

static int
hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
			  uint8_t fe_type, bool filter_en, uint8_t vf_id)
{
	struct hns3_vlan_filter_ctrl_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set vlan filter fail, ret =%d", ret);

	return ret;
}

static int
hns3_vlan_filter_init(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
					HNS3_FILTER_FE_EGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret) {
		hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);

	return ret;
}
"enable" : "disable", ret); 682 683 return ret; 684 } 685 686 static int 687 hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask) 688 { 689 struct hns3_adapter *hns = dev->data->dev_private; 690 struct hns3_hw *hw = &hns->hw; 691 struct rte_eth_rxmode *rxmode; 692 unsigned int tmp_mask; 693 bool enable; 694 int ret = 0; 695 696 rte_spinlock_lock(&hw->lock); 697 rxmode = &dev->data->dev_conf.rxmode; 698 tmp_mask = (unsigned int)mask; 699 if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) { 700 /* ignore vlan filter configuration during promiscuous mode */ 701 if (!dev->data->promiscuous) { 702 /* Enable or disable VLAN filter */ 703 enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? 704 true : false; 705 706 ret = hns3_enable_vlan_filter(hns, enable); 707 if (ret) { 708 rte_spinlock_unlock(&hw->lock); 709 hns3_err(hw, "failed to %s rx filter, ret = %d", 710 enable ? "enable" : "disable", ret); 711 return ret; 712 } 713 } 714 } 715 716 if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) { 717 /* Enable or disable VLAN stripping */ 718 enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? 719 true : false; 720 721 ret = hns3_en_hw_strip_rxvtag(hns, enable); 722 if (ret) { 723 rte_spinlock_unlock(&hw->lock); 724 hns3_err(hw, "failed to %s rx strip, ret = %d", 725 enable ? "enable" : "disable", ret); 726 return ret; 727 } 728 } 729 730 rte_spinlock_unlock(&hw->lock); 731 732 return ret; 733 } 734 735 static int 736 hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns, 737 struct hns3_tx_vtag_cfg *vcfg) 738 { 739 struct hns3_vport_vtag_tx_cfg_cmd *req; 740 struct hns3_cmd_desc desc; 741 struct hns3_hw *hw = &hns->hw; 742 uint16_t vport_id; 743 uint8_t bitmap; 744 int ret; 745 746 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false); 747 748 req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data; 749 req->def_vlan_tag1 = vcfg->default_tag1; 750 req->def_vlan_tag2 = vcfg->default_tag2; 751 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B, 752 vcfg->accept_tag1 ? 1 : 0); 753 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B, 754 vcfg->accept_untag1 ? 1 : 0); 755 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B, 756 vcfg->accept_tag2 ? 1 : 0); 757 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B, 758 vcfg->accept_untag2 ? 1 : 0); 759 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B, 760 vcfg->insert_tag1_en ? 1 : 0); 761 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B, 762 vcfg->insert_tag2_en ? 1 : 0); 763 hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0); 764 765 /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */ 766 hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B, 767 vcfg->tag_shift_mode_en ? 1 : 0); 768 769 /* 770 * In current version VF is not supported when PF is driven by DPDK 771 * driver, just need to configure parameters for PF vport. 
772 */ 773 vport_id = HNS3_PF_FUNC_ID; 774 req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD; 775 bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE); 776 req->vf_bitmap[req->vf_offset] = bitmap; 777 778 ret = hns3_cmd_send(hw, &desc, 1); 779 if (ret) 780 hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret); 781 782 return ret; 783 } 784 785 static int 786 hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state, 787 uint16_t pvid) 788 { 789 struct hns3_hw *hw = &hns->hw; 790 struct hns3_tx_vtag_cfg txvlan_cfg; 791 int ret; 792 793 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) { 794 txvlan_cfg.accept_tag1 = true; 795 txvlan_cfg.insert_tag1_en = false; 796 txvlan_cfg.default_tag1 = 0; 797 } else { 798 txvlan_cfg.accept_tag1 = 799 hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE; 800 txvlan_cfg.insert_tag1_en = true; 801 txvlan_cfg.default_tag1 = pvid; 802 } 803 804 txvlan_cfg.accept_untag1 = true; 805 txvlan_cfg.accept_tag2 = true; 806 txvlan_cfg.accept_untag2 = true; 807 txvlan_cfg.insert_tag2_en = false; 808 txvlan_cfg.default_tag2 = 0; 809 txvlan_cfg.tag_shift_mode_en = true; 810 811 ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg); 812 if (ret) { 813 hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid, 814 ret); 815 return ret; 816 } 817 818 memcpy(&hns->pf.vtag_config.tx_vcfg, &txvlan_cfg, 819 sizeof(struct hns3_tx_vtag_cfg)); 820 821 return ret; 822 } 823 824 825 static void 826 hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list) 827 { 828 struct hns3_user_vlan_table *vlan_entry; 829 struct hns3_pf *pf = &hns->pf; 830 831 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 832 if (vlan_entry->hd_tbl_status) { 833 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0); 834 vlan_entry->hd_tbl_status = false; 835 } 836 } 837 838 if (is_del_list) { 839 vlan_entry = LIST_FIRST(&pf->vlan_list); 840 while (vlan_entry) { 841 LIST_REMOVE(vlan_entry, next); 842 rte_free(vlan_entry); 843 vlan_entry = LIST_FIRST(&pf->vlan_list); 844 } 845 } 846 } 847 848 static void 849 hns3_add_all_vlan_table(struct hns3_adapter *hns) 850 { 851 struct hns3_user_vlan_table *vlan_entry; 852 struct hns3_pf *pf = &hns->pf; 853 854 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 855 if (!vlan_entry->hd_tbl_status) { 856 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1); 857 vlan_entry->hd_tbl_status = true; 858 } 859 } 860 } 861 862 static void 863 hns3_remove_all_vlan_table(struct hns3_adapter *hns) 864 { 865 struct hns3_hw *hw = &hns->hw; 866 int ret; 867 868 hns3_rm_all_vlan_table(hns, true); 869 if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) { 870 ret = hns3_set_port_vlan_filter(hns, 871 hw->port_base_vlan_cfg.pvid, 0); 872 if (ret) { 873 hns3_err(hw, "Failed to remove all vlan table, ret =%d", 874 ret); 875 return; 876 } 877 } 878 } 879 880 static int 881 hns3_update_vlan_filter_entries(struct hns3_adapter *hns, 882 uint16_t port_base_vlan_state, uint16_t new_pvid) 883 { 884 struct hns3_hw *hw = &hns->hw; 885 uint16_t old_pvid; 886 int ret; 887 888 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) { 889 old_pvid = hw->port_base_vlan_cfg.pvid; 890 if (old_pvid != HNS3_INVALID_PVID) { 891 ret = hns3_set_port_vlan_filter(hns, old_pvid, 0); 892 if (ret) { 893 hns3_err(hw, "failed to remove old pvid %u, " 894 "ret = %d", old_pvid, ret); 895 return ret; 896 } 897 } 898 899 hns3_rm_all_vlan_table(hns, false); 900 ret = hns3_set_port_vlan_filter(hns, new_pvid, 1); 901 if (ret) { 902 hns3_err(hw, "failed to add new pvid %u, ret = %d", 903 
static int
hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
				uint16_t port_base_vlan_state,
				uint16_t new_pvid)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t old_pvid;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
		old_pvid = hw->port_base_vlan_cfg.pvid;
		if (old_pvid != HNS3_INVALID_PVID) {
			ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
			if (ret) {
				hns3_err(hw, "failed to remove old pvid %u, "
					 "ret = %d", old_pvid, ret);
				return ret;
			}
		}

		hns3_rm_all_vlan_table(hns, false);
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
		if (ret) {
			hns3_err(hw, "failed to add new pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}
	} else {
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
		if (ret) {
			hns3_err(hw, "failed to remove pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}

		hns3_add_all_vlan_table(hns);
	}
	return 0;
}

static int
hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
{
	struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
	struct hns3_rx_vtag_cfg rx_vlan_cfg;
	bool rx_strip_en;
	int ret;

	rx_strip_en = old_cfg->rx_vlan_offload_en;
	if (on) {
		rx_vlan_cfg.strip_tag1_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_en = true;
		rx_vlan_cfg.strip_tag2_discard_en = true;
	} else {
		rx_vlan_cfg.strip_tag1_en = false;
		rx_vlan_cfg.strip_tag2_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_discard_en = false;
	}
	rx_vlan_cfg.strip_tag1_discard_en = false;
	rx_vlan_cfg.vlan1_vlan_prionly = false;
	rx_vlan_cfg.vlan2_vlan_prionly = false;
	rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
	if (ret)
		return ret;

	memcpy(&hns->pf.vtag_config.rx_vcfg, &rx_vlan_cfg,
	       sizeof(struct hns3_rx_vtag_cfg));

	return ret;
}
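/*
 * PVID programming below touches three things in order: Tx tag insertion
 * (hns3_vlan_txvlan_cfg), Rx tag stripping (hns3_en_pvid_strip) and the VLAN
 * filter entries. On failure each completed step is rolled back in reverse
 * order through the labels at the end of the function.
 */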
static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t port_base_vlan_state;
	int ret, err;

	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
		if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
			hns3_warn(hw, "Invalid operation! As current pvid set "
				  "is %u, disable pvid %u is invalid",
				  hw->port_base_vlan_cfg.pvid, pvid);
		return 0;
	}

	port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
				    HNS3_PORT_BASE_VLAN_DISABLE;
	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
			 ret);
		return ret;
	}

	ret = hns3_en_pvid_strip(hns, on);
	if (ret) {
		hns3_err(hw, "failed to config rx vlan strip for pvid, "
			 "ret = %d", ret);
		goto pvid_vlan_strip_fail;
	}

	if (pvid == HNS3_INVALID_PVID)
		goto out;
	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to update vlan filter entries, ret = %d",
			 ret);
		goto vlan_filter_set_fail;
	}

out:
	hw->port_base_vlan_cfg.state = port_base_vlan_state;
	hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
	return ret;

vlan_filter_set_fail:
	err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
				 HNS3_PORT_BASE_VLAN_ENABLE);
	if (err)
		hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
	err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
				   hw->port_base_vlan_cfg.pvid);
	if (err)
		hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

	return ret;
}

static int
hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool pvid_en_state_change;
	uint16_t pvid_state;
	int ret;

	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
		hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
			 RTE_ETHER_MAX_VLAN_ID);
		return -EINVAL;
	}

	/*
	 * If the PVID configuration state changes, the PVID state stored in
	 * struct hns3_tx_queue/hns3_rx_queue should be refreshed.
	 */
	pvid_state = hw->port_base_vlan_cfg.state;
	if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
	    (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
		pvid_en_state_change = false;
	else
		pvid_en_state_change = true;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_pvid_configure(hns, pvid, on);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		return ret;
	/*
	 * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE do the PVID related
	 * operations in Tx/Rx need to be processed by the PMD.
	 */
	if (pvid_en_state_change &&
	    hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
		hns3_update_all_queues_pvid_proc_en(hw);

	return 0;
}

static int
hns3_default_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
	 * table, packets without vlan won't be received. So, add vlan 0 as
	 * the default vlan.
	 */
	ret = hns3_vlan_filter_configure(hns, 0, 1);
	if (ret)
		hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
	return ret;
}
static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * This function can be called in the initialization and reset process.
	 * When called in the reset process, it means that the hardware has
	 * been reset successfully and we need to restore the hardware
	 * configuration to ensure that it remains unchanged before and after
	 * the reset.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
	}

	ret = hns3_vlan_filter_init(hns);
	if (ret) {
		hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
		return ret;
	}

	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
				       RTE_ETHER_TYPE_VLAN);
	if (ret) {
		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
		return ret;
	}

	/*
	 * When in the reinit dev stage of the reset process, the following
	 * vlan-related configurations may differ from those at initialization;
	 * we will restore the configurations to hardware in
	 * hns3_restore_vlan_table and hns3_restore_vlan_conf later.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
		if (ret) {
			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
			return ret;
		}

		ret = hns3_en_hw_strip_rxvtag(hns, false);
		if (ret) {
			hns3_err(hw, "rx strip configure fail in pf, ret =%d",
				 ret);
			return ret;
		}
	}

	return hns3_default_vlan_config(hns);
}

static int
hns3_restore_vlan_conf(struct hns3_adapter *hns)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	bool enable;
	int ret;

	if (!hw->data->promiscuous) {
		/* restore vlan filter states */
		offloads = hw->data->dev_conf.rxmode.offloads;
		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
		ret = hns3_enable_vlan_filter(hns, enable);
		if (ret) {
			hns3_err(hw, "failed to restore vlan rx filter conf, "
				 "ret = %d", ret);
			return ret;
		}
	}

	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
	if (ret) {
		hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
	if (ret)
		hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);

	return ret;
}
static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_txmode *txmode;
	struct hns3_hw *hw = &hns->hw;
	int mask;
	int ret;

	txmode = &data->dev_conf.txmode;
	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
		hns3_warn(hw,
			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
			  "configuration is not supported! Ignore these two "
			  "parameters: hw_vlan_reject_tagged(%u), "
			  "hw_vlan_reject_untagged(%u)",
			  txmode->hw_vlan_reject_tagged,
			  txmode->hw_vlan_reject_untagged);

	/* Apply vlan offload setting */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
	ret = hns3_vlan_offload_set(dev, mask);
	if (ret) {
		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * If no PVID is configured in rte_eth_conf, the driver does not need
	 * to write any PVID related configuration to hardware.
	 */
	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
		return 0;

	/* Apply pvid setting */
	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
				 txmode->hw_vlan_insert_pvid);
	if (ret)
		hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
			 txmode->pvid, ret);

	return ret;
}

static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
		unsigned int tso_mss_max)
{
	struct hns3_cfg_tso_status_cmd *req;
	struct hns3_cmd_desc desc;
	uint16_t tso_mss;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hns3_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_min);
	req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_max);
	req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
		   uint16_t *allocated_size, bool is_alloc)
{
	struct hns3_umv_spc_alc_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_umv_spc_alc_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
	hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
	req->space_size = rte_cpu_to_le_32(space_size);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
			     is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = rte_le_to_cpu_32(desc.data[1]);

	return 0;
}

static int
hns3_init_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t allocated_size = 0;
	int ret;

	ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
				 true);
	if (ret)
		return ret;

	if (allocated_size < pf->wanted_umv_size)
		PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
			     pf->wanted_umv_size, allocated_size);

	pf->max_umv_size = (!!allocated_size) ? allocated_size :
			   pf->wanted_umv_size;
	pf->used_umv_size = 0;
	return 0;
}
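/*
 * The helpers below track the unicast MAC VLAN (UMV) space: "max" is what the
 * firmware granted in hns3_init_umv_space() and "used" counts live unicast
 * entries, so hns3_add_uc_mac_addr() can refuse new entries once the table
 * is full.
 */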
static int
hns3_uninit_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	if (pf->max_umv_size == 0)
		return 0;

	ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
	if (ret)
		return ret;

	pf->max_umv_size = 0;

	return 0;
}

static bool
hns3_is_umv_space_full(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	bool is_full;

	is_full = (pf->used_umv_size >= pf->max_umv_size);

	return is_full;
}

static void
hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (is_free) {
		if (pf->used_umv_size > 0)
			pf->used_umv_size--;
	} else {
		pf->used_umv_size++;
	}
}

static void
hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
		      const uint8_t *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
			    ((uint32_t)mac_addr[2] << 16) |
			    ((uint32_t)mac_addr[1] << 8) |
			    (uint32_t)mac_addr[0];
	uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];

	hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
		hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
	new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
}

static int
hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
			     uint8_t resp_code,
			     enum hns3_mac_vlan_tbl_opcode op)
{
	if (cmdq_resp) {
		hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status, status=%u",
			 cmdq_resp);
		return -EIO;
	}

	if (op == HNS3_MAC_VLAN_ADD) {
		if (resp_code == 0 || resp_code == 1) {
			return 0;
		} else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for uc_overflow");
			return -ENOSPC;
		} else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for mc_overflow");
			return -ENOSPC;
		}

		hns3_err(hw, "add mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_REMOVE) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "remove mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "remove mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_LKUP) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "lookup mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	}

	hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
		 op);

	return -EINVAL;
}
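/*
 * For the MAC VLAN table commands below, the firmware returns two codes: the
 * generic command status in desc[0].retval and an operation-specific response
 * in byte 1 of desc[0].data[0], hence the recurring
 * "(rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff" extraction that is then
 * decoded by hns3_get_mac_vlan_cmd_status().
 */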
static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req,
			 struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int ret;
	int i;

	if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) {
		for (i = 0; i < desc_num - 1; i++) {
			hns3_cmd_setup_basic_desc(&desc[i],
						  HNS3_OPC_MAC_VLAN_ADD, true);
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
			if (i == 0)
				memcpy(desc[i].data, req,
				       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		}
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD,
					  true);
	} else {
		hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
	}
	ret = hns3_cmd_send(hw, desc, desc_num);
	if (ret) {
		hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
			 ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc[0].retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_LKUP);
}

static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
		      struct hns3_mac_vlan_tbl_entry_cmd *req,
		      struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int cfg_status;
	int ret;
	int i;

	if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) {
		hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false);
		memcpy(desc->data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc->retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	} else {
		for (i = 0; i < desc_num; i++) {
			hns3_cmd_reuse_desc(&desc[i], false);
			if (i == desc_num - 1)
				desc[i].flag &=
					rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
			else
				desc[i].flag |=
					rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		}
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		desc[0].retval = 0;
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc[0].retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	}

	if (ret) {
		hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}

	return cfg_status;
}

static int
hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req)
{
	struct hns3_cmd_desc desc;
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc.retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_REMOVE);
}
static int
hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cmd_desc desc;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint16_t egress_port = 0;
	uint8_t vf_id;
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	/*
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver, so just configure parameters for the PF vport.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
		       HNS3_MAC_EPORT_VFID_S, vf_id);

	req.egress_port = rte_cpu_to_le_16(egress_port);

	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

	/*
	 * Lookup the mac address in the mac_vlan table, and add it if the
	 * entry does not exist. Duplicate unicast entries are not allowed in
	 * the mac vlan table.
	 */
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc,
				       HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
	if (ret == -ENOENT) {
		if (!hns3_is_umv_space_full(hw)) {
			ret = hns3_add_mac_vlan_tbl(hw, &req, &desc,
						    HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
			if (!ret)
				hns3_update_umv_space(hw, false);
			return ret;
		}

		hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

		return -ENOSPC;
	}

	hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

	/* check if we just hit the duplicate */
	if (ret == 0) {
		hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
		return 0;
	}

	hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
		 mac_str);

	return ret;
}

static int
hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
	ret = hns3_remove_mac_vlan_tbl(hw, &req);
	if (ret == -ENOENT) /* mac addr is not present in the mac vlan table */
		return 0;
	else if (ret == 0)
		hns3_update_umv_space(hw, true);

	return ret;
}
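/*
 * hns3_set_default_mac_addr() below backs rte_eth_dev_default_mac_addr_set()
 * and is written as a small transaction: delete the old address, add the new
 * one, update the pause address, and on any failure restore whatever had
 * already been changed before returning the error.
 */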
static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mac_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *oaddr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret, ret_val;

	rte_spinlock_lock(&hw->lock);
	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
	ret = hw->ops.del_uc_mac_addr(hw, oaddr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       oaddr);
		hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
			  mac_str, ret);

		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
		goto err_add_uc_addr;
	}

	ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
	if (ret) {
		hns3_err(hw, "Failed to configure mac pause address: %d", ret);
		goto err_pause_addr_cfg;
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	rte_spinlock_unlock(&hw->lock);

	return 0;

err_pause_addr_cfg:
	ret_val = hw->ops.del_uc_mac_addr(hw, mac_addr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_warn(hw,
			  "Failed to roll back by deleting the new mac addr(%s): %d",
			  mac_str, ret_val);
	}

err_add_uc_addr:
	ret_val = hw->ops.add_uc_mac_addr(hw, oaddr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
		hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
			  mac_str, ret_val);
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static void
hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
{
#define HNS3_VF_NUM_IN_FIRST_DESC 192
	uint8_t word_num;
	uint8_t bit_num;

	if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &=
				rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[1].data[word_num] |=
				rte_cpu_to_le_32(1UL << bit_num);
	} else {
		word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &=
				rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[2].data[word_num] |=
				rte_cpu_to_le_32(1UL << bit_num);
	}
}
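/*
 * Worked examples for hns3_update_desc_vfid() above: the per-VF bitmap spans
 * desc[1] (VF 0..191) and desc[2] (VF 192 and up), 32 VFs per 32-bit word:
 *	vfid 5   -> desc[1].data[0], bit 5
 *	vfid 200 -> desc[2].data[(200 - 192) / 32] = data[0], bit 200 % 32 = 8
 */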
static int
hns3_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint8_t vf_id;
	int ret;

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
				       HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}

	/*
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver, so just configure parameters for the PF vport.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_update_desc_vfid(desc, vf_id, false);
	ret = hns3_add_mac_vlan_tbl(hw, &req, desc,
				    HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret) {
		if (ret == -ENOSPC)
			hns3_err(hw, "mc mac vlan table is full");
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
	}

	return ret;
}

static int
hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_cmd_desc desc[3];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint8_t vf_id;
	int ret;

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
				       HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret == 0) {
		/*
		 * This mac addr exists; remove this handle's VFID from it.
		 * In the current version, VF is not supported when the PF is
		 * driven by the DPDK driver, so just configure parameters for
		 * the PF vport.
		 */
		vf_id = HNS3_PF_FUNC_ID;
		hns3_update_desc_vfid(desc, vf_id, true);

		/* All vfid bits are now zero, so delete this entry */
		ret = hns3_remove_mac_vlan_tbl(hw, &req);
	} else if (ret == -ENOENT) {
		/* This mac addr doesn't exist. */
		return 0;
	}

	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
	}

	return ret;
}
static int
hns3_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct rte_eth_dcb_tx_conf *dcb_tx_conf;
	uint8_t num_tc;
	int max_tc = 0;
	int i;

	if (((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
	    (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
	     tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
		hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
			 rx_mq_mode, tx_mq_mode);
		return -EOPNOTSUPP;
	}

	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
	if ((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
				 dcb_rx_conf->nb_tcs, pf->tc_max);
			return -EINVAL;
		}

		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
			hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
				 "nb_tcs(%d) != %d or %d in rx direction.",
				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
			return -EINVAL;
		}

		if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
			hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
				 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}

		for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
			if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
				hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
					 "is not equal to one in tx direction.",
					 i, dcb_rx_conf->dcb_tc[i]);
				return -EINVAL;
			}
			if (dcb_rx_conf->dcb_tc[i] > max_tc)
				max_tc = dcb_rx_conf->dcb_tc[i];
		}

		num_tc = max_tc + 1;
		if (num_tc > dcb_rx_conf->nb_tcs) {
			hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
				 num_tc, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}
	}

	return 0;
}
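/*
 * A DCB configuration that passes hns3_check_mq_mode() above, as a sketch
 * (values are illustrative): nb_tcs must be 4 or 8, identical in both
 * directions, and every prio->TC entry must stay below nb_tcs:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode.mq_mode = RTE_ETH_MQ_RX_DCB_RSS,
 *		.txmode.mq_mode = RTE_ETH_MQ_TX_DCB,
 *		.rx_adv_conf.dcb_rx_conf = {
 *			.nb_tcs = RTE_ETH_4_TCS,
 *			.dcb_tc = { 0, 0, 1, 1, 2, 2, 3, 3 },
 *		},
 *		.tx_adv_conf.dcb_tx_conf = {
 *			.nb_tcs = RTE_ETH_4_TCS,
 *			.dcb_tc = { 0, 0, 1, 1, 2, 2, 3, 3 },
 *		},
 *	};
 */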
HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; 1884 hns3_cmd_setup_basic_desc(&desc, op, false); 1885 req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M, 1886 HNS3_TQP_INT_ID_L_S); 1887 req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M, 1888 HNS3_TQP_INT_ID_H_S); 1889 1890 if (queue_type == HNS3_RING_TYPE_RX) 1891 gl = HNS3_RING_GL_RX; 1892 else 1893 gl = HNS3_RING_GL_TX; 1894 1895 type = queue_type; 1896 1897 hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S, 1898 type); 1899 hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id); 1900 hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S, 1901 gl); 1902 req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); 1903 req->int_cause_num = 1; 1904 ret = hns3_cmd_send(hw, &desc, 1); 1905 if (ret) { 1906 hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.", 1907 en ? "Map" : "Unmap", queue_id, vector_id, ret); 1908 return ret; 1909 } 1910 1911 return 0; 1912 } 1913 1914 static int 1915 hns3_setup_dcb(struct rte_eth_dev *dev) 1916 { 1917 struct hns3_adapter *hns = dev->data->dev_private; 1918 struct hns3_hw *hw = &hns->hw; 1919 int ret; 1920 1921 if (!hns3_dev_get_support(hw, DCB)) { 1922 hns3_err(hw, "this port does not support dcb configurations."); 1923 return -EOPNOTSUPP; 1924 } 1925 1926 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) { 1927 hns3_err(hw, "MAC pause enabled, cannot config dcb info."); 1928 return -EOPNOTSUPP; 1929 } 1930 1931 ret = hns3_dcb_configure(hns); 1932 if (ret) 1933 hns3_err(hw, "failed to config dcb: %d", ret); 1934 1935 return ret; 1936 } 1937 1938 static int 1939 hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds) 1940 { 1941 int ret; 1942 1943 /* 1944 * Some hardware doesn't support auto-negotiation, but users may not 1945 * configure link_speeds (default 0), which means auto-negotiation. 1946 * In this case, it should return success. 1947 */ 1948 if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG && 1949 hw->mac.support_autoneg == 0) 1950 return 0; 1951 1952 if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) { 1953 ret = hns3_check_port_speed(hw, link_speeds); 1954 if (ret) 1955 return ret; 1956 } 1957 1958 return 0; 1959 } 1960 1961 static int 1962 hns3_check_dev_conf(struct rte_eth_dev *dev) 1963 { 1964 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1965 struct rte_eth_conf *conf = &dev->data->dev_conf; 1966 int ret; 1967 1968 ret = hns3_check_mq_mode(dev); 1969 if (ret) 1970 return ret; 1971 1972 return hns3_check_link_speed(hw, conf->link_speeds); 1973 } 1974 1975 static int 1976 hns3_dev_configure(struct rte_eth_dev *dev) 1977 { 1978 struct hns3_adapter *hns = dev->data->dev_private; 1979 struct rte_eth_conf *conf = &dev->data->dev_conf; 1980 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; 1981 struct hns3_hw *hw = &hns->hw; 1982 uint16_t nb_rx_q = dev->data->nb_rx_queues; 1983 uint16_t nb_tx_q = dev->data->nb_tx_queues; 1984 struct rte_eth_rss_conf rss_conf; 1985 bool gro_en; 1986 int ret; 1987 1988 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); 1989 1990 /* 1991 * Some versions of hardware network engine does not support 1992 * individually enable/disable/reset the Tx or Rx queue. These devices 1993 * must enable/disable/reset Tx and Rx queues at the same time. When the 1994 * numbers of Tx queues allocated by upper applications are not equal to 1995 * the numbers of Rx queues, driver needs to setup fake Tx or Rx queues 1996 * to adjust numbers of Tx/Rx queues. 
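	 * For example (illustrative): an application that configures 8 Rx
	 * queues and 4 Tx queues gets 4 fake Tx queues from the driver.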
	 * Otherwise, the network engine cannot work as usual. These fake
	 * queues are invisible to upper applications and cannot be used by
	 * them.
	 */
	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
	if (ret) {
		hns3_err(hw, "failed to set Rx/Tx fake queues, ret = %d.", ret);
		hw->cfg_max_queues = 0;
		return ret;
	}

	hw->adapter_state = HNS3_NIC_CONFIGURING;
	ret = hns3_check_dev_conf(dev);
	if (ret)
		goto cfg_err;

	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
		ret = hns3_setup_dcb(dev);
		if (ret)
			goto cfg_err;
	}

	/* When RSS is not configured, redirect the packet queue 0 */
	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
		rss_conf = conf->rx_adv_conf.rss_conf;
		hw->rss_dis_flag = false;
		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
		if (ret)
			goto cfg_err;
	}

	ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu);
	if (ret != 0)
		goto cfg_err;

	ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
	if (ret)
		goto cfg_err;

	ret = hns3_dev_configure_vlan(dev);
	if (ret)
		goto cfg_err;

	/* config hardware GRO */
	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
	ret = hns3_config_gro(hw, gro_en);
	if (ret)
		goto cfg_err;

	hns3_init_rx_ptype_tble(dev);
	hw->adapter_state = HNS3_NIC_CONFIGURED;

	return 0;

cfg_err:
	hw->cfg_max_queues = 0;
	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
	hw->adapter_state = HNS3_NIC_INITIALIZED;

	return ret;
}

static int
hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
{
	struct hns3_config_max_frm_size_cmd *req;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hns3_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = rte_cpu_to_le_16(new_mps);
	req->min_frm_size = RTE_ETHER_MIN_LEN;

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	int err;
	int ret;

	ret = hns3_set_mac_mtu(hw, mps);
	if (ret) {
		hns3_err(hw, "failed to set mtu, ret = %d", ret);
		return ret;
	}

	ret = hns3_buffer_alloc(hw);
	if (ret) {
		hns3_err(hw, "failed to allocate buffer, ret = %d", ret);
		goto rollback;
	}

	hns->pf.mps = mps;

	return 0;

rollback:
	err = hns3_set_mac_mtu(hw, hns->pf.mps);
	if (err)
		hns3_err(hw, "failed to roll back MTU, err = %d", err);

	return ret;
}

static int
hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (dev->data->dev_started) {
		hns3_err(hw, "Failed to set mtu, port %u must be stopped "
			 "before configuration", dev->data->port_id);
		return -EBUSY;
	}

	rte_spinlock_lock(&hw->lock);
	frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);

	/*
	 * The maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can
	 * safely be assigned to a "uint16_t" variable.
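	 * For example (illustrative, assuming HNS3_ETH_OVERHEAD accounts for
	 * the L2 header, VLAN tags and CRC): mtu = 1500 yields frame_size =
	 * 1500 + HNS3_ETH_OVERHEAD, and the RTE_MAX() above raises anything
	 * smaller to HNS3_DEFAULT_FRAME_LEN.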
	 */
	ret = hns3_config_mtu(hw, (uint16_t)frame_size);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
			 dev->data->port_id, mtu, ret);
		return ret;
	}

	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static uint32_t
hns3_get_copper_port_speed_capa(uint32_t supported_speed)
{
	uint32_t speed_capa = 0;

	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_10M;
	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_100M;
	if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_1G;

	return speed_capa;
}

static uint32_t
hns3_get_fiber_port_speed_capa(uint32_t supported_speed)
{
	uint32_t speed_capa = 0;

	if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_1G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_10G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_25G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_40G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_50G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_100G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_200G;

	return speed_capa;
}

uint32_t
hns3_get_speed_capa(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;
	uint32_t speed_capa;

	if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
		speed_capa =
			hns3_get_copper_port_speed_capa(mac->supported_speed);
	else
		speed_capa =
			hns3_get_fiber_port_speed_capa(mac->supported_speed);

	if (mac->support_autoneg == 0)
		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;

	return speed_capa;
}

static int
hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	(void)hns3_update_link_status(hw);

	ret = hns3_update_link_info(eth_dev);
	if (ret)
		hw->mac.link_status = RTE_ETH_LINK_DOWN;

	return ret;
}

static void
hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
		      struct rte_eth_link *new_link)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct hns3_mac *mac = &hw->mac;

	switch (mac->link_speed) {
	case RTE_ETH_SPEED_NUM_10M:
	case RTE_ETH_SPEED_NUM_100M:
	case RTE_ETH_SPEED_NUM_1G:
	case RTE_ETH_SPEED_NUM_10G:
	case RTE_ETH_SPEED_NUM_25G:
	case RTE_ETH_SPEED_NUM_40G:
	case RTE_ETH_SPEED_NUM_50G:
	case RTE_ETH_SPEED_NUM_100G:
	case RTE_ETH_SPEED_NUM_200G:
		if (mac->link_status)
			new_link->link_speed = mac->link_speed;
		break;
	default:
		if (mac->link_status)
			new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
		break;
	}

	if (!mac->link_status)
		new_link->link_speed =
RTE_ETH_SPEED_NUM_NONE; 2243 2244 new_link->link_duplex = mac->link_duplex; 2245 new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; 2246 new_link->link_autoneg = mac->link_autoneg; 2247 } 2248 2249 static int 2250 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) 2251 { 2252 #define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */ 2253 #define HNS3_MAX_LINK_CHECK_TIMES 20 /* 2s (100 * 20ms) in total */ 2254 2255 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2256 uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES; 2257 struct hns3_mac *mac = &hw->mac; 2258 struct rte_eth_link new_link; 2259 int ret; 2260 2261 /* When port is stopped, report link down. */ 2262 if (eth_dev->data->dev_started == 0) { 2263 new_link.link_autoneg = mac->link_autoneg; 2264 new_link.link_duplex = mac->link_duplex; 2265 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE; 2266 new_link.link_status = RTE_ETH_LINK_DOWN; 2267 goto out; 2268 } 2269 2270 do { 2271 ret = hns3_update_port_link_info(eth_dev); 2272 if (ret) { 2273 hns3_err(hw, "failed to get port link info, ret = %d.", 2274 ret); 2275 break; 2276 } 2277 2278 if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP) 2279 break; 2280 2281 rte_delay_ms(HNS3_LINK_CHECK_INTERVAL); 2282 } while (retry_cnt--); 2283 2284 memset(&new_link, 0, sizeof(new_link)); 2285 hns3_setup_linkstatus(eth_dev, &new_link); 2286 2287 out: 2288 return rte_eth_linkstatus_set(eth_dev, &new_link); 2289 } 2290 2291 static int 2292 hns3_dev_set_link_up(struct rte_eth_dev *dev) 2293 { 2294 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2295 int ret; 2296 2297 /* 2298 * The "tx_pkt_burst" will be restored. But the secondary process does 2299 * not support the mechanism for notifying the primary process. 2300 */ 2301 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2302 hns3_err(hw, "secondary process does not support to set link up."); 2303 return -ENOTSUP; 2304 } 2305 2306 /* 2307 * If device isn't started Rx/Tx function is still disabled, setting 2308 * link up is not allowed. But it is probably better to return success 2309 * to reduce the impact on the upper layer. 2310 */ 2311 if (hw->adapter_state != HNS3_NIC_STARTED) { 2312 hns3_info(hw, "device isn't started, can't set link up."); 2313 return 0; 2314 } 2315 2316 if (!hw->set_link_down) 2317 return 0; 2318 2319 rte_spinlock_lock(&hw->lock); 2320 ret = hns3_cfg_mac_mode(hw, true); 2321 if (ret) { 2322 rte_spinlock_unlock(&hw->lock); 2323 hns3_err(hw, "failed to set link up, ret = %d", ret); 2324 return ret; 2325 } 2326 2327 hw->set_link_down = false; 2328 hns3_start_tx_datapath(dev); 2329 rte_spinlock_unlock(&hw->lock); 2330 2331 return 0; 2332 } 2333 2334 static int 2335 hns3_dev_set_link_down(struct rte_eth_dev *dev) 2336 { 2337 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2338 int ret; 2339 2340 /* 2341 * The "tx_pkt_burst" will be set to dummy function. But the secondary 2342 * process does not support the mechanism for notifying the primary 2343 * process. 2344 */ 2345 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2346 hns3_err(hw, "secondary process does not support to set link down."); 2347 return -ENOTSUP; 2348 } 2349 2350 /* 2351 * If device isn't started or the API has been called, link status is 2352 * down, return success. 
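	 * (hw->set_link_down is set below and cleared again in
	 * hns3_dev_set_link_up().)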
	 */
	if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down)
		return 0;

	rte_spinlock_lock(&hw->lock);
	hns3_stop_tx_datapath(dev);
	ret = hns3_cfg_mac_mode(hw, false);
	if (ret) {
		hns3_start_tx_datapath(dev);
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "failed to set link down, ret = %d", ret);
		return ret;
	}

	hw->set_link_down = true;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (!(status->pf_state & HNS3_PF_STATE_DONE))
		return -EINVAL;

	pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;

	return 0;
}

static int
hns3_query_function_status(struct hns3_hw *hw)
{
#define HNS3_QUERY_MAX_CNT	10
#define HNS3_QUERY_SLEEP_MSECOND	1
	struct hns3_func_status_cmd *req;
	struct hns3_cmd_desc desc;
	int timeout = 0;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hns3_func_status_cmd *)desc.data;

	do {
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			PMD_INIT_LOG(ERR, "query function status failed %d",
				     ret);
			return ret;
		}

		/* Check whether PF reset is done */
		if (req->pf_state)
			break;

		rte_delay_ms(HNS3_QUERY_SLEEP_MSECOND);
	} while (timeout++ < HNS3_QUERY_MAX_CNT);

	return hns3_parse_func_status(hw, req);
}

static int
hns3_get_pf_max_tqp_num(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) {
		/*
		 * The total_tqps_num obtained from firmware is the maximum
		 * number of TQPs on this port, shared by the PF and its VFs.
		 * In most cases the PF does not need that many TQPs.
		 * RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, coming from the config
		 * file, is the user-assigned maximum queue number for the PF
		 * of this port, so users can tune the PF queue number to
		 * their own application scenario. In addition, a lot of
		 * memory can be saved because the room for queue statistics
		 * is allocated according to the actual number of queues
		 * required. The maximum queue number of the PF for a network
		 * engine with revision_id greater than 0x30 is assigned by
		 * the config file.
		 */
		if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) {
			hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) "
				 "must be greater than 0.",
				 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF);
			return -EINVAL;
		}

		hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
				       hw->total_tqps_num);
	} else {
		/*
		 * Due to the limitation on the number of PF interrupts
		 * available, the maximum queue number assigned to the PF on
		 * a network engine with revision_id 0x21 is 64.
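		 * For example (illustrative): with total_tqps_num = 128, the
		 * PF ends up with RTE_MIN(128, HNS3_MAX_TQP_NUM_HIP08_PF)
		 * queues from the RTE_MIN() below.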
2453 */ 2454 hw->tqps_num = RTE_MIN(hw->total_tqps_num, 2455 HNS3_MAX_TQP_NUM_HIP08_PF); 2456 } 2457 2458 return 0; 2459 } 2460 2461 static int 2462 hns3_query_pf_resource(struct hns3_hw *hw) 2463 { 2464 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2465 struct hns3_pf *pf = &hns->pf; 2466 struct hns3_pf_res_cmd *req; 2467 struct hns3_cmd_desc desc; 2468 int ret; 2469 2470 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); 2471 ret = hns3_cmd_send(hw, &desc, 1); 2472 if (ret) { 2473 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); 2474 return ret; 2475 } 2476 2477 req = (struct hns3_pf_res_cmd *)desc.data; 2478 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) + 2479 rte_le_to_cpu_16(req->ext_tqp_num); 2480 ret = hns3_get_pf_max_tqp_num(hw); 2481 if (ret) 2482 return ret; 2483 2484 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; 2485 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); 2486 2487 if (req->tx_buf_size) 2488 pf->tx_buf_size = 2489 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; 2490 else 2491 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; 2492 2493 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); 2494 2495 if (req->dv_buf_size) 2496 pf->dv_buf_size = 2497 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; 2498 else 2499 pf->dv_buf_size = HNS3_DEFAULT_DV; 2500 2501 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); 2502 2503 hw->num_msi = 2504 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), 2505 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); 2506 2507 return 0; 2508 } 2509 2510 static void 2511 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) 2512 { 2513 struct hns3_cfg_param_cmd *req; 2514 uint64_t mac_addr_tmp_high; 2515 uint8_t ext_rss_size_max; 2516 uint64_t mac_addr_tmp; 2517 uint32_t i; 2518 2519 req = (struct hns3_cfg_param_cmd *)desc[0].data; 2520 2521 /* get the configuration */ 2522 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 2523 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); 2524 2525 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2526 HNS3_CFG_PHY_ADDR_M, 2527 HNS3_CFG_PHY_ADDR_S); 2528 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2529 HNS3_CFG_MEDIA_TP_M, 2530 HNS3_CFG_MEDIA_TP_S); 2531 /* get mac address */ 2532 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); 2533 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2534 HNS3_CFG_MAC_ADDR_H_M, 2535 HNS3_CFG_MAC_ADDR_H_S); 2536 2537 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 2538 2539 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2540 HNS3_CFG_DEFAULT_SPEED_M, 2541 HNS3_CFG_DEFAULT_SPEED_S); 2542 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2543 HNS3_CFG_RSS_SIZE_M, 2544 HNS3_CFG_RSS_SIZE_S); 2545 2546 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 2547 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 2548 2549 req = (struct hns3_cfg_param_cmd *)desc[1].data; 2550 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); 2551 2552 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2553 HNS3_CFG_SPEED_ABILITY_M, 2554 HNS3_CFG_SPEED_ABILITY_S); 2555 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2556 HNS3_CFG_UMV_TBL_SPACE_M, 2557 HNS3_CFG_UMV_TBL_SPACE_S); 2558 if (!cfg->umv_space) 2559 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF; 2560 2561 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]), 2562 HNS3_CFG_EXT_RSS_SIZE_M, 2563 
					  HNS3_CFG_EXT_RSS_SIZE_S);
	/*
	 * The ext_rss_size_max field obtained from firmware is an exponent
	 * of 2 rather than a direct value, which leaves room for future
	 * changes and expansions. If this field is not zero, the hns3 PF PMD
	 * uses it as the rss_size_max under one TC. A device whose revision
	 * id is greater than or equal to PCI_REVISION_ID_HIP09_A obtains the
	 * maximum number of queues supported under a TC through this field.
	 */
	if (ext_rss_size_max)
		cfg->rss_size_max = 1U << ext_rss_size_max;
}

/* hns3_get_board_cfg: query the static parameter from the NCL_config file
 * in flash
 * @hw: pointer to struct hns3_hw
 * @hcfg: the config structure to be filled in
 */
static int
hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
{
	struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
	struct hns3_cfg_param_cmd *req;
	uint32_t offset;
	uint32_t i;
	int ret;

	for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
		offset = 0;
		req = (struct hns3_cfg_param_cmd *)desc[i].data;
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
					  true);
		hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
			       i * HNS3_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 when sent to hardware */
		hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
			       HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
		req->offset = rte_cpu_to_le_32(offset);
	}

	ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
	if (ret) {
		PMD_INIT_LOG(ERR, "get config failed %d.", ret);
		return ret;
	}

	hns3_parse_cfg(hcfg, desc);

	return 0;
}

static int
hns3_parse_speed(int speed_cmd, uint32_t *speed)
{
	switch (speed_cmd) {
	case HNS3_CFG_SPEED_10M:
		*speed = RTE_ETH_SPEED_NUM_10M;
		break;
	case HNS3_CFG_SPEED_100M:
		*speed = RTE_ETH_SPEED_NUM_100M;
		break;
	case HNS3_CFG_SPEED_1G:
		*speed = RTE_ETH_SPEED_NUM_1G;
		break;
	case HNS3_CFG_SPEED_10G:
		*speed = RTE_ETH_SPEED_NUM_10G;
		break;
	case HNS3_CFG_SPEED_25G:
		*speed = RTE_ETH_SPEED_NUM_25G;
		break;
	case HNS3_CFG_SPEED_40G:
		*speed = RTE_ETH_SPEED_NUM_40G;
		break;
	case HNS3_CFG_SPEED_50G:
		*speed = RTE_ETH_SPEED_NUM_50G;
		break;
	case HNS3_CFG_SPEED_100G:
		*speed = RTE_ETH_SPEED_NUM_100G;
		break;
	case HNS3_CFG_SPEED_200G:
		*speed = RTE_ETH_SPEED_NUM_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void
hns3_set_default_dev_specifications(struct hns3_hw *hw)
{
	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
	hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
}

static void
hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
{
	struct hns3_dev_specs_0_cmd *req0;

	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;

	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
	hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
	hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
}

static int
hns3_check_dev_specifications(struct hns3_hw *hw) 2677 { 2678 if (hw->rss_ind_tbl_size == 0 || 2679 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { 2680 hns3_err(hw, "the size of hash lookup table configured (%u)" 2681 " exceeds the maximum(%u)", hw->rss_ind_tbl_size, 2682 HNS3_RSS_IND_TBL_SIZE_MAX); 2683 return -EINVAL; 2684 } 2685 2686 return 0; 2687 } 2688 2689 static int 2690 hns3_query_dev_specifications(struct hns3_hw *hw) 2691 { 2692 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; 2693 int ret; 2694 int i; 2695 2696 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 2697 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, 2698 true); 2699 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 2700 } 2701 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); 2702 2703 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); 2704 if (ret) 2705 return ret; 2706 2707 hns3_parse_dev_specifications(hw, desc); 2708 2709 return hns3_check_dev_specifications(hw); 2710 } 2711 2712 static int 2713 hns3_get_capability(struct hns3_hw *hw) 2714 { 2715 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2716 struct rte_pci_device *pci_dev; 2717 struct hns3_pf *pf = &hns->pf; 2718 struct rte_eth_dev *eth_dev; 2719 uint16_t device_id; 2720 int ret; 2721 2722 eth_dev = &rte_eth_devices[hw->data->port_id]; 2723 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 2724 device_id = pci_dev->id.device_id; 2725 2726 if (device_id == HNS3_DEV_ID_25GE_RDMA || 2727 device_id == HNS3_DEV_ID_50GE_RDMA || 2728 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || 2729 device_id == HNS3_DEV_ID_200G_RDMA) 2730 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); 2731 2732 ret = hns3_get_pci_revision_id(hw, &hw->revision); 2733 if (ret) 2734 return ret; 2735 2736 ret = hns3_query_mac_stats_reg_num(hw); 2737 if (ret) 2738 return ret; 2739 2740 if (hw->revision < PCI_REVISION_ID_HIP09_A) { 2741 hns3_set_default_dev_specifications(hw); 2742 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; 2743 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; 2744 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; 2745 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE; 2746 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1; 2747 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; 2748 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE; 2749 hw->rss_info.ipv6_sctp_offload_supported = false; 2750 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; 2751 pf->support_multi_tc_pause = false; 2752 return 0; 2753 } 2754 2755 ret = hns3_query_dev_specifications(hw); 2756 if (ret) { 2757 PMD_INIT_LOG(ERR, 2758 "failed to query dev specifications, ret = %d", 2759 ret); 2760 return ret; 2761 } 2762 2763 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 2764 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 2765 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 2766 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; 2767 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; 2768 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; 2769 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; 2770 hw->rss_info.ipv6_sctp_offload_supported = true; 2771 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; 2772 pf->support_multi_tc_pause = true; 2773 2774 return 0; 2775 } 2776 2777 static int 2778 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) 2779 { 2780 int ret; 2781 2782 switch (media_type) { 2783 case HNS3_MEDIA_TYPE_COPPER: 2784 if (!hns3_dev_get_support(hw, COPPER)) { 2785 PMD_INIT_LOG(ERR, 2786 "Media type is copper, not 
supported."); 2787 ret = -EOPNOTSUPP; 2788 } else { 2789 ret = 0; 2790 } 2791 break; 2792 case HNS3_MEDIA_TYPE_FIBER: 2793 ret = 0; 2794 break; 2795 case HNS3_MEDIA_TYPE_BACKPLANE: 2796 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported."); 2797 ret = -EOPNOTSUPP; 2798 break; 2799 default: 2800 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); 2801 ret = -EINVAL; 2802 break; 2803 } 2804 2805 return ret; 2806 } 2807 2808 static int 2809 hns3_get_board_configuration(struct hns3_hw *hw) 2810 { 2811 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2812 struct hns3_pf *pf = &hns->pf; 2813 struct hns3_cfg cfg; 2814 int ret; 2815 2816 ret = hns3_get_board_cfg(hw, &cfg); 2817 if (ret) { 2818 PMD_INIT_LOG(ERR, "get board config failed %d", ret); 2819 return ret; 2820 } 2821 2822 ret = hns3_check_media_type(hw, cfg.media_type); 2823 if (ret) 2824 return ret; 2825 2826 hw->mac.media_type = cfg.media_type; 2827 hw->rss_size_max = cfg.rss_size_max; 2828 hw->rss_dis_flag = false; 2829 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); 2830 hw->mac.phy_addr = cfg.phy_addr; 2831 hw->dcb_info.num_pg = 1; 2832 hw->dcb_info.hw_pfc_map = 0; 2833 2834 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); 2835 if (ret) { 2836 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d", 2837 cfg.default_speed, ret); 2838 return ret; 2839 } 2840 2841 pf->tc_max = cfg.tc_num; 2842 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { 2843 PMD_INIT_LOG(WARNING, 2844 "Get TC num(%u) from flash, set TC num to 1", 2845 pf->tc_max); 2846 pf->tc_max = 1; 2847 } 2848 2849 /* Dev does not support DCB */ 2850 if (!hns3_dev_get_support(hw, DCB)) { 2851 pf->tc_max = 1; 2852 pf->pfc_max = 0; 2853 } else 2854 pf->pfc_max = pf->tc_max; 2855 2856 hw->dcb_info.num_tc = 1; 2857 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, 2858 hw->tqps_num / hw->dcb_info.num_tc); 2859 hns3_set_bit(hw->hw_tc_map, 0, 1); 2860 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; 2861 2862 pf->wanted_umv_size = cfg.umv_space; 2863 2864 return ret; 2865 } 2866 2867 static int 2868 hns3_get_configuration(struct hns3_hw *hw) 2869 { 2870 int ret; 2871 2872 ret = hns3_query_function_status(hw); 2873 if (ret) { 2874 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); 2875 return ret; 2876 } 2877 2878 /* Get device capability */ 2879 ret = hns3_get_capability(hw); 2880 if (ret) { 2881 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 2882 return ret; 2883 } 2884 2885 /* Get pf resource */ 2886 ret = hns3_query_pf_resource(hw); 2887 if (ret) { 2888 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); 2889 return ret; 2890 } 2891 2892 ret = hns3_get_board_configuration(hw); 2893 if (ret) { 2894 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); 2895 return ret; 2896 } 2897 2898 ret = hns3_query_dev_fec_info(hw); 2899 if (ret) 2900 PMD_INIT_LOG(ERR, 2901 "failed to query FEC information, ret = %d", ret); 2902 2903 return ret; 2904 } 2905 2906 static int 2907 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid, 2908 uint16_t tqp_vid, bool is_pf) 2909 { 2910 struct hns3_tqp_map_cmd *req; 2911 struct hns3_cmd_desc desc; 2912 int ret; 2913 2914 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); 2915 2916 req = (struct hns3_tqp_map_cmd *)desc.data; 2917 req->tqp_id = rte_cpu_to_le_16(tqp_pid); 2918 req->tqp_vf = func_id; 2919 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; 2920 if (!is_pf) 2921 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); 2922 req->tqp_vid = 
rte_cpu_to_le_16(tqp_vid); 2923 2924 ret = hns3_cmd_send(hw, &desc, 1); 2925 if (ret) 2926 PMD_INIT_LOG(ERR, "TQP map failed %d", ret); 2927 2928 return ret; 2929 } 2930 2931 static int 2932 hns3_map_tqp(struct hns3_hw *hw) 2933 { 2934 int ret; 2935 int i; 2936 2937 /* 2938 * In current version, VF is not supported when PF is driven by DPDK 2939 * driver, so we assign total tqps_num tqps allocated to this port 2940 * to PF. 2941 */ 2942 for (i = 0; i < hw->total_tqps_num; i++) { 2943 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true); 2944 if (ret) 2945 return ret; 2946 } 2947 2948 return 0; 2949 } 2950 2951 static int 2952 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 2953 { 2954 struct hns3_config_mac_speed_dup_cmd *req; 2955 struct hns3_cmd_desc desc; 2956 int ret; 2957 2958 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; 2959 2960 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); 2961 2962 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0); 2963 2964 switch (speed) { 2965 case RTE_ETH_SPEED_NUM_10M: 2966 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2967 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); 2968 break; 2969 case RTE_ETH_SPEED_NUM_100M: 2970 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2971 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); 2972 break; 2973 case RTE_ETH_SPEED_NUM_1G: 2974 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2975 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); 2976 break; 2977 case RTE_ETH_SPEED_NUM_10G: 2978 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2979 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); 2980 break; 2981 case RTE_ETH_SPEED_NUM_25G: 2982 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2983 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); 2984 break; 2985 case RTE_ETH_SPEED_NUM_40G: 2986 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2987 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); 2988 break; 2989 case RTE_ETH_SPEED_NUM_50G: 2990 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2991 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); 2992 break; 2993 case RTE_ETH_SPEED_NUM_100G: 2994 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2995 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); 2996 break; 2997 case RTE_ETH_SPEED_NUM_200G: 2998 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 2999 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); 3000 break; 3001 default: 3002 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); 3003 return -EINVAL; 3004 } 3005 3006 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); 3007 3008 ret = hns3_cmd_send(hw, &desc, 1); 3009 if (ret) 3010 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); 3011 3012 return ret; 3013 } 3014 3015 static int 3016 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3017 { 3018 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3019 struct hns3_pf *pf = &hns->pf; 3020 struct hns3_priv_buf *priv; 3021 uint32_t i, total_size; 3022 3023 total_size = pf->pkt_buf_size; 3024 3025 /* alloc tx buffer for all enabled tc */ 3026 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3027 priv = &buf_alloc->priv_buf[i]; 3028 3029 if (hw->hw_tc_map & BIT(i)) { 3030 if (total_size < pf->tx_buf_size) 3031 return -ENOMEM; 3032 3033 priv->tx_buf_size = pf->tx_buf_size; 3034 } else 3035 priv->tx_buf_size = 0; 3036 3037 total_size -= priv->tx_buf_size; 3038 } 3039 3040 return 0; 3041 } 3042 3043 static int 3044 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3045 { 3046 /* TX buffer size is unit by 128 
byte */ 3047 #define HNS3_BUF_SIZE_UNIT_SHIFT 7 3048 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) 3049 struct hns3_tx_buff_alloc_cmd *req; 3050 struct hns3_cmd_desc desc; 3051 uint32_t buf_size; 3052 uint32_t i; 3053 int ret; 3054 3055 req = (struct hns3_tx_buff_alloc_cmd *)desc.data; 3056 3057 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); 3058 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3059 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 3060 3061 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; 3062 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | 3063 HNS3_BUF_SIZE_UPDATE_EN_MSK); 3064 } 3065 3066 ret = hns3_cmd_send(hw, &desc, 1); 3067 if (ret) 3068 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); 3069 3070 return ret; 3071 } 3072 3073 static int 3074 hns3_get_tc_num(struct hns3_hw *hw) 3075 { 3076 int cnt = 0; 3077 uint8_t i; 3078 3079 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3080 if (hw->hw_tc_map & BIT(i)) 3081 cnt++; 3082 return cnt; 3083 } 3084 3085 static uint32_t 3086 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3087 { 3088 struct hns3_priv_buf *priv; 3089 uint32_t rx_priv = 0; 3090 int i; 3091 3092 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3093 priv = &buf_alloc->priv_buf[i]; 3094 if (priv->enable) 3095 rx_priv += priv->buf_size; 3096 } 3097 return rx_priv; 3098 } 3099 3100 static uint32_t 3101 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3102 { 3103 uint32_t total_tx_size = 0; 3104 uint32_t i; 3105 3106 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3107 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 3108 3109 return total_tx_size; 3110 } 3111 3112 /* Get the number of pfc enabled TCs, which have private buffer */ 3113 static int 3114 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3115 { 3116 struct hns3_priv_buf *priv; 3117 int cnt = 0; 3118 uint8_t i; 3119 3120 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3121 priv = &buf_alloc->priv_buf[i]; 3122 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3123 cnt++; 3124 } 3125 3126 return cnt; 3127 } 3128 3129 /* Get the number of pfc disabled TCs, which have private buffer */ 3130 static int 3131 hns3_get_no_pfc_priv_num(struct hns3_hw *hw, 3132 struct hns3_pkt_buf_alloc *buf_alloc) 3133 { 3134 struct hns3_priv_buf *priv; 3135 int cnt = 0; 3136 uint8_t i; 3137 3138 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3139 priv = &buf_alloc->priv_buf[i]; 3140 if (hw->hw_tc_map & BIT(i) && 3141 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3142 cnt++; 3143 } 3144 3145 return cnt; 3146 } 3147 3148 static bool 3149 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, 3150 uint32_t rx_all) 3151 { 3152 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 3153 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3154 struct hns3_pf *pf = &hns->pf; 3155 uint32_t shared_buf, aligned_mps; 3156 uint32_t rx_priv; 3157 uint8_t tc_num; 3158 uint8_t i; 3159 3160 tc_num = hns3_get_tc_num(hw); 3161 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3162 3163 if (hns3_dev_get_support(hw, DCB)) 3164 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + 3165 pf->dv_buf_size; 3166 else 3167 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF 3168 + pf->dv_buf_size; 3169 3170 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 3171 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), 3172 HNS3_BUF_SIZE_UNIT); 3173 3174 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); 3175 if (rx_all < rx_priv + shared_std) 3176 return 
false; 3177 3178 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); 3179 buf_alloc->s_buf.buf_size = shared_buf; 3180 if (hns3_dev_get_support(hw, DCB)) { 3181 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; 3182 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 3183 - roundup(aligned_mps / HNS3_BUF_DIV_BY, 3184 HNS3_BUF_SIZE_UNIT); 3185 } else { 3186 buf_alloc->s_buf.self.high = 3187 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3188 buf_alloc->s_buf.self.low = aligned_mps; 3189 } 3190 3191 if (hns3_dev_get_support(hw, DCB)) { 3192 hi_thrd = shared_buf - pf->dv_buf_size; 3193 3194 if (tc_num <= NEED_RESERVE_TC_NUM) 3195 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT / 3196 BUF_MAX_PERCENT; 3197 3198 if (tc_num) 3199 hi_thrd = hi_thrd / tc_num; 3200 3201 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); 3202 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); 3203 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; 3204 } else { 3205 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3206 lo_thrd = aligned_mps; 3207 } 3208 3209 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3210 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 3211 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 3212 } 3213 3214 return true; 3215 } 3216 3217 static bool 3218 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, 3219 struct hns3_pkt_buf_alloc *buf_alloc) 3220 { 3221 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3222 struct hns3_pf *pf = &hns->pf; 3223 struct hns3_priv_buf *priv; 3224 uint32_t aligned_mps; 3225 uint32_t rx_all; 3226 uint8_t i; 3227 3228 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3229 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3230 3231 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3232 priv = &buf_alloc->priv_buf[i]; 3233 3234 priv->enable = 0; 3235 priv->wl.low = 0; 3236 priv->wl.high = 0; 3237 priv->buf_size = 0; 3238 3239 if (!(hw->hw_tc_map & BIT(i))) 3240 continue; 3241 3242 priv->enable = 1; 3243 if (hw->dcb_info.hw_pfc_map & BIT(i)) { 3244 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT; 3245 priv->wl.high = roundup(priv->wl.low + aligned_mps, 3246 HNS3_BUF_SIZE_UNIT); 3247 } else { 3248 priv->wl.low = 0; 3249 priv->wl.high = max ? 
(aligned_mps * HNS3_BUF_MUL_BY) : 3250 aligned_mps; 3251 } 3252 3253 priv->buf_size = priv->wl.high + pf->dv_buf_size; 3254 } 3255 3256 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3257 } 3258 3259 static bool 3260 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw, 3261 struct hns3_pkt_buf_alloc *buf_alloc) 3262 { 3263 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3264 struct hns3_pf *pf = &hns->pf; 3265 struct hns3_priv_buf *priv; 3266 int no_pfc_priv_num; 3267 uint32_t rx_all; 3268 uint8_t mask; 3269 int i; 3270 3271 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3272 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc); 3273 3274 /* let the last to be cleared first */ 3275 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3276 priv = &buf_alloc->priv_buf[i]; 3277 mask = BIT((uint8_t)i); 3278 if (hw->hw_tc_map & mask && 3279 !(hw->dcb_info.hw_pfc_map & mask)) { 3280 /* Clear the no pfc TC private buffer */ 3281 priv->wl.low = 0; 3282 priv->wl.high = 0; 3283 priv->buf_size = 0; 3284 priv->enable = 0; 3285 no_pfc_priv_num--; 3286 } 3287 3288 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3289 no_pfc_priv_num == 0) 3290 break; 3291 } 3292 3293 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3294 } 3295 3296 static bool 3297 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, 3298 struct hns3_pkt_buf_alloc *buf_alloc) 3299 { 3300 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3301 struct hns3_pf *pf = &hns->pf; 3302 struct hns3_priv_buf *priv; 3303 uint32_t rx_all; 3304 int pfc_priv_num; 3305 uint8_t mask; 3306 int i; 3307 3308 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3309 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc); 3310 3311 /* let the last to be cleared first */ 3312 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3313 priv = &buf_alloc->priv_buf[i]; 3314 mask = BIT((uint8_t)i); 3315 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) { 3316 /* Reduce the number of pfc TC with private buffer */ 3317 priv->wl.low = 0; 3318 priv->enable = 0; 3319 priv->wl.high = 0; 3320 priv->buf_size = 0; 3321 pfc_priv_num--; 3322 } 3323 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3324 pfc_priv_num == 0) 3325 break; 3326 } 3327 3328 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3329 } 3330 3331 static bool 3332 hns3_only_alloc_priv_buff(struct hns3_hw *hw, 3333 struct hns3_pkt_buf_alloc *buf_alloc) 3334 { 3335 #define COMPENSATE_BUFFER 0x3C00 3336 #define COMPENSATE_HALF_MPS_NUM 5 3337 #define PRIV_WL_GAP 0x1800 3338 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3339 struct hns3_pf *pf = &hns->pf; 3340 uint32_t tc_num = hns3_get_tc_num(hw); 3341 uint32_t half_mps = pf->mps >> 1; 3342 struct hns3_priv_buf *priv; 3343 uint32_t min_rx_priv; 3344 uint32_t rx_priv; 3345 uint8_t i; 3346 3347 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3348 if (tc_num) 3349 rx_priv = rx_priv / tc_num; 3350 3351 if (tc_num <= NEED_RESERVE_TC_NUM) 3352 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 3353 3354 /* 3355 * Minimum value of private buffer in rx direction (min_rx_priv) is 3356 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private 3357 * buffer if rx_priv is greater than min_rx_priv. 
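	 * (COMPENSATE_BUFFER is 0x3C00 bytes, i.e. 15KB, and
	 * COMPENSATE_HALF_MPS_NUM * half_mps supplies the 2.5 * MPS term.)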
	 */
	min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
	rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hw->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - pf->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/*
 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hw: pointer to struct hns3_hw
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int
hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hns3_dev_get_support(hw, DCB)) {
		struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
		struct hns3_pf *pf = &hns->pf;
		uint32_t rx_all = pf->pkt_buf_size;

		rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
		if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/*
	 * Try to allocate a private packet buffer for all TCs without a
	 * shared buffer.
	 */
	if (hns3_only_alloc_priv_buff(hw, buf_alloc))
		return 0;

	/*
	 * Try to allocate a private packet buffer for all TCs with a shared
	 * buffer.
	 */
	if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
		return 0;

	/*
	 * Different application scenarios differ in the number of enabled
	 * ports, TCs and no-drop TCs. To obtain better performance, the
	 * driver decreases the private buffer size and reconfigures the
	 * waterline step by step, in the following order: the waterline of
	 * valid TCs first, then the private buffer of PFC-disabled TCs, and
	 * finally the private buffer of PFC-enabled TCs.
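	 * (hns3_rx_buf_calc_all(hw, false, ...) and the two
	 * hns3_drop_*_buf_till_fit() helpers below implement these steps.)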
3430 */ 3431 if (hns3_rx_buf_calc_all(hw, false, buf_alloc)) 3432 return 0; 3433 3434 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc)) 3435 return 0; 3436 3437 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc)) 3438 return 0; 3439 3440 return -ENOMEM; 3441 } 3442 3443 static int 3444 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3445 { 3446 struct hns3_rx_priv_buff_cmd *req; 3447 struct hns3_cmd_desc desc; 3448 uint32_t buf_size; 3449 int ret; 3450 int i; 3451 3452 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false); 3453 req = (struct hns3_rx_priv_buff_cmd *)desc.data; 3454 3455 /* Alloc private buffer TCs */ 3456 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3457 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i]; 3458 3459 req->buf_num[i] = 3460 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S); 3461 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B); 3462 } 3463 3464 buf_size = buf_alloc->s_buf.buf_size; 3465 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) | 3466 (1 << HNS3_TC0_PRI_BUF_EN_B)); 3467 3468 ret = hns3_cmd_send(hw, &desc, 1); 3469 if (ret) 3470 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret); 3471 3472 return ret; 3473 } 3474 3475 static int 3476 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3477 { 3478 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2 3479 struct hns3_rx_priv_wl_buf *req; 3480 struct hns3_priv_buf *priv; 3481 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM]; 3482 int i, j; 3483 int ret; 3484 3485 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) { 3486 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC, 3487 false); 3488 req = (struct hns3_rx_priv_wl_buf *)desc[i].data; 3489 3490 /* The first descriptor set the NEXT bit to 1 */ 3491 if (i == 0) 3492 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3493 else 3494 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3495 3496 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 3497 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j; 3498 3499 priv = &buf_alloc->priv_buf[idx]; 3500 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >> 3501 HNS3_BUF_UNIT_S); 3502 req->tc_wl[j].high |= 3503 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3504 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >> 3505 HNS3_BUF_UNIT_S); 3506 req->tc_wl[j].low |= 3507 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3508 } 3509 } 3510 3511 /* Send 2 descriptor at one time */ 3512 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM); 3513 if (ret) 3514 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d", 3515 ret); 3516 return ret; 3517 } 3518 3519 static int 3520 hns3_common_thrd_config(struct hns3_hw *hw, 3521 struct hns3_pkt_buf_alloc *buf_alloc) 3522 { 3523 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2 3524 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf; 3525 struct hns3_rx_com_thrd *req; 3526 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM]; 3527 struct hns3_tc_thrd *tc; 3528 int tc_idx; 3529 int i, j; 3530 int ret; 3531 3532 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) { 3533 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC, 3534 false); 3535 req = (struct hns3_rx_com_thrd *)&desc[i].data; 3536 3537 /* The first descriptor set the NEXT bit to 1 */ 3538 if (i == 0) 3539 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3540 else 3541 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3542 3543 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 3544 tc_idx 
= i * HNS3_TC_NUM_ONE_DESC + j; 3545 tc = &s_buf->tc_thrd[tc_idx]; 3546 3547 req->com_thrd[j].high = 3548 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); 3549 req->com_thrd[j].high |= 3550 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3551 req->com_thrd[j].low = 3552 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); 3553 req->com_thrd[j].low |= 3554 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3555 } 3556 } 3557 3558 /* Send 2 descriptors at one time */ 3559 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); 3560 if (ret) 3561 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); 3562 3563 return ret; 3564 } 3565 3566 static int 3567 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3568 { 3569 struct hns3_shared_buf *buf = &buf_alloc->s_buf; 3570 struct hns3_rx_com_wl *req; 3571 struct hns3_cmd_desc desc; 3572 int ret; 3573 3574 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); 3575 3576 req = (struct hns3_rx_com_wl *)desc.data; 3577 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); 3578 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3579 3580 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); 3581 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3582 3583 ret = hns3_cmd_send(hw, &desc, 1); 3584 if (ret) 3585 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); 3586 3587 return ret; 3588 } 3589 3590 int 3591 hns3_buffer_alloc(struct hns3_hw *hw) 3592 { 3593 struct hns3_pkt_buf_alloc pkt_buf; 3594 int ret; 3595 3596 memset(&pkt_buf, 0, sizeof(pkt_buf)); 3597 ret = hns3_tx_buffer_calc(hw, &pkt_buf); 3598 if (ret) { 3599 PMD_INIT_LOG(ERR, 3600 "could not calc tx buffer size for all TCs %d", 3601 ret); 3602 return ret; 3603 } 3604 3605 ret = hns3_tx_buffer_alloc(hw, &pkt_buf); 3606 if (ret) { 3607 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); 3608 return ret; 3609 } 3610 3611 ret = hns3_rx_buffer_calc(hw, &pkt_buf); 3612 if (ret) { 3613 PMD_INIT_LOG(ERR, 3614 "could not calc rx priv buffer size for all TCs %d", 3615 ret); 3616 return ret; 3617 } 3618 3619 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); 3620 if (ret) { 3621 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); 3622 return ret; 3623 } 3624 3625 if (hns3_dev_get_support(hw, DCB)) { 3626 ret = hns3_rx_priv_wl_config(hw, &pkt_buf); 3627 if (ret) { 3628 PMD_INIT_LOG(ERR, 3629 "could not configure rx private waterline %d", 3630 ret); 3631 return ret; 3632 } 3633 3634 ret = hns3_common_thrd_config(hw, &pkt_buf); 3635 if (ret) { 3636 PMD_INIT_LOG(ERR, 3637 "could not configure common threshold %d", 3638 ret); 3639 return ret; 3640 } 3641 } 3642 3643 ret = hns3_common_wl_config(hw, &pkt_buf); 3644 if (ret) 3645 PMD_INIT_LOG(ERR, "could not configure common waterline %d", 3646 ret); 3647 3648 return ret; 3649 } 3650 3651 static int 3652 hns3_mac_init(struct hns3_hw *hw) 3653 { 3654 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3655 struct hns3_mac *mac = &hw->mac; 3656 struct hns3_pf *pf = &hns->pf; 3657 int ret; 3658 3659 pf->support_sfp_query = true; 3660 mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 3661 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); 3662 if (ret) { 3663 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); 3664 return ret; 3665 } 3666 3667 mac->link_status = RTE_ETH_LINK_DOWN; 3668 3669 return hns3_config_mtu(hw, pf->mps); 3670 } 3671 3672 static int 3673 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t 
resp_code) 3674 { 3675 #define HNS3_ETHERTYPE_SUCCESS_ADD 0 3676 #define HNS3_ETHERTYPE_ALREADY_ADD 1 3677 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 3678 #define HNS3_ETHERTYPE_KEY_CONFLICT 3 3679 int return_status; 3680 3681 if (cmdq_resp) { 3682 PMD_INIT_LOG(ERR, 3683 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", 3684 cmdq_resp); 3685 return -EIO; 3686 } 3687 3688 switch (resp_code) { 3689 case HNS3_ETHERTYPE_SUCCESS_ADD: 3690 case HNS3_ETHERTYPE_ALREADY_ADD: 3691 return_status = 0; 3692 break; 3693 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: 3694 PMD_INIT_LOG(ERR, 3695 "add mac ethertype failed for manager table overflow."); 3696 return_status = -EIO; 3697 break; 3698 case HNS3_ETHERTYPE_KEY_CONFLICT: 3699 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); 3700 return_status = -EIO; 3701 break; 3702 default: 3703 PMD_INIT_LOG(ERR, 3704 "add mac ethertype failed for undefined, code=%u.", 3705 resp_code); 3706 return_status = -EIO; 3707 break; 3708 } 3709 3710 return return_status; 3711 } 3712 3713 static int 3714 hns3_add_mgr_tbl(struct hns3_hw *hw, 3715 const struct hns3_mac_mgr_tbl_entry_cmd *req) 3716 { 3717 struct hns3_cmd_desc desc; 3718 uint8_t resp_code; 3719 uint16_t retval; 3720 int ret; 3721 3722 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); 3723 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); 3724 3725 ret = hns3_cmd_send(hw, &desc, 1); 3726 if (ret) { 3727 PMD_INIT_LOG(ERR, 3728 "add mac ethertype failed for cmd_send, ret =%d.", 3729 ret); 3730 return ret; 3731 } 3732 3733 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 3734 retval = rte_le_to_cpu_16(desc.retval); 3735 3736 return hns3_get_mac_ethertype_cmd_status(retval, resp_code); 3737 } 3738 3739 static void 3740 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, 3741 int *table_item_num) 3742 { 3743 struct hns3_mac_mgr_tbl_entry_cmd *tbl; 3744 3745 /* 3746 * In current version, we add one item in management table as below: 3747 * 0x0180C200000E -- LLDP MC address 3748 */ 3749 tbl = mgr_table; 3750 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; 3751 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); 3752 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); 3753 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); 3754 tbl->i_port_bitmap = 0x1; 3755 *table_item_num = 1; 3756 } 3757 3758 static int 3759 hns3_init_mgr_tbl(struct hns3_hw *hw) 3760 { 3761 #define HNS_MAC_MGR_TBL_MAX_SIZE 16 3762 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; 3763 int table_item_num; 3764 int ret; 3765 int i; 3766 3767 memset(mgr_table, 0, sizeof(mgr_table)); 3768 hns3_prepare_mgr_tbl(mgr_table, &table_item_num); 3769 for (i = 0; i < table_item_num; i++) { 3770 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); 3771 if (ret) { 3772 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d", 3773 ret); 3774 return ret; 3775 } 3776 } 3777 3778 return 0; 3779 } 3780 3781 static void 3782 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, 3783 bool en_mc, bool en_bc, int vport_id) 3784 { 3785 if (!param) 3786 return; 3787 3788 memset(param, 0, sizeof(struct hns3_promisc_param)); 3789 if (en_uc) 3790 param->enable = HNS3_PROMISC_EN_UC; 3791 if (en_mc) 3792 param->enable |= HNS3_PROMISC_EN_MC; 3793 if (en_bc) 3794 param->enable |= HNS3_PROMISC_EN_BC; 3795 param->vf_id = vport_id; 3796 } 3797 3798 static int 3799 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) 3800 { 3801 struct 
hns3_promisc_cfg_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);

	req = (struct hns3_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;
	req->flag = (param->enable << HNS3_PROMISC_EN_B) |
		    HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);

	return ret;
}

static int
hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
{
	struct hns3_promisc_param param;
	bool en_bc_pmc = true;
	uint8_t vf_id;

	/*
	 * In current version VF is not supported when PF is driven by DPDK
	 * driver, just need to configure parameters for PF vport.
	 */
	vf_id = HNS3_PF_FUNC_ID;

	hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
	return hns3_cmd_set_promisc_mode(hw, &param);
}

static int
hns3_promisc_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_promisc_param param;
	uint16_t func_id;
	int ret;

	ret = hns3_set_promisc_mode(hw, false, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret);
		return ret;
	}

	/*
	 * In current version VFs are not supported when PF is driven by DPDK
	 * driver. After the PF has been taken over by DPDK, the original VFs
	 * become invalid, so residual entries are possible. Clear the VFs'
	 * promisc mode during init to avoid unnecessary bandwidth usage.
	 */
	for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) {
		hns3_promisc_param_init(&param, false, false, false, func_id);
		ret = hns3_cmd_set_promisc_mode(hw, &param);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode,"
				     " ret = %d", func_id, ret);
			return ret;
		}
	}

	return 0;
}

static void
hns3_promisc_uninit(struct hns3_hw *hw)
{
	struct hns3_promisc_param param;
	uint16_t func_id;
	int ret;

	func_id = HNS3_PF_FUNC_ID;

	/*
	 * In current version VFs are not supported when PF is driven by
	 * DPDK driver, and the VFs' promisc mode status has been cleared
	 * during init and will not change. So just clear the PF's promisc
	 * mode status during uninit.
	 */
	hns3_promisc_param_init(&param, false, false, false, func_id);
	ret = hns3_cmd_set_promisc_mode(hw, &param);
	if (ret)
		PMD_INIT_LOG(ERR, "failed to clear promisc status during"
			     " uninit, ret = %d", ret);
}

static int
hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	bool allmulti = dev->data->all_multicast ? true : false;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	int err;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_promisc_mode(hw, true, true);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "failed to enable promiscuous mode, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * When promiscuous mode is enabled, disable the vlan filter to let
	 * all packets pass in the receiving direction.
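	 * (The vlan filter state is restored when promiscuous mode is
	 * disabled again in hns3_dev_promiscuous_disable().)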
3915 */ 3916 offloads = dev->data->dev_conf.rxmode.offloads; 3917 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 3918 ret = hns3_enable_vlan_filter(hns, false); 3919 if (ret) { 3920 hns3_err(hw, "failed to enable promiscuous mode due to " 3921 "failure to disable vlan filter, ret = %d", 3922 ret); 3923 err = hns3_set_promisc_mode(hw, false, allmulti); 3924 if (err) 3925 hns3_err(hw, "failed to restore promiscuous " 3926 "status after disable vlan filter " 3927 "failed during enabling promiscuous " 3928 "mode, ret = %d", ret); 3929 } 3930 } 3931 3932 rte_spinlock_unlock(&hw->lock); 3933 3934 return ret; 3935 } 3936 3937 static int 3938 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) 3939 { 3940 bool allmulti = dev->data->all_multicast ? true : false; 3941 struct hns3_adapter *hns = dev->data->dev_private; 3942 struct hns3_hw *hw = &hns->hw; 3943 uint64_t offloads; 3944 int err; 3945 int ret; 3946 3947 /* If now in all_multicast mode, must remain in all_multicast mode. */ 3948 rte_spinlock_lock(&hw->lock); 3949 ret = hns3_set_promisc_mode(hw, false, allmulti); 3950 if (ret) { 3951 rte_spinlock_unlock(&hw->lock); 3952 hns3_err(hw, "failed to disable promiscuous mode, ret = %d", 3953 ret); 3954 return ret; 3955 } 3956 /* when promiscuous mode was disabled, restore the vlan filter status */ 3957 offloads = dev->data->dev_conf.rxmode.offloads; 3958 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 3959 ret = hns3_enable_vlan_filter(hns, true); 3960 if (ret) { 3961 hns3_err(hw, "failed to disable promiscuous mode due to" 3962 " failure to restore vlan filter, ret = %d", 3963 ret); 3964 err = hns3_set_promisc_mode(hw, true, true); 3965 if (err) 3966 hns3_err(hw, "failed to restore promiscuous " 3967 "status after enabling vlan filter " 3968 "failed during disabling promiscuous " 3969 "mode, ret = %d", ret); 3970 } 3971 } 3972 rte_spinlock_unlock(&hw->lock); 3973 3974 return ret; 3975 } 3976 3977 static int 3978 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) 3979 { 3980 struct hns3_adapter *hns = dev->data->dev_private; 3981 struct hns3_hw *hw = &hns->hw; 3982 int ret; 3983 3984 if (dev->data->promiscuous) 3985 return 0; 3986 3987 rte_spinlock_lock(&hw->lock); 3988 ret = hns3_set_promisc_mode(hw, false, true); 3989 rte_spinlock_unlock(&hw->lock); 3990 if (ret) 3991 hns3_err(hw, "failed to enable allmulticast mode, ret = %d", 3992 ret); 3993 3994 return ret; 3995 } 3996 3997 static int 3998 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) 3999 { 4000 struct hns3_adapter *hns = dev->data->dev_private; 4001 struct hns3_hw *hw = &hns->hw; 4002 int ret; 4003 4004 /* If now in promiscuous mode, must remain in all_multicast mode. */ 4005 if (dev->data->promiscuous) 4006 return 0; 4007 4008 rte_spinlock_lock(&hw->lock); 4009 ret = hns3_set_promisc_mode(hw, false, false); 4010 rte_spinlock_unlock(&hw->lock); 4011 if (ret) 4012 hns3_err(hw, "failed to disable allmulticast mode, ret = %d", 4013 ret); 4014 4015 return ret; 4016 } 4017 4018 static int 4019 hns3_dev_promisc_restore(struct hns3_adapter *hns) 4020 { 4021 struct hns3_hw *hw = &hns->hw; 4022 bool allmulti = hw->data->all_multicast ? 
true : false;
	int ret;

	if (hw->data->promiscuous) {
		ret = hns3_set_promisc_mode(hw, true, true);
		if (ret)
			hns3_err(hw, "failed to restore promiscuous mode, "
				 "ret = %d", ret);
		return ret;
	}

	ret = hns3_set_promisc_mode(hw, false, allmulti);
	if (ret)
		hns3_err(hw, "failed to restore allmulticast mode, ret = %d",
			 ret);
	return ret;
}

static int
hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
{
	struct hns3_sfp_info_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
	resp = (struct hns3_sfp_info_cmd *)desc.data;
	resp->query_type = HNS3_ACTIVE_QUERY;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		hns3_warn(hw, "firmware does not support get SFP info, "
			  "ret = %d.", ret);
		return ret;
	} else if (ret) {
		hns3_err(hw, "get sfp info failed, ret = %d.", ret);
		return ret;
	}

	/*
	 * In some cases, the MAC speed obtained from the firmware may be 0;
	 * such a value must not be assigned to mac->link_speed.
	 */
	if (!rte_le_to_cpu_32(resp->sfp_speed))
		return 0;

	mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed);
	/*
	 * If resp->supported_speed is 0, the firmware is an old version and
	 * these parameters should not be updated.
	 */
	if (resp->supported_speed) {
		mac_info->query_type = HNS3_ACTIVE_QUERY;
		mac_info->supported_speed =
				rte_le_to_cpu_32(resp->supported_speed);
		mac_info->support_autoneg = resp->autoneg_ability;
		mac_info->link_autoneg = (resp->autoneg == 0) ?
					 RTE_ETH_LINK_FIXED :
					 RTE_ETH_LINK_AUTONEG;
	} else {
		mac_info->query_type = HNS3_DEFAULT_QUERY;
	}

	return 0;
}

static uint8_t
hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
{
	if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
		duplex = RTE_ETH_LINK_FULL_DUPLEX;

	return duplex;
}

static int
hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
{
	struct hns3_mac *mac = &hw->mac;
	int ret;

	duplex = hns3_check_speed_dup(duplex, speed);
	if (mac->link_speed == speed && mac->link_duplex == duplex)
		return 0;

	ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
	if (ret)
		return ret;

	ret = hns3_port_shaper_update(hw, speed);
	if (ret)
		return ret;

	mac->link_speed = speed;
	mac->link_duplex = duplex;

	return 0;
}

static int
hns3_update_fiber_link_info(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_mac *mac = &hw->mac;
	struct hns3_mac mac_info;
	int ret;

	/*
	 * If the firmware does not support getting the SFP/qSFP speed, return
	 * directly.
	 */
	if (!pf->support_sfp_query)
		return 0;

	memset(&mac_info, 0, sizeof(struct hns3_mac));
	ret = hns3_get_sfp_info(hw, &mac_info);
	if (ret == -EOPNOTSUPP) {
		pf->support_sfp_query = false;
		return ret;
	} else if (ret)
		return ret;

	/* Do nothing if no SFP */
	if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
		return 0;

	/*
	 * If query_type is HNS3_ACTIVE_QUERY, there is no need to reconfigure
	 * the MAC speed. Otherwise, the current firmware only supports
	 * obtaining the SFP speed, and the MAC speed needs to be reconfigured.
	 */
	mac->query_type = mac_info.query_type;
	if (mac->query_type == HNS3_ACTIVE_QUERY) {
		if (mac_info.link_speed != mac->link_speed) {
			ret = hns3_port_shaper_update(hw, mac_info.link_speed);
			if (ret)
				return ret;
		}

		mac->link_speed = mac_info.link_speed;
		mac->supported_speed = mac_info.supported_speed;
		mac->support_autoneg = mac_info.support_autoneg;
		mac->link_autoneg = mac_info.link_autoneg;

		return 0;
	}

	/* Config full duplex for SFP */
	return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
				      RTE_ETH_LINK_FULL_DUPLEX);
}

static void
hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
{
#define HNS3_PHY_SUPPORTED_SPEED_MASK	0x2f

	struct hns3_phy_params_bd0_cmd *req;
	uint32_t supported;

	req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
	mac->link_speed = rte_le_to_cpu_32(req->speed);
	mac->link_duplex = hns3_get_bit(req->duplex, HNS3_PHY_DUPLEX_CFG_B);
	mac->link_autoneg = hns3_get_bit(req->autoneg, HNS3_PHY_AUTONEG_CFG_B);
	mac->advertising = rte_le_to_cpu_32(req->advertising);
	mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
	supported = rte_le_to_cpu_32(req->supported);
	mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK;
	mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT);
}

static int
hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
{
	struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
	uint16_t i;
	int ret;

	for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
					  true);
		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}
	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);

	ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
	if (ret) {
		hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
		return ret;
	}

	hns3_parse_copper_phy_params(desc, mac);

	return 0;
}

static int
hns3_update_copper_link_info(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;
	struct hns3_mac mac_info;
	int ret;

	memset(&mac_info, 0, sizeof(struct hns3_mac));
	ret = hns3_get_copper_phy_params(hw, &mac_info);
	if (ret)
		return ret;

	if (mac_info.link_speed != mac->link_speed) {
		ret = hns3_port_shaper_update(hw, mac_info.link_speed);
		if (ret)
			return ret;
	}

	mac->link_speed = mac_info.link_speed;
	mac->link_duplex = mac_info.link_duplex;
	mac->link_autoneg = mac_info.link_autoneg;
	mac->supported_speed = mac_info.supported_speed;
	mac->advertising = mac_info.advertising;
	mac->lp_advertising = mac_info.lp_advertising;
	mac->support_autoneg = mac_info.support_autoneg;

	return 0;
}

static int
hns3_update_link_info(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret = 0;

	if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
		ret = hns3_update_copper_link_info(hw);
	else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
		ret =
hns3_update_fiber_link_info(hw); 4257 4258 return ret; 4259 } 4260 4261 static int 4262 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) 4263 { 4264 struct hns3_config_mac_mode_cmd *req; 4265 struct hns3_cmd_desc desc; 4266 uint32_t loop_en = 0; 4267 uint8_t val = 0; 4268 int ret; 4269 4270 req = (struct hns3_config_mac_mode_cmd *)desc.data; 4271 4272 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); 4273 if (enable) 4274 val = 1; 4275 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); 4276 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); 4277 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); 4278 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); 4279 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); 4280 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); 4281 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); 4282 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); 4283 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); 4284 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); 4285 4286 /* 4287 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC 4288 * when receiving frames. Otherwise, CRC will be stripped. 4289 */ 4290 if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) 4291 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); 4292 else 4293 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); 4294 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); 4295 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); 4296 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); 4297 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); 4298 4299 ret = hns3_cmd_send(hw, &desc, 1); 4300 if (ret) 4301 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret); 4302 4303 return ret; 4304 } 4305 4306 static int 4307 hns3_get_mac_link_status(struct hns3_hw *hw) 4308 { 4309 struct hns3_link_status_cmd *req; 4310 struct hns3_cmd_desc desc; 4311 int link_status; 4312 int ret; 4313 4314 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); 4315 ret = hns3_cmd_send(hw, &desc, 1); 4316 if (ret) { 4317 hns3_err(hw, "get link status cmd failed %d", ret); 4318 return RTE_ETH_LINK_DOWN; 4319 } 4320 4321 req = (struct hns3_link_status_cmd *)desc.data; 4322 link_status = req->status & HNS3_LINK_STATUS_UP_M; 4323 4324 return !!link_status; 4325 } 4326 4327 static bool 4328 hns3_update_link_status(struct hns3_hw *hw) 4329 { 4330 int state; 4331 4332 state = hns3_get_mac_link_status(hw); 4333 if (state != hw->mac.link_status) { 4334 hw->mac.link_status = state; 4335 hns3_warn(hw, "Link status change to %s!", state ? 
"up" : "down"); 4336 return true; 4337 } 4338 4339 return false; 4340 } 4341 4342 void 4343 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query) 4344 { 4345 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 4346 struct rte_eth_link new_link; 4347 int ret; 4348 4349 if (query) 4350 hns3_update_port_link_info(dev); 4351 4352 memset(&new_link, 0, sizeof(new_link)); 4353 hns3_setup_linkstatus(dev, &new_link); 4354 4355 ret = rte_eth_linkstatus_set(dev, &new_link); 4356 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0) 4357 hns3_start_report_lse(dev); 4358 } 4359 4360 static void 4361 hns3_service_handler(void *param) 4362 { 4363 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 4364 struct hns3_adapter *hns = eth_dev->data->dev_private; 4365 struct hns3_hw *hw = &hns->hw; 4366 4367 if (!hns3_is_reset_pending(hns)) 4368 hns3_update_linkstatus_and_event(hw, true); 4369 else 4370 hns3_warn(hw, "Cancel the query when reset is pending"); 4371 4372 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); 4373 } 4374 4375 static int 4376 hns3_init_hardware(struct hns3_adapter *hns) 4377 { 4378 struct hns3_hw *hw = &hns->hw; 4379 int ret; 4380 4381 /* 4382 * All queue-related HW operations must be performed after the TCAM 4383 * table is configured. 4384 */ 4385 ret = hns3_map_tqp(hw); 4386 if (ret) { 4387 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); 4388 return ret; 4389 } 4390 4391 ret = hns3_init_umv_space(hw); 4392 if (ret) { 4393 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); 4394 return ret; 4395 } 4396 4397 ret = hns3_mac_init(hw); 4398 if (ret) { 4399 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); 4400 goto err_mac_init; 4401 } 4402 4403 ret = hns3_init_mgr_tbl(hw); 4404 if (ret) { 4405 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); 4406 goto err_mac_init; 4407 } 4408 4409 ret = hns3_promisc_init(hw); 4410 if (ret) { 4411 PMD_INIT_LOG(ERR, "Failed to init promisc: %d", 4412 ret); 4413 goto err_mac_init; 4414 } 4415 4416 ret = hns3_init_vlan_config(hns); 4417 if (ret) { 4418 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); 4419 goto err_mac_init; 4420 } 4421 4422 ret = hns3_dcb_init(hw); 4423 if (ret) { 4424 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); 4425 goto err_mac_init; 4426 } 4427 4428 ret = hns3_init_fd_config(hns); 4429 if (ret) { 4430 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); 4431 goto err_mac_init; 4432 } 4433 4434 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); 4435 if (ret) { 4436 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); 4437 goto err_mac_init; 4438 } 4439 4440 ret = hns3_config_gro(hw, false); 4441 if (ret) { 4442 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 4443 goto err_mac_init; 4444 } 4445 4446 /* 4447 * In the initialization clearing the all hardware mapping relationship 4448 * configurations between queues and interrupt vectors is needed, so 4449 * some error caused by the residual configurations, such as the 4450 * unexpected interrupt, can be avoid. 
4451 */ 4452 ret = hns3_init_ring_with_vector(hw); 4453 if (ret) { 4454 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); 4455 goto err_mac_init; 4456 } 4457 4458 return 0; 4459 4460 err_mac_init: 4461 hns3_uninit_umv_space(hw); 4462 return ret; 4463 } 4464 4465 static int 4466 hns3_clear_hw(struct hns3_hw *hw) 4467 { 4468 struct hns3_cmd_desc desc; 4469 int ret; 4470 4471 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false); 4472 4473 ret = hns3_cmd_send(hw, &desc, 1); 4474 if (ret && ret != -EOPNOTSUPP) 4475 return ret; 4476 4477 return 0; 4478 } 4479 4480 static void 4481 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable) 4482 { 4483 uint32_t val; 4484 4485 /* 4486 * The new firmware support report more hardware error types by 4487 * msix mode. These errors are defined as RAS errors in hardware 4488 * and belong to a different type from the MSI-x errors processed 4489 * by the network driver. 4490 * 4491 * Network driver should open the new error report on initialization. 4492 */ 4493 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 4494 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0); 4495 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); 4496 } 4497 4498 static uint32_t 4499 hns3_set_firber_default_support_speed(struct hns3_hw *hw) 4500 { 4501 struct hns3_mac *mac = &hw->mac; 4502 4503 switch (mac->link_speed) { 4504 case RTE_ETH_SPEED_NUM_1G: 4505 return HNS3_FIBER_LINK_SPEED_1G_BIT; 4506 case RTE_ETH_SPEED_NUM_10G: 4507 return HNS3_FIBER_LINK_SPEED_10G_BIT; 4508 case RTE_ETH_SPEED_NUM_25G: 4509 return HNS3_FIBER_LINK_SPEED_25G_BIT; 4510 case RTE_ETH_SPEED_NUM_40G: 4511 return HNS3_FIBER_LINK_SPEED_40G_BIT; 4512 case RTE_ETH_SPEED_NUM_50G: 4513 return HNS3_FIBER_LINK_SPEED_50G_BIT; 4514 case RTE_ETH_SPEED_NUM_100G: 4515 return HNS3_FIBER_LINK_SPEED_100G_BIT; 4516 case RTE_ETH_SPEED_NUM_200G: 4517 return HNS3_FIBER_LINK_SPEED_200G_BIT; 4518 default: 4519 hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed); 4520 return 0; 4521 } 4522 } 4523 4524 /* 4525 * Validity of supported_speed for fiber and copper media type can be 4526 * guaranteed by the following policy: 4527 * Copper: 4528 * Although the initialization of the phy in the firmware may not be 4529 * completed, the firmware can guarantees that the supported_speed is 4530 * an valid value. 4531 * Firber: 4532 * If the version of firmware supports the active query way of the 4533 * HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained 4534 * through it. If unsupported, use the SFP's speed as the value of the 4535 * supported_speed. 4536 */ 4537 static int 4538 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev) 4539 { 4540 struct hns3_adapter *hns = eth_dev->data->dev_private; 4541 struct hns3_hw *hw = &hns->hw; 4542 struct hns3_mac *mac = &hw->mac; 4543 int ret; 4544 4545 ret = hns3_update_link_info(eth_dev); 4546 if (ret) 4547 return ret; 4548 4549 if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) { 4550 /* 4551 * Some firmware does not support the report of supported_speed, 4552 * and only report the effective speed of SFP. In this case, it 4553 * is necessary to use the SFP's speed as the supported_speed. 
4554 */ 4555 if (mac->supported_speed == 0) 4556 mac->supported_speed = 4557 hns3_set_firber_default_support_speed(hw); 4558 } 4559 4560 return 0; 4561 } 4562 4563 static void 4564 hns3_get_fc_autoneg_capability(struct hns3_adapter *hns) 4565 { 4566 struct hns3_mac *mac = &hns->hw.mac; 4567 4568 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) { 4569 hns->pf.support_fc_autoneg = true; 4570 return; 4571 } 4572 4573 /* 4574 * Flow control auto-negotiation requires the cooperation of the driver 4575 * and firmware. Currently, the optical port does not support flow 4576 * control auto-negotiation. 4577 */ 4578 hns->pf.support_fc_autoneg = false; 4579 } 4580 4581 static int 4582 hns3_init_pf(struct rte_eth_dev *eth_dev) 4583 { 4584 struct rte_device *dev = eth_dev->device; 4585 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 4586 struct hns3_adapter *hns = eth_dev->data->dev_private; 4587 struct hns3_hw *hw = &hns->hw; 4588 int ret; 4589 4590 PMD_INIT_FUNC_TRACE(); 4591 4592 /* Get hardware io base address from pcie BAR2 IO space */ 4593 hw->io_base = pci_dev->mem_resource[2].addr; 4594 4595 /* Firmware command queue initialize */ 4596 ret = hns3_cmd_init_queue(hw); 4597 if (ret) { 4598 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); 4599 goto err_cmd_init_queue; 4600 } 4601 4602 hns3_clear_all_event_cause(hw); 4603 4604 /* Firmware command initialize */ 4605 ret = hns3_cmd_init(hw); 4606 if (ret) { 4607 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); 4608 goto err_cmd_init; 4609 } 4610 4611 hns3_tx_push_init(eth_dev); 4612 4613 /* 4614 * To ensure that the hardware environment is clean during 4615 * initialization, the driver actively clear the hardware environment 4616 * during initialization, including PF and corresponding VFs' vlan, mac, 4617 * flow table configurations, etc. 4618 */ 4619 ret = hns3_clear_hw(hw); 4620 if (ret) { 4621 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret); 4622 goto err_cmd_init; 4623 } 4624 4625 /* Hardware statistics of imissed registers cleared. 
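	 *
	 * (Why clear at init - a sketch of the assumed intent: drop counters
	 * left over from before the DPDK takeover would otherwise appear as
	 * stale imissed values in rte_eth_stats_get() right after startup.)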
*/ 4626 ret = hns3_update_imissed_stats(hw, true); 4627 if (ret) { 4628 hns3_err(hw, "clear imissed stats failed, ret = %d", ret); 4629 goto err_cmd_init; 4630 } 4631 4632 hns3_config_all_msix_error(hw, true); 4633 4634 ret = rte_intr_callback_register(pci_dev->intr_handle, 4635 hns3_interrupt_handler, 4636 eth_dev); 4637 if (ret) { 4638 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); 4639 goto err_intr_callback_register; 4640 } 4641 4642 ret = hns3_ptp_init(hw); 4643 if (ret) 4644 goto err_get_config; 4645 4646 /* Enable interrupt */ 4647 rte_intr_enable(pci_dev->intr_handle); 4648 hns3_pf_enable_irq0(hw); 4649 4650 /* Get configuration */ 4651 ret = hns3_get_configuration(hw); 4652 if (ret) { 4653 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); 4654 goto err_get_config; 4655 } 4656 4657 ret = hns3_tqp_stats_init(hw); 4658 if (ret) 4659 goto err_get_config; 4660 4661 ret = hns3_init_hardware(hns); 4662 if (ret) { 4663 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret); 4664 goto err_init_hw; 4665 } 4666 4667 /* Initialize flow director filter list & hash */ 4668 ret = hns3_fdir_filter_init(hns); 4669 if (ret) { 4670 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret); 4671 goto err_fdir; 4672 } 4673 4674 hns3_rss_set_default_args(hw); 4675 4676 ret = hns3_enable_hw_error_intr(hns, true); 4677 if (ret) { 4678 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", 4679 ret); 4680 goto err_enable_intr; 4681 } 4682 4683 ret = hns3_get_port_supported_speed(eth_dev); 4684 if (ret) { 4685 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported " 4686 "by device, ret = %d.", ret); 4687 goto err_supported_speed; 4688 } 4689 4690 hns3_get_fc_autoneg_capability(hns); 4691 4692 hns3_tm_conf_init(eth_dev); 4693 4694 return 0; 4695 4696 err_supported_speed: 4697 (void)hns3_enable_hw_error_intr(hns, false); 4698 err_enable_intr: 4699 hns3_fdir_filter_uninit(hns); 4700 err_fdir: 4701 hns3_uninit_umv_space(hw); 4702 err_init_hw: 4703 hns3_tqp_stats_uninit(hw); 4704 err_get_config: 4705 hns3_pf_disable_irq0(hw); 4706 rte_intr_disable(pci_dev->intr_handle); 4707 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler, 4708 eth_dev); 4709 err_intr_callback_register: 4710 err_cmd_init: 4711 hns3_cmd_uninit(hw); 4712 hns3_cmd_destroy_queue(hw); 4713 err_cmd_init_queue: 4714 hw->io_base = NULL; 4715 4716 return ret; 4717 } 4718 4719 static void 4720 hns3_uninit_pf(struct rte_eth_dev *eth_dev) 4721 { 4722 struct hns3_adapter *hns = eth_dev->data->dev_private; 4723 struct rte_device *dev = eth_dev->device; 4724 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 4725 struct hns3_hw *hw = &hns->hw; 4726 4727 PMD_INIT_FUNC_TRACE(); 4728 4729 hns3_tm_conf_uninit(eth_dev); 4730 hns3_enable_hw_error_intr(hns, false); 4731 hns3_rss_uninit(hns); 4732 (void)hns3_config_gro(hw, false); 4733 hns3_promisc_uninit(hw); 4734 hns3_flow_uninit(eth_dev); 4735 hns3_fdir_filter_uninit(hns); 4736 hns3_uninit_umv_space(hw); 4737 hns3_tqp_stats_uninit(hw); 4738 hns3_config_mac_tnl_int(hw, false); 4739 hns3_pf_disable_irq0(hw); 4740 rte_intr_disable(pci_dev->intr_handle); 4741 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler, 4742 eth_dev); 4743 hns3_config_all_msix_error(hw, false); 4744 hns3_cmd_uninit(hw); 4745 hns3_cmd_destroy_queue(hw); 4746 hw->io_base = NULL; 4747 } 4748 4749 static uint32_t 4750 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds) 4751 { 4752 uint32_t speed_bit; 4753 4754 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) { 4755 
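	/*
	 * Mapping sketch (summarizing the cases below): each single
	 * RTE_ETH_LINK_SPEED_* flag translates to one HNS3_PHY_LINK_SPEED_*
	 * capability bit; a mask with several speeds set falls through to the
	 * default branch, yields 0 and is then rejected by
	 * hns3_check_port_speed(). E.g. (hypothetical application config):
	 *
	 *	conf.link_speeds = RTE_ETH_LINK_SPEED_100M |
	 *			   RTE_ETH_LINK_SPEED_FIXED;
	 *
	 * selects HNS3_PHY_LINK_SPEED_100M_BIT here.
	 */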
case RTE_ETH_LINK_SPEED_10M: 4756 speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT; 4757 break; 4758 case RTE_ETH_LINK_SPEED_10M_HD: 4759 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT; 4760 break; 4761 case RTE_ETH_LINK_SPEED_100M: 4762 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT; 4763 break; 4764 case RTE_ETH_LINK_SPEED_100M_HD: 4765 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT; 4766 break; 4767 case RTE_ETH_LINK_SPEED_1G: 4768 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT; 4769 break; 4770 default: 4771 speed_bit = 0; 4772 break; 4773 } 4774 4775 return speed_bit; 4776 } 4777 4778 static uint32_t 4779 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds) 4780 { 4781 uint32_t speed_bit; 4782 4783 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) { 4784 case RTE_ETH_LINK_SPEED_1G: 4785 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT; 4786 break; 4787 case RTE_ETH_LINK_SPEED_10G: 4788 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT; 4789 break; 4790 case RTE_ETH_LINK_SPEED_25G: 4791 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT; 4792 break; 4793 case RTE_ETH_LINK_SPEED_40G: 4794 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT; 4795 break; 4796 case RTE_ETH_LINK_SPEED_50G: 4797 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT; 4798 break; 4799 case RTE_ETH_LINK_SPEED_100G: 4800 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT; 4801 break; 4802 case RTE_ETH_LINK_SPEED_200G: 4803 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT; 4804 break; 4805 default: 4806 speed_bit = 0; 4807 break; 4808 } 4809 4810 return speed_bit; 4811 } 4812 4813 static int 4814 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) 4815 { 4816 struct hns3_mac *mac = &hw->mac; 4817 uint32_t supported_speed = mac->supported_speed; 4818 uint32_t speed_bit = 0; 4819 4820 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 4821 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds); 4822 else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) 4823 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds); 4824 4825 if (!(speed_bit & supported_speed)) { 4826 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.", 4827 link_speeds); 4828 return -EINVAL; 4829 } 4830 4831 return 0; 4832 } 4833 4834 static uint32_t 4835 hns3_get_link_speed(uint32_t link_speeds) 4836 { 4837 uint32_t speed = RTE_ETH_SPEED_NUM_NONE; 4838 4839 if (link_speeds & RTE_ETH_LINK_SPEED_10M || 4840 link_speeds & RTE_ETH_LINK_SPEED_10M_HD) 4841 speed = RTE_ETH_SPEED_NUM_10M; 4842 if (link_speeds & RTE_ETH_LINK_SPEED_100M || 4843 link_speeds & RTE_ETH_LINK_SPEED_100M_HD) 4844 speed = RTE_ETH_SPEED_NUM_100M; 4845 if (link_speeds & RTE_ETH_LINK_SPEED_1G) 4846 speed = RTE_ETH_SPEED_NUM_1G; 4847 if (link_speeds & RTE_ETH_LINK_SPEED_10G) 4848 speed = RTE_ETH_SPEED_NUM_10G; 4849 if (link_speeds & RTE_ETH_LINK_SPEED_25G) 4850 speed = RTE_ETH_SPEED_NUM_25G; 4851 if (link_speeds & RTE_ETH_LINK_SPEED_40G) 4852 speed = RTE_ETH_SPEED_NUM_40G; 4853 if (link_speeds & RTE_ETH_LINK_SPEED_50G) 4854 speed = RTE_ETH_SPEED_NUM_50G; 4855 if (link_speeds & RTE_ETH_LINK_SPEED_100G) 4856 speed = RTE_ETH_SPEED_NUM_100G; 4857 if (link_speeds & RTE_ETH_LINK_SPEED_200G) 4858 speed = RTE_ETH_SPEED_NUM_200G; 4859 4860 return speed; 4861 } 4862 4863 static uint8_t 4864 hns3_get_link_duplex(uint32_t link_speeds) 4865 { 4866 if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) || 4867 (link_speeds & RTE_ETH_LINK_SPEED_100M_HD)) 4868 return RTE_ETH_LINK_HALF_DUPLEX; 4869 else 4870 return RTE_ETH_LINK_FULL_DUPLEX; 4871 } 4872 4873 static int 4874 hns3_set_copper_port_link_speed(struct hns3_hw *hw, 4875 struct 
hns3_set_link_speed_cfg *cfg)
{
	struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
	struct hns3_phy_params_bd0_cmd *req;
	uint16_t i;

	for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
					  false);
		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}
	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false);
	req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
	req->autoneg = cfg->autoneg;

	/*
	 * The full speed capability is used to negotiate when
	 * auto-negotiation is enabled.
	 */
	if (cfg->autoneg) {
		req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT |
				   HNS3_PHY_LINK_SPEED_10M_HD_BIT |
				   HNS3_PHY_LINK_SPEED_100M_BIT |
				   HNS3_PHY_LINK_SPEED_100M_HD_BIT |
				   HNS3_PHY_LINK_SPEED_1000M_BIT;
	} else {
		req->speed = cfg->speed;
		req->duplex = cfg->duplex;
	}

	return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
}

static int
hns3_set_autoneg(struct hns3_hw *hw, bool enable)
{
	struct hns3_config_auto_neg_cmd *req;
	struct hns3_cmd_desc desc;
	uint32_t flag = 0;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false);

	req = (struct hns3_config_auto_neg_cmd *)desc.data;
	if (enable)
		hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1);
	req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret);

	return ret;
}

static int
hns3_set_fiber_port_link_speed(struct hns3_hw *hw,
			       struct hns3_set_link_speed_cfg *cfg)
{
	int ret;

	if (hw->mac.support_autoneg) {
		ret = hns3_set_autoneg(hw, cfg->autoneg);
		if (ret) {
			hns3_err(hw, "failed to configure auto-negotiation.");
			return ret;
		}

		/*
		 * To enable auto-negotiation, we only need to turn on the
		 * auto-negotiation switch; the firmware then advertises all
		 * speed capabilities.
		 */
		if (cfg->autoneg)
			return 0;
	}

	/*
	 * Some hardware doesn't support auto-negotiation, but users may not
	 * have configured link_speeds (default 0), which means
	 * auto-negotiation. In this case a warning message needs to be
	 * printed, instead of an error.
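	 *
	 * (E.g., a sketch: leaving dev_conf.link_speeds at
	 * RTE_ETH_LINK_SPEED_AUTONEG (0) makes hns3_apply_link_speed() request
	 * autoneg; on a fixed-speed fiber port without autoneg capability that
	 * request ends up here and only the warning below is emitted.)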
 */
	if (cfg->autoneg) {
		hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
		return 0;
	}

	return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
}

static int
hns3_set_port_link_speed(struct hns3_hw *hw,
			 struct hns3_set_link_speed_cfg *cfg)
{
	int ret;

	if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) {
#if defined(RTE_HNS3_ONLY_1630_FPGA)
		struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
		if (pf->is_tmp_phy)
			return 0;
#endif

		ret = hns3_set_copper_port_link_speed(hw, cfg);
		if (ret) {
			hns3_err(hw, "failed to set copper port link speed, "
				 "ret = %d.", ret);
			return ret;
		}
	} else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) {
		ret = hns3_set_fiber_port_link_speed(hw, cfg);
		if (ret) {
			hns3_err(hw, "failed to set fiber port link speed, "
				 "ret = %d.", ret);
			return ret;
		}
	}

	return 0;
}

static int
hns3_apply_link_speed(struct hns3_hw *hw)
{
	struct rte_eth_conf *conf = &hw->data->dev_conf;
	struct hns3_set_link_speed_cfg cfg;

	memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
	cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
		      RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
	if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
		cfg.speed = hns3_get_link_speed(conf->link_speeds);
		cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
	}

	return hns3_set_port_link_speed(hw, &cfg);
}

static int
hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	bool link_en;
	int ret;

	ret = hns3_update_queue_map_configure(hns);
	if (ret) {
		hns3_err(hw, "failed to update queue mapping configuration, ret = %d",
			 ret);
		return ret;
	}

	/* Note: hns3_tm_conf_update must be called after configuring DCB. */
	ret = hns3_tm_conf_update(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
		return ret;
	}

	hns3_enable_rxd_adv_layout(hw);

	ret = hns3_init_queues(hns, reset_queue);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
		return ret;
	}

	link_en = hw->set_link_down ? false : true;
	ret = hns3_cfg_mac_mode(hw, link_en);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
		goto err_config_mac_mode;
	}

	ret = hns3_apply_link_speed(hw);
	if (ret)
		goto err_set_link_speed;

	return 0;

err_set_link_speed:
	(void)hns3_cfg_mac_mode(hw, false);

err_config_mac_mode:
	hns3_dev_release_mbufs(hns);
	/*
	 * This is the exception-handling path. hns3_reset_all_tqps() logs its
	 * own error message if it fails, so there is no need to check its
	 * return value here; keep ret as the error code that caused the
	 * exception.
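	 *
	 * (Rollback order sketch, following the labels above:
	 * err_set_link_speed first forces the MAC back down via
	 * hns3_cfg_mac_mode(hw, false), then err_config_mac_mode releases the
	 * mbufs and resets the TQPs, so the device is left in the same state
	 * as before hns3_do_start() ran.)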
 */
	(void)hns3_reset_all_tqps(hns);
	return ret;
}

static void
hns3_restore_filter(struct rte_eth_dev *dev)
{
	hns3_restore_rss_filter(dev);
}

static int
hns3_dev_start(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool old_state = hw->set_link_down;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
		return -EBUSY;

	rte_spinlock_lock(&hw->lock);
	hw->adapter_state = HNS3_NIC_STARTING;

	/*
	 * If the dev_set_link_down() API has been called, the "set_link_down"
	 * flag can be cleared by the dev_start() API. In addition, the flag
	 * should also be cleared before calling hns3_do_start() so that the
	 * MAC can be enabled in the dev_start stage.
	 */
	hw->set_link_down = false;
	ret = hns3_do_start(hns, true);
	if (ret)
		goto do_start_fail;

	ret = hns3_map_rx_interrupt(dev);
	if (ret)
		goto map_rx_inter_err;

	/*
	 * In the new version of the network engine, three registers are used
	 * to control the status of a TQP (which contains a pair of Tx and Rx
	 * queues): one controls the enabling of the Tx queue, another controls
	 * the enabling of the Rx queue, and the last is the master switch
	 * controlling the enabling of the whole TQP. The Tx register and the
	 * TQP register must both be enabled to enable a Tx queue; the same
	 * applies to the Rx queue. For the older network engine, this function
	 * only refreshes the enabled flag and is used to update the queue
	 * status in the DPDK framework.
	 */
	ret = hns3_start_all_txqs(dev);
	if (ret)
		goto map_rx_inter_err;

	ret = hns3_start_all_rxqs(dev);
	if (ret)
		goto start_all_rxqs_fail;

	hw->adapter_state = HNS3_NIC_STARTED;
	rte_spinlock_unlock(&hw->lock);

	hns3_rx_scattered_calc(dev);
	hns3_set_rxtx_function(dev);
	hns3_mp_req_start_rxtx(dev);

	hns3_restore_filter(dev);

	/* Enable interrupt of all rx queues before enabling queues */
	hns3_dev_all_rx_queue_intr_enable(hw, true);

	/*
	 * After finishing the initialization, enable TQPs to receive/transmit
	 * packets and refresh all queue status.
	 */
	hns3_start_tqps(hw);

	hns3_tm_dev_start_proc(hw);

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		hns3_dev_link_update(dev, 0);
	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);

	hns3_info(hw, "hns3 dev start successful!");

	return 0;

start_all_rxqs_fail:
	hns3_stop_all_txqs(dev);
map_rx_inter_err:
	(void)hns3_do_stop(hns);
do_start_fail:
	hw->set_link_down = old_state;
	hw->adapter_state = HNS3_NIC_CONFIGURED;
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_do_stop(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * The "hns3_do_stop" function will also be called by .stop_service to
	 * prepare a reset. At the time of a global or IMP reset, the command
	 * cannot be sent to stop the Tx/Rx queues. The mbufs in the Tx/Rx
	 * queues may be accessed during the reset process, so they cannot be
	 * released during reset and must be released after the reset
	 * completes.
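	 *
	 * (Sketch of the two paths: a plain dev_stop sees resetting == 0 and
	 * frees the mbufs right below, while .stop_service skips the free and
	 * sets hw->reset.mbuf_deferred_free in hns3_stop_service() so the
	 * release happens once the reset has completed.)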
5179 */ 5180 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) 5181 hns3_dev_release_mbufs(hns); 5182 5183 ret = hns3_cfg_mac_mode(hw, false); 5184 if (ret) 5185 return ret; 5186 hw->mac.link_status = RTE_ETH_LINK_DOWN; 5187 5188 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { 5189 hns3_configure_all_mac_addr(hns, true); 5190 ret = hns3_reset_all_tqps(hns); 5191 if (ret) { 5192 hns3_err(hw, "failed to reset all queues ret = %d.", 5193 ret); 5194 return ret; 5195 } 5196 } 5197 5198 return 0; 5199 } 5200 5201 static int 5202 hns3_dev_stop(struct rte_eth_dev *dev) 5203 { 5204 struct hns3_adapter *hns = dev->data->dev_private; 5205 struct hns3_hw *hw = &hns->hw; 5206 5207 PMD_INIT_FUNC_TRACE(); 5208 dev->data->dev_started = 0; 5209 5210 hw->adapter_state = HNS3_NIC_STOPPING; 5211 hns3_set_rxtx_function(dev); 5212 rte_wmb(); 5213 /* Disable datapath on secondary process. */ 5214 hns3_mp_req_stop_rxtx(dev); 5215 /* Prevent crashes when queues are still in use. */ 5216 rte_delay_ms(hw->cfg_max_queues); 5217 5218 rte_spinlock_lock(&hw->lock); 5219 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { 5220 hns3_tm_dev_stop_proc(hw); 5221 hns3_config_mac_tnl_int(hw, false); 5222 hns3_stop_tqps(hw); 5223 hns3_do_stop(hns); 5224 hns3_unmap_rx_interrupt(dev); 5225 hw->adapter_state = HNS3_NIC_CONFIGURED; 5226 } 5227 hns3_rx_scattered_reset(dev); 5228 rte_eal_alarm_cancel(hns3_service_handler, dev); 5229 hns3_stop_report_lse(dev); 5230 rte_spinlock_unlock(&hw->lock); 5231 5232 return 0; 5233 } 5234 5235 static int 5236 hns3_dev_close(struct rte_eth_dev *eth_dev) 5237 { 5238 struct hns3_adapter *hns = eth_dev->data->dev_private; 5239 struct hns3_hw *hw = &hns->hw; 5240 int ret = 0; 5241 5242 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 5243 hns3_mp_uninit(eth_dev); 5244 return 0; 5245 } 5246 5247 if (hw->adapter_state == HNS3_NIC_STARTED) 5248 ret = hns3_dev_stop(eth_dev); 5249 5250 hw->adapter_state = HNS3_NIC_CLOSING; 5251 hns3_reset_abort(hns); 5252 hw->adapter_state = HNS3_NIC_CLOSED; 5253 5254 hns3_configure_all_mc_mac_addr(hns, true); 5255 hns3_remove_all_vlan_table(hns); 5256 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0); 5257 hns3_uninit_pf(eth_dev); 5258 hns3_free_all_queues(eth_dev); 5259 rte_free(hw->reset.wait_data); 5260 hns3_mp_uninit(eth_dev); 5261 hns3_warn(hw, "Close port %u finished", hw->data->port_id); 5262 5263 return ret; 5264 } 5265 5266 static void 5267 hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause, 5268 bool *tx_pause) 5269 { 5270 struct hns3_mac *mac = &hw->mac; 5271 uint32_t advertising = mac->advertising; 5272 uint32_t lp_advertising = mac->lp_advertising; 5273 *rx_pause = false; 5274 *tx_pause = false; 5275 5276 if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) { 5277 *rx_pause = true; 5278 *tx_pause = true; 5279 } else if (advertising & lp_advertising & 5280 HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) { 5281 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5282 *rx_pause = true; 5283 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5284 *tx_pause = true; 5285 } 5286 } 5287 5288 static enum hns3_fc_mode 5289 hns3_get_autoneg_fc_mode(struct hns3_hw *hw) 5290 { 5291 enum hns3_fc_mode current_mode; 5292 bool rx_pause = false; 5293 bool tx_pause = false; 5294 5295 switch (hw->mac.media_type) { 5296 case HNS3_MEDIA_TYPE_COPPER: 5297 hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause); 5298 break; 5299 5300 /* 5301 * Flow control auto-negotiation is not supported for 
fiber and 5302 * backplane media type. 5303 */ 5304 case HNS3_MEDIA_TYPE_FIBER: 5305 case HNS3_MEDIA_TYPE_BACKPLANE: 5306 hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled."); 5307 current_mode = hw->requested_fc_mode; 5308 goto out; 5309 default: 5310 hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).", 5311 hw->mac.media_type); 5312 current_mode = HNS3_FC_NONE; 5313 goto out; 5314 } 5315 5316 if (rx_pause && tx_pause) 5317 current_mode = HNS3_FC_FULL; 5318 else if (rx_pause) 5319 current_mode = HNS3_FC_RX_PAUSE; 5320 else if (tx_pause) 5321 current_mode = HNS3_FC_TX_PAUSE; 5322 else 5323 current_mode = HNS3_FC_NONE; 5324 5325 out: 5326 return current_mode; 5327 } 5328 5329 static enum hns3_fc_mode 5330 hns3_get_current_fc_mode(struct rte_eth_dev *dev) 5331 { 5332 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5333 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5334 struct hns3_mac *mac = &hw->mac; 5335 5336 /* 5337 * When the flow control mode is obtained, the device may not complete 5338 * auto-negotiation. It is necessary to wait for link establishment. 5339 */ 5340 (void)hns3_dev_link_update(dev, 1); 5341 5342 /* 5343 * If the link auto-negotiation of the nic is disabled, or the flow 5344 * control auto-negotiation is not supported, the forced flow control 5345 * mode is used. 5346 */ 5347 if (mac->link_autoneg == 0 || !pf->support_fc_autoneg) 5348 return hw->requested_fc_mode; 5349 5350 return hns3_get_autoneg_fc_mode(hw); 5351 } 5352 5353 int 5354 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5355 { 5356 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5357 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5358 enum hns3_fc_mode current_mode; 5359 5360 current_mode = hns3_get_current_fc_mode(dev); 5361 switch (current_mode) { 5362 case HNS3_FC_FULL: 5363 fc_conf->mode = RTE_ETH_FC_FULL; 5364 break; 5365 case HNS3_FC_TX_PAUSE: 5366 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 5367 break; 5368 case HNS3_FC_RX_PAUSE: 5369 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 5370 break; 5371 case HNS3_FC_NONE: 5372 default: 5373 fc_conf->mode = RTE_ETH_FC_NONE; 5374 break; 5375 } 5376 5377 fc_conf->pause_time = pf->pause_time; 5378 fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0; 5379 5380 return 0; 5381 } 5382 5383 static int 5384 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) 5385 { 5386 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 5387 5388 if (!pf->support_fc_autoneg) { 5389 if (autoneg != 0) { 5390 hns3_err(hw, "unsupported fc auto-negotiation setting."); 5391 return -EOPNOTSUPP; 5392 } 5393 5394 /* 5395 * Flow control auto-negotiation of the NIC is not supported, 5396 * but other auto-negotiation features may be supported. 5397 */ 5398 if (autoneg != hw->mac.link_autoneg) { 5399 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!"); 5400 return -EOPNOTSUPP; 5401 } 5402 5403 return 0; 5404 } 5405 5406 /* 5407 * If flow control auto-negotiation of the NIC is supported, all 5408 * auto-negotiation features are supported. 
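	 *
	 * (Illustrative check, mirroring the code below: with link autoneg on,
	 * fc_conf->autoneg == 1 passes, while requesting fc_conf->autoneg == 0
	 * here returns -EOPNOTSUPP; toggling autoneg itself has to go through
	 * the 'link_speeds' field of struct rte_eth_conf instead.)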
5409 */ 5410 if (autoneg != hw->mac.link_autoneg) { 5411 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!"); 5412 return -EOPNOTSUPP; 5413 } 5414 5415 return 0; 5416 } 5417 5418 static int 5419 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5420 { 5421 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5422 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5423 int ret; 5424 5425 if (fc_conf->high_water || fc_conf->low_water || 5426 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) { 5427 hns3_err(hw, "Unsupported flow control settings specified, " 5428 "high_water(%u), low_water(%u), send_xon(%u) and " 5429 "mac_ctrl_frame_fwd(%u) must be set to '0'", 5430 fc_conf->high_water, fc_conf->low_water, 5431 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd); 5432 return -EINVAL; 5433 } 5434 5435 ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg); 5436 if (ret) 5437 return ret; 5438 5439 if (!fc_conf->pause_time) { 5440 hns3_err(hw, "Invalid pause time %u setting.", 5441 fc_conf->pause_time); 5442 return -EINVAL; 5443 } 5444 5445 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 5446 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) { 5447 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. " 5448 "current_fc_status = %d", hw->current_fc_status); 5449 return -EOPNOTSUPP; 5450 } 5451 5452 if (hw->num_tc > 1 && !pf->support_multi_tc_pause) { 5453 hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported."); 5454 return -EOPNOTSUPP; 5455 } 5456 5457 rte_spinlock_lock(&hw->lock); 5458 ret = hns3_fc_enable(dev, fc_conf); 5459 rte_spinlock_unlock(&hw->lock); 5460 5461 return ret; 5462 } 5463 5464 static int 5465 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, 5466 struct rte_eth_pfc_conf *pfc_conf) 5467 { 5468 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5469 int ret; 5470 5471 if (!hns3_dev_get_support(hw, DCB)) { 5472 hns3_err(hw, "This port does not support dcb configurations."); 5473 return -EOPNOTSUPP; 5474 } 5475 5476 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water || 5477 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) { 5478 hns3_err(hw, "Unsupported flow control settings specified, " 5479 "high_water(%u), low_water(%u), send_xon(%u) and " 5480 "mac_ctrl_frame_fwd(%u) must be set to '0'", 5481 pfc_conf->fc.high_water, pfc_conf->fc.low_water, 5482 pfc_conf->fc.send_xon, 5483 pfc_conf->fc.mac_ctrl_frame_fwd); 5484 return -EINVAL; 5485 } 5486 if (pfc_conf->fc.autoneg) { 5487 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 5488 return -EINVAL; 5489 } 5490 if (pfc_conf->fc.pause_time == 0) { 5491 hns3_err(hw, "Invalid pause time %u setting.", 5492 pfc_conf->fc.pause_time); 5493 return -EINVAL; 5494 } 5495 5496 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 5497 hw->current_fc_status == HNS3_FC_STATUS_PFC)) { 5498 hns3_err(hw, "MAC pause is enabled. Cannot set PFC." 
5499 "current_fc_status = %d", hw->current_fc_status); 5500 return -EOPNOTSUPP; 5501 } 5502 5503 rte_spinlock_lock(&hw->lock); 5504 ret = hns3_dcb_pfc_enable(dev, pfc_conf); 5505 rte_spinlock_unlock(&hw->lock); 5506 5507 return ret; 5508 } 5509 5510 static int 5511 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) 5512 { 5513 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5514 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5515 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; 5516 int i; 5517 5518 rte_spinlock_lock(&hw->lock); 5519 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) 5520 dcb_info->nb_tcs = pf->local_max_tc; 5521 else 5522 dcb_info->nb_tcs = 1; 5523 5524 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) 5525 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; 5526 for (i = 0; i < dcb_info->nb_tcs; i++) 5527 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; 5528 5529 for (i = 0; i < hw->num_tc; i++) { 5530 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; 5531 dcb_info->tc_queue.tc_txq[0][i].base = 5532 hw->tc_queue[i].tqp_offset; 5533 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; 5534 dcb_info->tc_queue.tc_txq[0][i].nb_queue = 5535 hw->tc_queue[i].tqp_count; 5536 } 5537 rte_spinlock_unlock(&hw->lock); 5538 5539 return 0; 5540 } 5541 5542 static int 5543 hns3_reinit_dev(struct hns3_adapter *hns) 5544 { 5545 struct hns3_hw *hw = &hns->hw; 5546 int ret; 5547 5548 ret = hns3_cmd_init(hw); 5549 if (ret) { 5550 hns3_err(hw, "Failed to init cmd: %d", ret); 5551 return ret; 5552 } 5553 5554 ret = hns3_init_hardware(hns); 5555 if (ret) { 5556 hns3_err(hw, "Failed to init hardware: %d", ret); 5557 return ret; 5558 } 5559 5560 ret = hns3_reset_all_tqps(hns); 5561 if (ret) { 5562 hns3_err(hw, "Failed to reset all queues: %d", ret); 5563 return ret; 5564 } 5565 5566 ret = hns3_enable_hw_error_intr(hns, true); 5567 if (ret) { 5568 hns3_err(hw, "fail to enable hw error interrupts: %d", 5569 ret); 5570 return ret; 5571 } 5572 hns3_info(hw, "Reset done, driver initialization finished."); 5573 5574 return 0; 5575 } 5576 5577 static bool 5578 is_pf_reset_done(struct hns3_hw *hw) 5579 { 5580 uint32_t val, reg, reg_bit; 5581 5582 switch (hw->reset.level) { 5583 case HNS3_IMP_RESET: 5584 reg = HNS3_GLOBAL_RESET_REG; 5585 reg_bit = HNS3_IMP_RESET_BIT; 5586 break; 5587 case HNS3_GLOBAL_RESET: 5588 reg = HNS3_GLOBAL_RESET_REG; 5589 reg_bit = HNS3_GLOBAL_RESET_BIT; 5590 break; 5591 case HNS3_FUNC_RESET: 5592 reg = HNS3_FUN_RST_ING; 5593 reg_bit = HNS3_FUN_RST_ING_B; 5594 break; 5595 case HNS3_FLR_RESET: 5596 default: 5597 hns3_err(hw, "Wait for unsupported reset level: %d", 5598 hw->reset.level); 5599 return true; 5600 } 5601 val = hns3_read_dev(hw, reg); 5602 if (hns3_get_bit(val, reg_bit)) 5603 return false; 5604 else 5605 return true; 5606 } 5607 5608 bool 5609 hns3_is_reset_pending(struct hns3_adapter *hns) 5610 { 5611 struct hns3_hw *hw = &hns->hw; 5612 enum hns3_reset_level reset; 5613 5614 hns3_check_event_cause(hns, NULL); 5615 reset = hns3_get_reset_level(hns, &hw->reset.pending); 5616 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 5617 hw->reset.level < reset) { 5618 hns3_warn(hw, "High level reset %d is pending", reset); 5619 return true; 5620 } 5621 reset = hns3_get_reset_level(hns, &hw->reset.request); 5622 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 5623 hw->reset.level < reset) { 5624 hns3_warn(hw, "High level reset %d is 
request", reset); 5625 return true; 5626 } 5627 return false; 5628 } 5629 5630 static int 5631 hns3_wait_hardware_ready(struct hns3_adapter *hns) 5632 { 5633 struct hns3_hw *hw = &hns->hw; 5634 struct hns3_wait_data *wait_data = hw->reset.wait_data; 5635 struct timeval tv; 5636 5637 if (wait_data->result == HNS3_WAIT_SUCCESS) 5638 return 0; 5639 else if (wait_data->result == HNS3_WAIT_TIMEOUT) { 5640 hns3_clock_gettime(&tv); 5641 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", 5642 tv.tv_sec, tv.tv_usec); 5643 return -ETIME; 5644 } else if (wait_data->result == HNS3_WAIT_REQUEST) 5645 return -EAGAIN; 5646 5647 wait_data->hns = hns; 5648 wait_data->check_completion = is_pf_reset_done; 5649 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT * 5650 HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms(); 5651 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC; 5652 wait_data->count = HNS3_RESET_WAIT_CNT; 5653 wait_data->result = HNS3_WAIT_REQUEST; 5654 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); 5655 return -EAGAIN; 5656 } 5657 5658 static int 5659 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) 5660 { 5661 struct hns3_cmd_desc desc; 5662 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data; 5663 5664 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); 5665 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1); 5666 req->fun_reset_vfid = func_id; 5667 5668 return hns3_cmd_send(hw, &desc, 1); 5669 } 5670 5671 static int 5672 hns3_imp_reset_cmd(struct hns3_hw *hw) 5673 { 5674 struct hns3_cmd_desc desc; 5675 5676 hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); 5677 desc.data[0] = 0xeedd; 5678 5679 return hns3_cmd_send(hw, &desc, 1); 5680 } 5681 5682 static void 5683 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) 5684 { 5685 struct hns3_hw *hw = &hns->hw; 5686 struct timeval tv; 5687 uint32_t val; 5688 5689 hns3_clock_gettime(&tv); 5690 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) || 5691 hns3_read_dev(hw, HNS3_FUN_RST_ING)) { 5692 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld", 5693 tv.tv_sec, tv.tv_usec); 5694 return; 5695 } 5696 5697 switch (reset_level) { 5698 case HNS3_IMP_RESET: 5699 hns3_imp_reset_cmd(hw); 5700 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", 5701 tv.tv_sec, tv.tv_usec); 5702 break; 5703 case HNS3_GLOBAL_RESET: 5704 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG); 5705 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1); 5706 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val); 5707 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld", 5708 tv.tv_sec, tv.tv_usec); 5709 break; 5710 case HNS3_FUNC_RESET: 5711 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld", 5712 tv.tv_sec, tv.tv_usec); 5713 /* schedule again to check later */ 5714 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending); 5715 hns3_schedule_reset(hns); 5716 break; 5717 default: 5718 hns3_warn(hw, "Unsupported reset level: %d", reset_level); 5719 return; 5720 } 5721 hns3_atomic_clear_bit(reset_level, &hw->reset.request); 5722 } 5723 5724 static enum hns3_reset_level 5725 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) 5726 { 5727 struct hns3_hw *hw = &hns->hw; 5728 enum hns3_reset_level reset_level = HNS3_NONE_RESET; 5729 5730 /* Return the highest priority reset level amongst all */ 5731 if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels)) 5732 reset_level = HNS3_IMP_RESET; 5733 else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels)) 5734 reset_level = 
HNS3_GLOBAL_RESET; 5735 else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels)) 5736 reset_level = HNS3_FUNC_RESET; 5737 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels)) 5738 reset_level = HNS3_FLR_RESET; 5739 5740 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level) 5741 return HNS3_NONE_RESET; 5742 5743 return reset_level; 5744 } 5745 5746 static void 5747 hns3_record_imp_error(struct hns3_adapter *hns) 5748 { 5749 struct hns3_hw *hw = &hns->hw; 5750 uint32_t reg_val; 5751 5752 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 5753 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) { 5754 hns3_warn(hw, "Detected IMP RD poison!"); 5755 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0); 5756 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 5757 } 5758 5759 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) { 5760 hns3_warn(hw, "Detected IMP CMDQ error!"); 5761 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0); 5762 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 5763 } 5764 } 5765 5766 static int 5767 hns3_prepare_reset(struct hns3_adapter *hns) 5768 { 5769 struct hns3_hw *hw = &hns->hw; 5770 uint32_t reg_val; 5771 int ret; 5772 5773 switch (hw->reset.level) { 5774 case HNS3_FUNC_RESET: 5775 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID); 5776 if (ret) 5777 return ret; 5778 5779 /* 5780 * After performaning pf reset, it is not necessary to do the 5781 * mailbox handling or send any command to firmware, because 5782 * any mailbox handling or command to firmware is only valid 5783 * after hns3_cmd_init is called. 5784 */ 5785 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 5786 hw->reset.stats.request_cnt++; 5787 break; 5788 case HNS3_IMP_RESET: 5789 hns3_record_imp_error(hns); 5790 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 5791 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val | 5792 BIT(HNS3_VECTOR0_IMP_RESET_INT_B)); 5793 break; 5794 default: 5795 break; 5796 } 5797 return 0; 5798 } 5799 5800 static int 5801 hns3_set_rst_done(struct hns3_hw *hw) 5802 { 5803 struct hns3_pf_rst_done_cmd *req; 5804 struct hns3_cmd_desc desc; 5805 5806 req = (struct hns3_pf_rst_done_cmd *)desc.data; 5807 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false); 5808 req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT; 5809 return hns3_cmd_send(hw, &desc, 1); 5810 } 5811 5812 static int 5813 hns3_stop_service(struct hns3_adapter *hns) 5814 { 5815 struct hns3_hw *hw = &hns->hw; 5816 struct rte_eth_dev *eth_dev; 5817 5818 eth_dev = &rte_eth_devices[hw->data->port_id]; 5819 hw->mac.link_status = RTE_ETH_LINK_DOWN; 5820 if (hw->adapter_state == HNS3_NIC_STARTED) { 5821 rte_eal_alarm_cancel(hns3_service_handler, eth_dev); 5822 hns3_update_linkstatus_and_event(hw, false); 5823 } 5824 5825 hns3_set_rxtx_function(eth_dev); 5826 rte_wmb(); 5827 /* Disable datapath on secondary process. */ 5828 hns3_mp_req_stop_rxtx(eth_dev); 5829 rte_delay_ms(hw->cfg_max_queues); 5830 5831 rte_spinlock_lock(&hw->lock); 5832 if (hns->hw.adapter_state == HNS3_NIC_STARTED || 5833 hw->adapter_state == HNS3_NIC_STOPPING) { 5834 hns3_enable_all_queues(hw, false); 5835 hns3_do_stop(hns); 5836 hw->reset.mbuf_deferred_free = true; 5837 } else 5838 hw->reset.mbuf_deferred_free = false; 5839 5840 /* 5841 * It is cumbersome for hardware to pick-and-choose entries for deletion 5842 * from table space. 

static void
hns3_record_imp_error(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t reg_val;

	reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
		hns3_warn(hw, "Detected IMP RD poison!");
		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
	}

	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
		hns3_warn(hw, "Detected IMP CMDQ error!");
		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
	}
}

static int
hns3_prepare_reset(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t reg_val;
	int ret;

	switch (hw->reset.level) {
	case HNS3_FUNC_RESET:
		ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
		if (ret)
			return ret;

		/*
		 * After performing a PF reset, it is not necessary to do any
		 * mailbox handling or send any command to firmware, because
		 * such operations are only valid after hns3_cmd_init is
		 * called.
		 */
		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
		hw->reset.stats.request_cnt++;
		break;
	case HNS3_IMP_RESET:
		hns3_record_imp_error(hns);
		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
		break;
	default:
		break;
	}
	return 0;
}

static int
hns3_set_rst_done(struct hns3_hw *hw)
{
	struct hns3_pf_rst_done_cmd *req;
	struct hns3_cmd_desc desc;

	req = (struct hns3_pf_rst_done_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_stop_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	hw->mac.link_status = RTE_ETH_LINK_DOWN;
	if (hw->adapter_state == HNS3_NIC_STARTED) {
		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
		hns3_update_linkstatus_and_event(hw, false);
	}

	hns3_set_rxtx_function(eth_dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(eth_dev);
	rte_delay_ms(hw->cfg_max_queues);

	rte_spinlock_lock(&hw->lock);
	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
	    hw->adapter_state == HNS3_NIC_STOPPING) {
		hns3_enable_all_queues(hw, false);
		hns3_do_stop(hns);
		hw->reset.mbuf_deferred_free = true;
	} else
		hw->reset.mbuf_deferred_free = false;

	/*
	 * It is cumbersome for hardware to pick-and-choose entries for
	 * deletion from table space. Hence, for a function reset, software
	 * intervention is required to delete the entries.
	 */
	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
		hns3_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_start_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	if (hw->reset.level == HNS3_IMP_RESET ||
	    hw->reset.level == HNS3_GLOBAL_RESET)
		hns3_set_rst_done(hw);
	eth_dev = &rte_eth_devices[hw->data->port_id];
	hns3_set_rxtx_function(eth_dev);
	hns3_mp_req_start_rxtx(eth_dev);
	if (hw->adapter_state == HNS3_NIC_STARTED) {
		/*
		 * The caller of this function already holds hns3_hw.lock.
		 * hns3_service_handler may report an LSC event; in a bonding
		 * application the event callback can invoke the driver's ops,
		 * which may acquire hns3_hw.lock again and thus deadlock.
		 * Defer the call to hns3_service_handler to avoid this.
		 */
		rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
				  hns3_service_handler, eth_dev);

		/* Enable interrupt of all rx queues before enabling queues */
		hns3_dev_all_rx_queue_intr_enable(hw, true);
		/*
		 * The enable state of each rxq and txq will be recovered
		 * after reset, so we need to restore it before enabling all
		 * tqps.
		 */
		hns3_restore_tqp_enable_state(hw);
		/*
		 * When initialization is finished, enable the queues to
		 * receive and transmit packets.
		 */
		hns3_enable_all_queues(hw, true);
	}

	return 0;
}

static int
hns3_restore_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_configure_all_mac_addr(hns, false);
	if (ret)
		return ret;

	ret = hns3_configure_all_mc_mac_addr(hns, false);
	if (ret)
		goto err_mc_mac;

	ret = hns3_dev_promisc_restore(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_table(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_conf(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_all_fdir_filter(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_ptp(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_rx_interrupt(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_gro_conf(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_fec(hw);
	if (ret)
		goto err_promisc;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
		ret = hns3_do_start(hns, false);
		if (ret)
			goto err_promisc;
		hns3_info(hw, "hns3 dev restart successful!");
	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

err_promisc:
	hns3_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
	hns3_configure_all_mac_addr(hns, true);
	return ret;
}

static void
hns3_reset_service(void *param)
{
	struct hns3_adapter *hns = (struct hns3_adapter *)param;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level;
	struct timeval tv_delta;
	struct timeval tv_start;
	struct timeval tv;
	uint64_t msec;
	int ret;

	/*
	 * If the interrupt was not triggered within the delay time, it may
	 * have been lost. It is necessary to handle the interrupt here to
	 * recover from the error.
	 */
	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
			    SCHEDULE_DEFERRED) {
		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
				 __ATOMIC_RELAXED);
		hns3_err(hw, "Handling interrupts in delayed tasks");
		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
		if (reset_level == HNS3_NONE_RESET) {
			hns3_err(hw, "No reset level is set, try IMP reset");
			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
		}
	}
	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);

	/*
	 * Check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is, we need to wait
	 * for the hardware to complete the reset:
	 * a. If we are able to figure out in a reasonable time that the
	 *    hardware has fully reset, we can proceed with the driver and
	 *    client reset.
	 * b. Otherwise, we can come back later to check this status, so
	 *    re-schedule now.
	 */
	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
	if (reset_level != HNS3_NONE_RESET) {
		hns3_clock_gettime(&tv_start);
		ret = hns3_reset_process(hns, reset_level);
		hns3_clock_gettime(&tv);
		timersub(&tv, &tv_start, &tv_delta);
		msec = hns3_clock_calctime_ms(&tv_delta);
		if (msec > HNS3_RESET_PROCESS_MS)
			hns3_err(hw, "%d handle long time delta %" PRIu64
				 " ms time=%ld.%.6ld",
				 hw->reset.level, msec,
				 tv.tv_sec, tv.tv_usec);
		if (ret == -EAGAIN)
			return;
	}

	/* Check if we got any *new* reset requests to be honored */
	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
	if (reset_level != HNS3_NONE_RESET)
		hns3_msix_process(hns, reset_level);
}
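
/*
 * A sketch of how the service above is driven, assuming (as the code in
 * this file suggests) that hns3_schedule_reset() arms an EAL alarm whose
 * callback is hns3_reset_service():
 *
 *	MSI-X error / reset interrupt
 *	  -> hns3_schedule_reset()	sets hw->reset.schedule, arms alarm
 *	    -> hns3_reset_service()	picks the level and calls
 *	       hns3_reset_process()	which runs the hns3_reset_ops steps
 *
 * A return of -EAGAIN from hns3_reset_process() means the reset is not
 * finished; the service simply returns and is re-entered later to resume.
 */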

static unsigned int
hns3_get_speed_capa_num(uint16_t device_id)
{
	unsigned int num;

	switch (device_id) {
	case HNS3_DEV_ID_25GE:
	case HNS3_DEV_ID_25GE_RDMA:
		num = 2;
		break;
	case HNS3_DEV_ID_100G_RDMA_MACSEC:
	case HNS3_DEV_ID_200G_RDMA:
		num = 1;
		break;
	default:
		num = 0;
		break;
	}

	return num;
}

static int
hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
			uint16_t device_id)
{
	switch (device_id) {
	case HNS3_DEV_ID_25GE:
		/* fallthrough */
	case HNS3_DEV_ID_25GE_RDMA:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;

		/* In HNS3 device, the 25G NIC is compatible with 10G rate */
		speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
		speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
		break;
	case HNS3_DEV_ID_100G_RDMA_MACSEC:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
		break;
	case HNS3_DEV_ID_200G_RDMA:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}

static int
hns3_fec_get_capability(struct rte_eth_dev *dev,
			struct rte_eth_fec_capa *speed_fec_capa,
			unsigned int num)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t device_id = pci_dev->id.device_id;
	unsigned int capa_num;
	int ret;

	capa_num = hns3_get_speed_capa_num(device_id);
	if (capa_num == 0) {
		hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
			 device_id);
		return -ENOTSUP;
	}

	if (speed_fec_capa == NULL || num < capa_num)
		return capa_num;

	ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
	if (ret)
		return -ENOTSUP;

	return capa_num;
}
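
/*
 * Usage sketch from the application side, via the generic ethdev FEC API
 * (rte_eth_fec_get_capability() dispatches here through eth_dev_ops). The
 * port_id variable is assumed to identify an hns3 port:
 *
 *	int n = rte_eth_fec_get_capability(port_id, NULL, 0);
 *	if (n > 0) {
 *		struct rte_eth_fec_capa capa[n];
 *
 *		n = rte_eth_fec_get_capability(port_id, capa, n);
 *		// capa[i].speed / capa[i].capa now describe the table above
 *	}
 */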

static int
get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * Reading the HNS3_OPC_CONFIG_FEC_MODE command is not supported on
	 * devices with a link speed below 10 Gbps.
	 */
	if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
		*state = 0;
		return 0;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
	req = (struct hns3_config_fec_cmd *)desc.data;
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "get current fec auto state failed, ret = %d",
			 ret);
		return ret;
	}

	*state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
	return 0;
}

static int
hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
{
	struct hns3_sfp_info_cmd *resp;
	uint32_t tmp_fec_capa;
	uint8_t auto_state;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * If the link is down and AUTO is enabled, AUTO is returned;
	 * otherwise, the configured FEC mode is returned.
	 * If the link is up, the current FEC mode is returned.
	 */
	if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
		ret = get_current_fec_auto_state(hw, &auto_state);
		if (ret)
			return ret;

		if (auto_state == 0x1) {
			*fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
			return 0;
		}
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
	resp = (struct hns3_sfp_info_cmd *)desc.data;
	resp->query_type = HNS3_ACTIVE_QUERY;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		hns3_err(hw, "IMP does not support getting FEC, ret = %d", ret);
		return ret;
	} else if (ret) {
		hns3_err(hw, "get FEC failed, ret = %d", ret);
		return ret;
	}

	/*
	 * The FEC mode order defined in hns3 hardware is inconsistent with
	 * the one defined in the ethdev library, so the sequence needs to
	 * be converted.
	 */
	switch (resp->active_fec) {
	case HNS3_HW_FEC_MODE_NOFEC:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	case HNS3_HW_FEC_MODE_BASER:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
		break;
	case HNS3_HW_FEC_MODE_RS:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		break;
	default:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	}

	*fec_capa = tmp_fec_capa;
	return 0;
}

static int
hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hns3_fec_get_internal(hw, fec_capa);
}

static int
hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);

	req = (struct hns3_config_fec_cmd *)desc.data;
	switch (mode) {
	case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
		hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
		break;
	default:
		return 0;
	}
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set fec mode failed, ret = %d", ret);

	return ret;
}

static uint32_t
get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
{
	struct hns3_mac *mac = &hw->mac;
	uint32_t cur_capa;

	switch (mac->link_speed) {
	case RTE_ETH_SPEED_NUM_10G:
		cur_capa = fec_capa[1].capa;
		break;
	case RTE_ETH_SPEED_NUM_25G:
	case RTE_ETH_SPEED_NUM_100G:
	case RTE_ETH_SPEED_NUM_200G:
		cur_capa = fec_capa[0].capa;
		break;
	default:
		cur_capa = 0;
		break;
	}

	return cur_capa;
}

static bool
is_fec_mode_one_bit_set(uint32_t mode)
{
	int cnt = 0;
	uint8_t i;

	/* Count set bits; iterate over the bits, not the bytes, of mode. */
	for (i = 0; i < sizeof(mode) * 8; i++)
		if (mode >> i & 0x1)
			cnt++;

	return cnt == 1;
}

static int
hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
{
#define FEC_CAPA_NUM 2
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct hns3_pf *pf = &hns->pf;
	struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
	uint32_t cur_capa;
	uint32_t num = FEC_CAPA_NUM;
	int ret;

	ret = hns3_fec_get_capability(dev, fec_capa, num);
	if (ret < 0)
		return ret;

	/* The HNS3 PMD only supports modes with one bit set, e.g. 0x1, 0x4 */
	if (!is_fec_mode_one_bit_set(mode)) {
		hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
			 "FEC mode should have only one bit set", mode);
		return -EINVAL;
	}

	/*
	 * Check whether the configured mode is within the FEC capability.
	 * If not, the configured mode is not supported.
	 */
	cur_capa = get_current_speed_fec_cap(hw, fec_capa);
	if (!(cur_capa & mode)) {
		hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
		return -EINVAL;
	}

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_fec_hw(hw, mode);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	pf->fec_mode = mode;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_restore_fec(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t mode = pf->fec_mode;
	int ret;

	ret = hns3_set_fec_hw(hw, mode);
	if (ret)
		hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
			 mode, ret);

	return ret;
}

static int
hns3_query_dev_fec_info(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
	int ret;

	ret = hns3_fec_get_internal(hw, &pf->fec_mode);
	if (ret)
		hns3_err(hw, "query device FEC info failed, ret = %d", ret);

	return ret;
}

static bool
hns3_optical_module_existed(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	bool existed;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw,
			 "fail to get optical module exist state, ret = %d.\n",
			 ret);
		return false;
	}
	existed = !!desc.data[0];

	return existed;
}

static int
hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
			    uint32_t len, uint8_t *data)
{
#define HNS3_SFP_INFO_CMD_NUM 6
#define HNS3_SFP_INFO_MAX_LEN \
	(HNS3_SFP_INFO_BD0_LEN + \
	(HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
	struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
	struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
	uint16_t read_len;
	uint16_t copy_len;
	int ret;
	int i;

	for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
					  true);
		if (i < HNS3_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}

	sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
	read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);

	ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
	if (ret) {
		hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n",
			 ret);
		return ret;
	}

	/* The data format in BD0 is different from the other BDs. */
	copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return (int)read_len;
}
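
/*
 * Worked example for the batching above, assuming the values defined in
 * hns3_ethdev.h are HNS3_SFP_INFO_BD0_LEN == 20 and
 * HNS3_SFP_INFO_BDX_LEN == 24: one command of HNS3_SFP_INFO_CMD_NUM (6)
 * descriptors moves at most
 *
 *	HNS3_SFP_INFO_MAX_LEN = 20 + 5 * 24 = 140 bytes
 *
 * per firmware round trip, so reading a 256-byte EEPROM page takes two
 * calls: the first returns 140, the second reads the remaining 116 bytes.
 */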

static int
hns3_get_module_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *info)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	uint32_t offset = info->offset;
	uint32_t len = info->length;
	uint8_t *data = info->data;
	uint32_t read_len = 0;

	if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER)
		return -ENOTSUP;

	if (!hns3_optical_module_existed(hw)) {
		hns3_err(hw, "fail to read module EEPROM: no module is connected.\n");
		return -EIO;
	}

	while (read_len < len) {
		int ret;

		ret = hns3_get_module_eeprom_data(hw, offset + read_len,
						  len - read_len,
						  data + read_len);
		if (ret < 0)
			return -EIO;
		read_len += ret;
	}

	return 0;
}

static int
hns3_get_module_info(struct rte_eth_dev *dev,
		     struct rte_eth_dev_module_info *modinfo)
{
#define HNS3_SFF8024_ID_SFP		0x03
#define HNS3_SFF8024_ID_QSFP_8438	0x0c
#define HNS3_SFF8024_ID_QSFP_8436_8636	0x0d
#define HNS3_SFF8024_ID_QSFP28_8636	0x11
#define HNS3_SFF_8636_V1_3		0x03
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct rte_dev_eeprom_info info;
	struct hns3_sfp_type sfp_type;
	int ret;

	memset(&sfp_type, 0, sizeof(sfp_type));
	memset(&info, 0, sizeof(info));
	info.data = (uint8_t *)&sfp_type;
	info.length = sizeof(sfp_type);
	ret = hns3_get_module_eeprom(dev, &info);
	if (ret)
		return ret;

	switch (sfp_type.type) {
	case HNS3_SFF8024_ID_SFP:
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
		break;
	case HNS3_SFF8024_ID_QSFP_8438:
		modinfo->type = RTE_ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
		break;
	case HNS3_SFF8024_ID_QSFP_8436_8636:
		if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
			modinfo->type = RTE_ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
		} else {
			modinfo->type = RTE_ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
		}
		break;
	case HNS3_SFF8024_ID_QSFP28_8636:
		modinfo->type = RTE_ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
		break;
	default:
		hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n",
			 sfp_type.type, sfp_type.ext_type);
		return -EINVAL;
	}

	return 0;
}
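
/*
 * Usage sketch from the application side, via the generic ethdev module
 * API (both calls dispatch to the hns3 ops above). port_id is assumed to
 * identify an hns3 port with a fiber module plugged in:
 *
 *	struct rte_eth_dev_module_info modinfo;
 *	struct rte_dev_eeprom_info info = {0};
 *
 *	if (rte_eth_dev_get_module_info(port_id, &modinfo) == 0) {
 *		info.length = modinfo.eeprom_len;
 *		info.data = malloc(info.length);
 *		if (info.data != NULL &&
 *		    rte_eth_dev_get_module_eeprom(port_id, &info) == 0) {
 *			// info.data now holds the raw SFF EEPROM contents
 *		}
 *		free(info.data);
 *	}
 */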

static const struct eth_dev_ops hns3_eth_dev_ops = {
	.dev_configure = hns3_dev_configure,
	.dev_start = hns3_dev_start,
	.dev_stop = hns3_dev_stop,
	.dev_close = hns3_dev_close,
	.promiscuous_enable = hns3_dev_promiscuous_enable,
	.promiscuous_disable = hns3_dev_promiscuous_disable,
	.allmulticast_enable = hns3_dev_allmulticast_enable,
	.allmulticast_disable = hns3_dev_allmulticast_disable,
	.mtu_set = hns3_dev_mtu_set,
	.stats_get = hns3_stats_get,
	.stats_reset = hns3_stats_reset,
	.xstats_get = hns3_dev_xstats_get,
	.xstats_get_names = hns3_dev_xstats_get_names,
	.xstats_reset = hns3_dev_xstats_reset,
	.xstats_get_by_id = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get = hns3_dev_infos_get,
	.fw_version_get = hns3_fw_version_get,
	.rx_queue_setup = hns3_rx_queue_setup,
	.tx_queue_setup = hns3_tx_queue_setup,
	.rx_queue_release = hns3_dev_rx_queue_release,
	.tx_queue_release = hns3_dev_tx_queue_release,
	.rx_queue_start = hns3_dev_rx_queue_start,
	.rx_queue_stop = hns3_dev_rx_queue_stop,
	.tx_queue_start = hns3_dev_tx_queue_start,
	.tx_queue_stop = hns3_dev_tx_queue_stop,
	.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
	.rxq_info_get = hns3_rxq_info_get,
	.txq_info_get = hns3_txq_info_get,
	.rx_burst_mode_get = hns3_rx_burst_mode_get,
	.tx_burst_mode_get = hns3_tx_burst_mode_get,
	.flow_ctrl_get = hns3_flow_ctrl_get,
	.flow_ctrl_set = hns3_flow_ctrl_set,
	.priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
	.mac_addr_add = hns3_add_mac_addr,
	.mac_addr_remove = hns3_remove_mac_addr,
	.mac_addr_set = hns3_set_default_mac_addr,
	.set_mc_addr_list = hns3_set_mc_mac_addr_list,
	.link_update = hns3_dev_link_update,
	.dev_set_link_up = hns3_dev_set_link_up,
	.dev_set_link_down = hns3_dev_set_link_down,
	.rss_hash_update = hns3_dev_rss_hash_update,
	.rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
	.reta_update = hns3_dev_rss_reta_update,
	.reta_query = hns3_dev_rss_reta_query,
	.flow_ops_get = hns3_dev_flow_ops_get,
	.vlan_filter_set = hns3_vlan_filter_set,
	.vlan_tpid_set = hns3_vlan_tpid_set,
	.vlan_offload_set = hns3_vlan_offload_set,
	.vlan_pvid_set = hns3_vlan_pvid_set,
	.get_reg = hns3_get_regs,
	.get_module_info = hns3_get_module_info,
	.get_module_eeprom = hns3_get_module_eeprom,
	.get_dcb_info = hns3_get_dcb_info,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
	.fec_get_capability = hns3_fec_get_capability,
	.fec_get = hns3_fec_get,
	.fec_set = hns3_fec_set,
	.tm_ops_get = hns3_tm_ops_get,
	.tx_done_cleanup = hns3_tx_done_cleanup,
	.timesync_enable = hns3_timesync_enable,
	.timesync_disable = hns3_timesync_disable,
	.timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp,
	.timesync_adjust_time = hns3_timesync_adjust_time,
	.timesync_read_time = hns3_timesync_read_time,
	.timesync_write_time = hns3_timesync_write_time,
	.eth_dev_priv_dump = hns3_eth_dev_priv_dump,
};

static const struct hns3_reset_ops hns3_reset_ops = {
	.reset_service = hns3_reset_service,
	.stop_service = hns3_stop_service,
	.prepare_reset = hns3_prepare_reset,
	.wait_hardware_ready = hns3_wait_hardware_ready,
	.reinit_dev = hns3_reinit_dev,
	.restore_conf = hns3_restore_conf,
	.start_service = hns3_start_service,
};
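
/*
 * The reset framework (hns3_reset_process() in hns3_intr.c) invokes the
 * hns3_reset_ops callbacks above. A sketch of the expected order for a
 * successful recovery, assuming no step returns -EAGAIN:
 *
 *	stop_service -> prepare_reset -> wait_hardware_ready ->
 *	reinit_dev -> restore_conf -> start_service
 *
 * Steps that return -EAGAIN (e.g. hns3_wait_hardware_ready()) cause the
 * framework to back off and resume from the same step later.
 */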

static void
hns3_init_hw_ops(struct hns3_hw *hw)
{
	hw->ops.add_mc_mac_addr = hns3_add_mc_mac_addr;
	hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr;
	hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr;
	hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr;
	hw->ops.bind_ring_with_vector = hns3_bind_ring_with_vector;
}

static int
hns3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	hns3_flow_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3_eth_dev_ops;
	eth_dev->rx_queue_count = hns3_rx_queue_count;
	ret = hns3_mp_init(eth_dev);
	if (ret)
		goto err_mp_init;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_tx_push_init(eth_dev);
		return 0;
	}

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = false;
	hw->data = eth_dev->data;
	hns3_parse_devargs(eth_dev);

	/*
	 * Set the default maximum packet size according to the default MTU
	 * value in the DPDK framework.
	 */
	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;

	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3_reset_ops;

	hns3_init_hw_ops(hw);
	ret = hns3_init_pf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
		goto err_init_pf;
	}

	ret = hns3_init_mac_addrs(eth_dev);
	if (ret != 0)
		goto err_init_mac_addrs;

	hw->adapter_state = HNS3_NIC_INITIALIZED;

	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
			    SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait for the ready flag before starting a reset */
		hns3_notify_reset_ready(hw, false);
	}

	hns3_info(hw, "hns3 dev initialization successful!");
	return 0;

err_init_mac_addrs:
	hns3_uninit_pf(eth_dev);

err_init_pf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	hns3_mp_uninit(eth_dev);

err_mp_init:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->tx_descriptor_status = NULL;
	return ret;
}

static int
hns3_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_mp_uninit(eth_dev);
		return 0;
	}

	if (hw->adapter_state < HNS3_NIC_CLOSING)
		hns3_dev_close(eth_dev);

	hw->adapter_state = HNS3_NIC_REMOVED;
	return 0;
}

static int
eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct hns3_adapter),
					     hns3_dev_init);
}

static int
eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
}

static const struct rte_pci_id pci_id_hns3_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
	{ .vendor_id = 0, }, /* sentinel */
};

static struct rte_pci_driver rte_hns3_pmd = {
	.id_table = pci_id_hns3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_hns3_pci_probe,
	.remove = eth_hns3_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_hns3,
			      HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
			      HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
			      HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
			      HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16> ");
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE);
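
/*
 * Devargs usage sketch: the parameters registered above are passed on the
 * EAL command line when attaching the PCI device. The BDF below is only an
 * example:
 *
 *	dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common \
 *		-- -i
 */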