/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_kvargs.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"

#define HNS3_SERVICE_INTERVAL		1000000 /* us */
#define HNS3_SERVICE_QUICK_INTERVAL	10
#define HNS3_INVALID_PVID		0xFFFF

#define HNS3_FILTER_TYPE_VF		0
#define HNS3_FILTER_TYPE_PORT		1
#define HNS3_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HNS3_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HNS3_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HNS3_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HNS3_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HNS3_FILTER_FE_EGRESS		(HNS3_FILTER_FE_NIC_EGRESS_B \
					| HNS3_FILTER_FE_ROCE_EGRESS_B)
#define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
					| HNS3_FILTER_FE_ROCE_INGRESS_B)

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

#define HNS3_VECTOR0_IMP_RESET_INT_B	1
#define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
#define HNS3_VECTOR0_IMP_RD_POISON_B	5U
#define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U

#define HNS3_RESET_WAIT_MS	100
#define HNS3_RESET_WAIT_CNT	200

/* FEC mode order defined in HNS3 hardware */
#define HNS3_HW_FEC_MODE_NOFEC	0
#define HNS3_HW_FEC_MODE_BASER	1
#define HNS3_HW_FEC_MODE_RS	2

enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,
	HNS3_VECTOR0_EVENT_MBX,
	HNS3_VECTOR0_EVENT_ERR,
	HNS3_VECTOR0_EVENT_PTP,
	HNS3_VECTOR0_EVENT_OTHER,
};

static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};

static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
						  uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
				    int on);
static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
static bool hns3_update_link_status(struct hns3_hw *hw);

static int hns3_add_mc_mac_addr(struct hns3_hw *hw,
				struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
				   struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct hns3_hw *hw);
static int hns3_do_stop(struct hns3_adapter *hns);
static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);

void hns3_ether_format_addr(char *buf, uint16_t size,
			    const struct rte_ether_addr *ether_addr)
{
	snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
		 ether_addr->addr_bytes[0],
		 ether_addr->addr_bytes[4],
		 ether_addr->addr_bytes[5]);
}

static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3_evt_cause
hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
			  uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.imp_cnt++;
		hns3_warn(hw, "IMP reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw, "IMP reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
			     uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.global_cnt++;
		hns3_warn(hw, "Global reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw,
			  "Global reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

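/*
 * Classify the source of a vector0 interrupt. Reset events (IMP/global)
 * take priority, followed by 1588 (PTP) events, MSI-X/RAS errors and
 * mailbox (CMDQ RX) events; anything else is reported as an "other" event.
 */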
static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t vector0_int_stats;
	uint32_t cmdq_src_val;
	uint32_t hw_err_src_reg;
	uint32_t val;
	enum hns3_evt_cause ret;
	bool is_delay;

	/* fetch the events from their corresponding regs */
	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);

	is_delay = clearval == NULL ? true : false;
	/*
	 * Assumption: If by any chance reset and mailbox events are reported
	 * together, then we will only process the reset event and defer the
	 * processing of the mailbox events. Since we have not cleared the
	 * RX CMDQ event this time, we will receive another interrupt from
	 * H/W just for the mailbox.
	 */
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
		ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Global reset */
	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
		ret = hns3_proc_global_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Check for vector0 1588 event source */
	if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
		val = BIT(HNS3_VECTOR0_1588_INT_B);
		ret = HNS3_VECTOR0_EVENT_PTP;
		goto out;
	}

	/* check for vector0 msix event source */
	if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
		val = vector0_int_stats | hw_err_src_reg;
		ret = HNS3_VECTOR0_EVENT_ERR;
		goto out;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
		cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		val = cmdq_src_val;
		ret = HNS3_VECTOR0_EVENT_MBX;
		goto out;
	}

	val = vector0_int_stats;
	ret = HNS3_VECTOR0_EVENT_OTHER;
out:

	if (clearval)
		*clearval = val;
	return ret;
}

static bool
hns3_is_1588_event_type(uint32_t event_type)
{
	return (event_type == HNS3_VECTOR0_EVENT_PTP);
}

static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
	if (event_type == HNS3_VECTOR0_EVENT_RST ||
	    hns3_is_1588_event_type(event_type))
		hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
	else if (event_type == HNS3_VECTOR0_EVENT_MBX)
		hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
	uint32_t vector0_int_stats;

	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during IMP reset interrupt");

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during Global reset interrupt");

	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
			       BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
			       BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
			       BIT(HNS3_VECTOR0_CORERESET_INT_B));
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
			       BIT(HNS3_VECTOR0_1588_INT_B));
}

static void
hns3_handle_mac_tnl(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	uint32_t status;
	int ret;

	/* query and clear mac tnl interrupt */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
		return;
	}

	status = rte_le_to_cpu_32(desc.data[0]);
	if (status) {
		hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
					  false);
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret)
			hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
				 ret);
	}
}

static void
hns3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_evt_cause event_cause;
	uint32_t clearval = 0;
	uint32_t vector0_int;
	uint32_t ras_int;
	uint32_t cmdq_int;

	/* Disable interrupt */
	hns3_pf_disable_irq0(hw);

	event_cause = hns3_check_event_cause(hns, &clearval);
	vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hns3_clear_event_cause(hw, event_cause, clearval);
	/* vector 0 interrupt is shared with reset and mailbox source events. */
	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
		hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
		hns3_handle_mac_tnl(hw);
		hns3_handle_error(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
		hns3_warn(hw, "received reset interrupt");
		hns3_schedule_reset(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
		hns3_dev_handle_mbx_msg(hw);
	} else {
		hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
	}

	/* Enable interrupt if it is not caused by reset */
	hns3_pf_enable_irq0(hw);
}

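/*
 * Program a single VLAN ID into the port VLAN filter table. The table is
 * addressed in groups of 160 VLAN IDs per command descriptor; within a
 * group the VLAN is selected by a byte offset (8 IDs per byte) and a bit
 * within that byte.
 */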
static int
hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3_VLAN_ID_OFFSET_STEP	160
#define HNS3_VLAN_BYTE_SIZE		8
	struct hns3_vlan_filter_pf_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	uint8_t vlan_offset_byte_val;
	struct hns3_cmd_desc desc;
	uint8_t vlan_offset_byte;
	uint8_t vlan_offset_base;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
			   HNS3_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);

	req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_base;
	req->vlan_cfg = on ? 0 : 1;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
			 vlan_id, ret);

	return ret;
}

static void
hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id) {
			if (vlan_entry->hd_tbl_status)
				hns3_set_port_vlan_filter(hns, vlan_id, 0);
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			break;
		}
	}
}

static void
hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
			bool writen_to_tbl)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id)
			return;
	}

	vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
	if (vlan_entry == NULL) {
		hns3_err(hw, "Failed to malloc hns3 vlan table");
		return;
	}

	vlan_entry->hd_tbl_status = writen_to_tbl;
	vlan_entry->vlan_id = vlan_id;

	LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
}

static int
hns3_restore_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;
	uint16_t vlan_id;
	int ret = 0;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
		return hns3_vlan_pvid_configure(hns,
						hw->port_base_vlan_cfg.pvid, 1);

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			vlan_id = vlan_entry->vlan_id;
			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
			if (ret)
				break;
		}
	}

	return ret;
}

static int
hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
	struct hns3_hw *hw = &hns->hw;
	bool writen_to_tbl = false;
	int ret = 0;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. So, to receive packets without vlan, vlan id
	 * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
	 */
	if (on == 0 && vlan_id == 0)
		return 0;

	/*
	 * When port base vlan is enabled, we use the port base vlan as the
	 * vlan filter condition. In this case, we don't update the vlan
	 * filter table when the user adds a new vlan or removes an existing
	 * vlan; we just update the vlan list. The vlan ids in the vlan list
	 * will be written to the vlan filter table when port base vlan is
	 * disabled.
	 */
	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
		writen_to_tbl = true;
	}

	if (ret == 0) {
		if (on)
			hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
		else
			hns3_rm_dev_vlan_table(hns, vlan_id);
	}
	return ret;
}

static int
hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
			 uint16_t tpid)
{
	struct hns3_rx_vlan_type_cfg_cmd *rx_req;
	struct hns3_tx_vlan_type_cfg_cmd *tx_req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_VLAN) {
		hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
			 ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
	tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
			 ret);
	return ret;
}

static int
hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

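/*
 * Write the per-vport Rx VLAN tag handling configuration (tag1/tag2 strip,
 * priority-only show and discard bits) to hardware via the
 * HNS3_OPC_VLAN_PORT_RX_CFG command.
 */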
static int
hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_rx_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
		     vcfg->strip_tag1_discard_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
		     vcfg->strip_tag2_discard_en ? 1 : 0);
	/*
	 * In the current version VF is not supported when the PF is driven by
	 * the DPDK driver, so we only need to configure parameters for the PF
	 * vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
	return ret;
}

static void
hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
}

static void
hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
}

static int
hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
{
	struct hns3_rx_vtag_cfg rxvlan_cfg;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		rxvlan_cfg.strip_tag1_en = false;
		rxvlan_cfg.strip_tag2_en = enable;
		rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		rxvlan_cfg.strip_tag1_en = enable;
		rxvlan_cfg.strip_tag2_en = true;
		rxvlan_cfg.strip_tag2_discard_en = true;
	}

	rxvlan_cfg.strip_tag1_discard_en = false;
	rxvlan_cfg.vlan1_vlan_prionly = false;
	rxvlan_cfg.vlan2_vlan_prionly = false;
	rxvlan_cfg.rx_vlan_offload_en = enable;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
	if (ret) {
		hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
			 enable ? "enable" : "disable", ret);
		return ret;
	}

	hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);

	return ret;
}

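/*
 * Enable or disable VLAN filtering for the given filter type (VF or port)
 * and filter engine bits on behalf of one function id.
 */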
static int
hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
			  uint8_t fe_type, bool filter_en, uint8_t vf_id)
{
	struct hns3_vlan_filter_ctrl_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set vlan filter fail, ret =%d", ret);

	return ret;
}

static int
hns3_vlan_filter_init(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
					HNS3_FILTER_FE_EGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret) {
		hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);

	return ret;
}

static int
hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, enable,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to %s port vlan filter, ret = %d",
			 enable ? "enable" : "disable", ret);

	return ret;
}

static int
hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_rxmode *rxmode;
	unsigned int tmp_mask;
	bool enable;
	int ret = 0;

	rte_spinlock_lock(&hw->lock);
	rxmode = &dev->data->dev_conf.rxmode;
	tmp_mask = (unsigned int)mask;
	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
		/* ignore vlan filter configuration during promiscuous mode */
		if (!dev->data->promiscuous) {
			/* Enable or disable VLAN filter */
			enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
				 true : false;

			ret = hns3_enable_vlan_filter(hns, enable);
			if (ret) {
				rte_spinlock_unlock(&hw->lock);
				hns3_err(hw, "failed to %s rx filter, ret = %d",
					 enable ? "enable" : "disable", ret);
				return ret;
			}
		}
	}

	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
			 true : false;

		ret = hns3_en_hw_strip_rxvtag(hns, enable);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			hns3_err(hw, "failed to %s rx strip, ret = %d",
				 enable ? "enable" : "disable", ret);
			return ret;
		}
	}

	rte_spinlock_unlock(&hw->lock);

	return ret;
}

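/*
 * Write the per-vport Tx VLAN tag handling configuration (default tags,
 * accept/insert bits and tag shift mode) to hardware via the
 * HNS3_OPC_VLAN_PORT_TX_CFG command.
 */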
static int
hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_tx_cfg_cmd *req;
	struct hns3_cmd_desc desc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = vcfg->default_tag1;
	req->def_vlan_tag2 = vcfg->default_tag2;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
		     vcfg->accept_tag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
		     vcfg->accept_untag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
		     vcfg->accept_tag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
		     vcfg->accept_untag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
		     vcfg->insert_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
		     vcfg->insert_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
		     vcfg->tag_shift_mode_en ? 1 : 0);

	/*
	 * In the current version VF is not supported when the PF is driven by
	 * the DPDK driver, so we only need to configure parameters for the PF
	 * vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);

	return ret;
}

static int
hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
		     uint16_t pvid)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_vtag_cfg txvlan_cfg;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
		txvlan_cfg.accept_tag1 = true;
		txvlan_cfg.insert_tag1_en = false;
		txvlan_cfg.default_tag1 = 0;
	} else {
		txvlan_cfg.accept_tag1 =
			hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
		txvlan_cfg.insert_tag1_en = true;
		txvlan_cfg.default_tag1 = pvid;
	}

	txvlan_cfg.accept_untag1 = true;
	txvlan_cfg.accept_tag2 = true;
	txvlan_cfg.accept_untag2 = true;
	txvlan_cfg.insert_tag2_en = false;
	txvlan_cfg.default_tag2 = 0;
	txvlan_cfg.tag_shift_mode_en = true;

	ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
	if (ret) {
		hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
			 ret);
		return ret;
	}

	hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
	return ret;
}


static void
hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
			vlan_entry->hd_tbl_status = false;
		}
	}

	if (is_del_list) {
		vlan_entry = LIST_FIRST(&pf->vlan_list);
		while (vlan_entry) {
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			vlan_entry = LIST_FIRST(&pf->vlan_list);
		}
	}
}

static void
hns3_add_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (!vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
			vlan_entry->hd_tbl_status = true;
		}
	}
}

static void
hns3_remove_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	hns3_rm_all_vlan_table(hns, true);
	if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
		ret = hns3_set_port_vlan_filter(hns,
						hw->port_base_vlan_cfg.pvid, 0);
		if (ret) {
			hns3_err(hw, "Failed to remove all vlan table, ret =%d",
				 ret);
			return;
		}
	}
}

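/*
 * Rewrite the hardware VLAN filter when the PVID state changes: on enable,
 * drop the old PVID entry, clear the user-added entries and install the new
 * PVID; on disable, drop the PVID entry and re-install the user entries
 * kept in the software vlan list.
 */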
static int
hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
				uint16_t port_base_vlan_state, uint16_t new_pvid)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t old_pvid;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
		old_pvid = hw->port_base_vlan_cfg.pvid;
		if (old_pvid != HNS3_INVALID_PVID) {
			ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
			if (ret) {
				hns3_err(hw, "failed to remove old pvid %u, "
					 "ret = %d", old_pvid, ret);
				return ret;
			}
		}

		hns3_rm_all_vlan_table(hns, false);
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
		if (ret) {
			hns3_err(hw, "failed to add new pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}
	} else {
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
		if (ret) {
			hns3_err(hw, "failed to remove pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}

		hns3_add_all_vlan_table(hns);
	}
	return 0;
}

static int
hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
{
	struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
	struct hns3_rx_vtag_cfg rx_vlan_cfg;
	bool rx_strip_en;
	int ret;

	rx_strip_en = old_cfg->rx_vlan_offload_en;
	if (on) {
		rx_vlan_cfg.strip_tag1_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_en = true;
		rx_vlan_cfg.strip_tag2_discard_en = true;
	} else {
		rx_vlan_cfg.strip_tag1_en = false;
		rx_vlan_cfg.strip_tag2_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_discard_en = false;
	}
	rx_vlan_cfg.strip_tag1_discard_en = false;
	rx_vlan_cfg.vlan1_vlan_prionly = false;
	rx_vlan_cfg.vlan2_vlan_prionly = false;
	rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
	if (ret)
		return ret;

	hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
	return ret;
}

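/*
 * Apply a PVID configuration: program Tx tag insertion, Rx PVID stripping
 * and the VLAN filter entries in order, rolling back the earlier steps if a
 * later one fails.
 */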
static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t port_base_vlan_state;
	int ret, err;

	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
		if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
			hns3_warn(hw, "Invalid operation! As current pvid set "
				  "is %u, disable pvid %u is invalid",
				  hw->port_base_vlan_cfg.pvid, pvid);
		return 0;
	}

	port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
				    HNS3_PORT_BASE_VLAN_DISABLE;
	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
			 ret);
		return ret;
	}

	ret = hns3_en_pvid_strip(hns, on);
	if (ret) {
		hns3_err(hw, "failed to config rx vlan strip for pvid, "
			 "ret = %d", ret);
		goto pvid_vlan_strip_fail;
	}

	if (pvid == HNS3_INVALID_PVID)
		goto out;
	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to update vlan filter entries, ret = %d",
			 ret);
		goto vlan_filter_set_fail;
	}

out:
	hw->port_base_vlan_cfg.state = port_base_vlan_state;
	hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
	return ret;

vlan_filter_set_fail:
	err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
				 HNS3_PORT_BASE_VLAN_ENABLE);
	if (err)
		hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
	err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
				   hw->port_base_vlan_cfg.pvid);
	if (err)
		hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

	return ret;
}

static int
hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool pvid_en_state_change;
	uint16_t pvid_state;
	int ret;

	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
		hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
			 RTE_ETHER_MAX_VLAN_ID);
		return -EINVAL;
	}

	/*
	 * If the PVID configuration state changes, the PVID state stored in
	 * struct hns3_tx_queue/hns3_rx_queue must be refreshed as well.
	 */
	pvid_state = hw->port_base_vlan_cfg.state;
	if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
	    (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
		pvid_en_state_change = false;
	else
		pvid_en_state_change = true;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_pvid_configure(hns, pvid, on);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		return ret;
	/*
	 * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE does the PVID related
	 * operation in Tx/Rx need to be processed by the PMD.
	 */
	if (pvid_en_state_change &&
	    hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
		hns3_update_all_queues_pvid_proc_en(hw);

	return 0;
}

static int
hns3_default_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
	 * table, packets without vlan won't be received. So, add vlan 0 as
	 * the default vlan.
	 */
	ret = hns3_vlan_filter_configure(hns, 0, 1);
	if (ret)
		hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
	return ret;
}

static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * This function can be called in the initialization and reset process.
	 * When called in the reset process, it means that the hardware has
	 * been reset successfully and we need to restore the hardware
	 * configuration to ensure that it remains unchanged before and after
	 * the reset.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
	}

	ret = hns3_vlan_filter_init(hns);
	if (ret) {
		hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
		return ret;
	}

	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
				       RTE_ETHER_TYPE_VLAN);
	if (ret) {
		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
		return ret;
	}

	/*
	 * When in the reinit dev stage of the reset process, the following
	 * vlan-related configurations may differ from those at initialization,
	 * we will restore configurations to hardware in hns3_restore_vlan_table
	 * and hns3_restore_vlan_conf later.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
		if (ret) {
			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
			return ret;
		}

		ret = hns3_en_hw_strip_rxvtag(hns, false);
		if (ret) {
			hns3_err(hw, "rx strip configure fail in pf, ret =%d",
				 ret);
			return ret;
		}
	}

	return hns3_default_vlan_config(hns);
}

static int
hns3_restore_vlan_conf(struct hns3_adapter *hns)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	bool enable;
	int ret;

	if (!hw->data->promiscuous) {
		/* restore vlan filter states */
		offloads = hw->data->dev_conf.rxmode.offloads;
		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
		ret = hns3_enable_vlan_filter(hns, enable);
		if (ret) {
			hns3_err(hw, "failed to restore vlan rx filter conf, "
				 "ret = %d", ret);
			return ret;
		}
	}

	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
	if (ret) {
		hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
	if (ret)
		hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);

	return ret;
}

static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_txmode *txmode;
	struct hns3_hw *hw = &hns->hw;
	int mask;
	int ret;

	txmode = &data->dev_conf.txmode;
	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
		hns3_warn(hw,
			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
			  "configuration is not supported! Ignore these two "
			  "parameters: hw_vlan_reject_tagged(%u), "
			  "hw_vlan_reject_untagged(%u)",
			  txmode->hw_vlan_reject_tagged,
			  txmode->hw_vlan_reject_untagged);

	/* Apply vlan offload setting */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
	ret = hns3_vlan_offload_set(dev, mask);
	if (ret) {
		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * If pvid config is not set in rte_eth_conf, the driver need not set
	 * any VLAN pvid related configuration to hardware.
	 */
	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
		return 0;

	/* Apply pvid setting */
	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
				 txmode->hw_vlan_insert_pvid);
	if (ret)
		hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
			 txmode->pvid, ret);

	return ret;
}

static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
		unsigned int tso_mss_max)
{
	struct hns3_cfg_tso_status_cmd *req;
	struct hns3_cmd_desc desc;
	uint16_t tso_mss;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hns3_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_min);
	req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_max);
	req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
		   uint16_t *allocated_size, bool is_alloc)
{
	struct hns3_umv_spc_alc_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_umv_spc_alc_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
	hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
	req->space_size = rte_cpu_to_le_32(space_size);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
			     is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = rte_le_to_cpu_32(desc.data[1]);

	return 0;
}

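/*
 * UMV (unicast MAC VLAN) space is the share of the hardware unicast MAC
 * table granted to this PF. The helpers below request/release that space
 * and track how many entries are currently in use.
 */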
static int
hns3_init_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t allocated_size = 0;
	int ret;

	ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
				 true);
	if (ret)
		return ret;

	if (allocated_size < pf->wanted_umv_size)
		PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
			     pf->wanted_umv_size, allocated_size);

	pf->max_umv_size = (!!allocated_size) ? allocated_size :
			   pf->wanted_umv_size;
	pf->used_umv_size = 0;
	return 0;
}

static int
hns3_uninit_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	if (pf->max_umv_size == 0)
		return 0;

	ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
	if (ret)
		return ret;

	pf->max_umv_size = 0;

	return 0;
}

static bool
hns3_is_umv_space_full(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	bool is_full;

	is_full = (pf->used_umv_size >= pf->max_umv_size);

	return is_full;
}

static void
hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (is_free) {
		if (pf->used_umv_size > 0)
			pf->used_umv_size--;
	} else
		pf->used_umv_size++;
}

static void
hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
		      const uint8_t *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
			    ((uint32_t)mac_addr[2] << 16) |
			    ((uint32_t)mac_addr[1] << 8) |
			    (uint32_t)mac_addr[0];
	uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];

	hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
		hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
	new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
}

static int
hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
			     uint8_t resp_code,
			     enum hns3_mac_vlan_tbl_opcode op)
{
	if (cmdq_resp) {
		hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
			 cmdq_resp);
		return -EIO;
	}

	if (op == HNS3_MAC_VLAN_ADD) {
		if (resp_code == 0 || resp_code == 1) {
			return 0;
		} else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for uc_overflow");
			return -ENOSPC;
		} else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for mc_overflow");
			return -ENOSPC;
		}

		hns3_err(hw, "add mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_REMOVE) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "remove mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "remove mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_LKUP) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "lookup mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	}

	hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
		 op);

	return -EINVAL;
}

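/*
 * Look up a MAC/VLAN table entry. Multicast entries span several command
 * descriptors (HNS3_MC_MAC_VLAN_OPS_DESC_NUM), unicast entries fit in one;
 * the request is copied into the first descriptor in either case.
 */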
static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req,
			 struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int ret;
	int i;

	if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) {
		for (i = 0; i < desc_num - 1; i++) {
			hns3_cmd_setup_basic_desc(&desc[i],
						  HNS3_OPC_MAC_VLAN_ADD, true);
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
			if (i == 0)
				memcpy(desc[i].data, req,
				       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		}
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD,
					  true);
	} else {
		hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
	}
	ret = hns3_cmd_send(hw, desc, desc_num);
	if (ret) {
		hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
			 ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc[0].retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_LKUP);
}

static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
		      struct hns3_mac_vlan_tbl_entry_cmd *req,
		      struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int cfg_status;
	int ret;
	int i;

	if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) {
		hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false);
		memcpy(desc->data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc->retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	} else {
		for (i = 0; i < desc_num; i++) {
			hns3_cmd_reuse_desc(&desc[i], false);
			if (i == desc_num - 1)
				desc[i].flag &=
					rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
			else
				desc[i].flag |=
					rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		}
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		desc[0].retval = 0;
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc[0].retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	}

	if (ret) {
		hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}

	return cfg_status;
}

static int
hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req)
{
	struct hns3_cmd_desc desc;
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc.retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_REMOVE);
}

static int
hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cmd_desc desc;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint16_t egress_port = 0;
	uint8_t vf_id;
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	/*
	 * In the current version VF is not supported when the PF is driven by
	 * the DPDK driver, so we only need to configure parameters for the PF
	 * vport.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
		       HNS3_MAC_EPORT_VFID_S, vf_id);

	req.egress_port = rte_cpu_to_le_16(egress_port);

	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

	/*
	 * Look up the mac address in the mac_vlan table, and add it if the
	 * entry does not exist. Duplicate unicast entries are not allowed in
	 * the mac vlan table.
	 */
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc,
				       HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
	if (ret == -ENOENT) {
		if (!hns3_is_umv_space_full(hw)) {
			ret = hns3_add_mac_vlan_tbl(hw, &req, &desc,
						    HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
			if (!ret)
				hns3_update_umv_space(hw, false);
			return ret;
		}

		hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

		return -ENOSPC;
	}

	hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

	/* check if we just hit the duplicate */
	if (ret == 0) {
		hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
		return 0;
	}

	hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
		 mac_str);

	return ret;
}

static bool
hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses in mc_addrs[] */
		if (rte_is_same_ether_addr(addr, mc_addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to add mc mac addr, same addrs"
				 "(%s) is added by the set_mc_mac_addr_list "
				 "API", mac_str);
			return true;
		}
	}

	return false;
}

int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * In the hns3 network engine, UC and MC mac addresses are added with
	 * different firmware commands, so we must determine whether the input
	 * address is a UC or a MC address in order to call the right command.
	 * By the way, it is recommended to call the API function named
	 * rte_eth_dev_set_mc_addr_list to set MC mac addresses, because using
	 * the rte_eth_dev_mac_addr_add API function to set MC mac addresses
	 * may affect the specification of UC mac addresses.
	 */
	if (rte_is_multicast_ether_addr(mac_addr)) {
		if (hns3_find_duplicate_mc_addr(hw, mac_addr)) {
			rte_spinlock_unlock(&hw->lock);
			return -EINVAL;
		}
		ret = hw->ops.add_mc_mac_addr(hw, mac_addr);
	} else {
		ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	}
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
	}

	return ret;
}

static int
hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
	ret = hns3_remove_mac_vlan_tbl(hw, &req);
	if (ret == -ENOENT) /* mac addr doesn't exist in the mac vlan table. */
		return 0;
	else if (ret == 0)
		hns3_update_umv_space(hw, true);

	return ret;
}

void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hw->ops.del_mc_mac_addr(hw, mac_addr);
	else
		ret = hw->ops.del_uc_mac_addr(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}
}

static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mac_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *oaddr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret, ret_val;

	rte_spinlock_lock(&hw->lock);
	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
	ret = hw->ops.del_uc_mac_addr(hw, oaddr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       oaddr);
		hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
			  mac_str, ret);

		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
		goto err_add_uc_addr;
	}

	ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
	if (ret) {
		hns3_err(hw, "Failed to configure mac pause address: %d", ret);
		goto err_pause_addr_cfg;
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	rte_spinlock_unlock(&hw->lock);

	return 0;

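	/*
	 * Error rollback in reverse order: first delete the newly added
	 * unicast address, then restore the previous default address.
	 */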
err_pause_addr_cfg:
	ret_val = hw->ops.del_uc_mac_addr(hw, mac_addr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_warn(hw,
			  "Failed to roll back to del setted mac addr(%s): %d",
			  mac_str, ret_val);
	}

err_add_uc_addr:
	ret_val = hw->ops.add_uc_mac_addr(hw, oaddr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
		hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
			  mac_str, ret_val);
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

int
hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct hns3_hw_ops *ops = &hw->ops;
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	int ret = 0;
	int i;

	mac_addrs_capa =
		hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : HNS3_UC_MACADDR_NUM;
	for (i = 0; i < mac_addrs_capa; i++) {
		addr = &hw->data->mac_addrs[i];
		if (rte_is_zero_ether_addr(addr))
			continue;
		if (rte_is_multicast_ether_addr(addr))
			ret = del ? ops->del_mc_mac_addr(hw, addr) :
			      ops->add_mc_mac_addr(hw, addr);
		else
			ret = del ? ops->del_uc_mac_addr(hw, addr) :
			      ops->add_uc_mac_addr(hw, addr);

		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to %s mac addr(%s) index:%d ret = %d.",
				 del ? "remove" : "restore", mac_str, i, ret);
		}
	}

	return ret;
}

static void
hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
{
#define HNS3_VF_NUM_IN_FIRST_DESC 192
	uint8_t word_num;
	uint8_t bit_num;

	if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &=
				rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[1].data[word_num] |=
				rte_cpu_to_le_32(1UL << bit_num);
	} else {
		word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &=
				rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[2].data[word_num] |=
				rte_cpu_to_le_32(1UL << bit_num);
	}
}

static int
hns3_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint8_t vf_id;
	int ret;

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
				       HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}

1886 */ 1887 vf_id = HNS3_PF_FUNC_ID; 1888 hns3_update_desc_vfid(desc, vf_id, false); 1889 ret = hns3_add_mac_vlan_tbl(hw, &req, desc, 1890 HNS3_MC_MAC_VLAN_OPS_DESC_NUM); 1891 if (ret) { 1892 if (ret == -ENOSPC) 1893 hns3_err(hw, "mc mac vlan table is full"); 1894 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1895 mac_addr); 1896 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret); 1897 } 1898 1899 return ret; 1900 } 1901 1902 static int 1903 hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1904 { 1905 struct hns3_mac_vlan_tbl_entry_cmd req; 1906 struct hns3_cmd_desc desc[3]; 1907 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1908 uint8_t vf_id; 1909 int ret; 1910 1911 /* Check if mac addr is valid */ 1912 if (!rte_is_multicast_ether_addr(mac_addr)) { 1913 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1914 mac_addr); 1915 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid", 1916 mac_str); 1917 return -EINVAL; 1918 } 1919 1920 memset(&req, 0, sizeof(req)); 1921 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1922 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1923 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, 1924 HNS3_MC_MAC_VLAN_OPS_DESC_NUM); 1925 if (ret == 0) { 1926 /* 1927 * This mac addr exist, remove this handle's VFID for it. 1928 * In current version VF is not supported when PF is driven by 1929 * DPDK driver, just need to configure parameters for PF vport. 1930 */ 1931 vf_id = HNS3_PF_FUNC_ID; 1932 hns3_update_desc_vfid(desc, vf_id, true); 1933 1934 /* All the vfid is zero, so need to delete this entry */ 1935 ret = hns3_remove_mac_vlan_tbl(hw, &req); 1936 } else if (ret == -ENOENT) { 1937 /* This mac addr doesn't exist. */ 1938 return 0; 1939 } 1940 1941 if (ret) { 1942 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1943 mac_addr); 1944 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret); 1945 } 1946 1947 return ret; 1948 } 1949 1950 static int 1951 hns3_set_mc_addr_chk_param(struct hns3_hw *hw, 1952 struct rte_ether_addr *mc_addr_set, 1953 uint32_t nb_mc_addr) 1954 { 1955 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 1956 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1957 struct rte_ether_addr *addr; 1958 uint16_t mac_addrs_capa; 1959 uint32_t i; 1960 uint32_t j; 1961 1962 if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { 1963 hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " 1964 "invalid. valid range: 0~%d", 1965 nb_mc_addr, HNS3_MC_MACADDR_NUM); 1966 return -EINVAL; 1967 } 1968 1969 /* Check if input mac addresses are valid */ 1970 for (i = 0; i < nb_mc_addr; i++) { 1971 addr = &mc_addr_set[i]; 1972 if (!rte_is_multicast_ether_addr(addr)) { 1973 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1974 addr); 1975 hns3_err(hw, 1976 "failed to set mc mac addr, addr(%s) invalid.", 1977 mac_str); 1978 return -EINVAL; 1979 } 1980 1981 /* Check if there are duplicate addresses */ 1982 for (j = i + 1; j < nb_mc_addr; j++) { 1983 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 1984 hns3_ether_format_addr(mac_str, 1985 RTE_ETHER_ADDR_FMT_SIZE, 1986 addr); 1987 hns3_err(hw, "failed to set mc mac addr, " 1988 "addrs invalid. two same addrs(%s).", 1989 mac_str); 1990 return -EINVAL; 1991 } 1992 } 1993 1994 /* 1995 * Check if there are duplicate addresses between mac_addrs 1996 * and mc_addr_set 1997 */ 1998 mac_addrs_capa = hns->is_vf ? 
HNS3_VF_UC_MACADDR_NUM : 1999 HNS3_UC_MACADDR_NUM; 2000 for (j = 0; j < mac_addrs_capa; j++) { 2001 if (rte_is_same_ether_addr(addr, 2002 &hw->data->mac_addrs[j])) { 2003 hns3_ether_format_addr(mac_str, 2004 RTE_ETHER_ADDR_FMT_SIZE, 2005 addr); 2006 hns3_err(hw, "failed to set mc mac addr, " 2007 "addrs invalid. addrs(%s) has already " 2008 "configured in mac_addr add API", 2009 mac_str); 2010 return -EINVAL; 2011 } 2012 } 2013 } 2014 2015 return 0; 2016 } 2017 2018 int 2019 hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, 2020 struct rte_ether_addr *mc_addr_set, 2021 uint32_t nb_mc_addr) 2022 { 2023 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2024 struct rte_ether_addr *addr; 2025 int cur_addr_num; 2026 int set_addr_num; 2027 int num; 2028 int ret; 2029 int i; 2030 2031 /* Check if input parameters are valid */ 2032 ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); 2033 if (ret) 2034 return ret; 2035 2036 rte_spinlock_lock(&hw->lock); 2037 cur_addr_num = hw->mc_addrs_num; 2038 for (i = 0; i < cur_addr_num; i++) { 2039 num = cur_addr_num - i - 1; 2040 addr = &hw->mc_addrs[num]; 2041 ret = hw->ops.del_mc_mac_addr(hw, addr); 2042 if (ret) { 2043 rte_spinlock_unlock(&hw->lock); 2044 return ret; 2045 } 2046 2047 hw->mc_addrs_num--; 2048 } 2049 2050 set_addr_num = (int)nb_mc_addr; 2051 for (i = 0; i < set_addr_num; i++) { 2052 addr = &mc_addr_set[i]; 2053 ret = hw->ops.add_mc_mac_addr(hw, addr); 2054 if (ret) { 2055 rte_spinlock_unlock(&hw->lock); 2056 return ret; 2057 } 2058 2059 rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]); 2060 hw->mc_addrs_num++; 2061 } 2062 rte_spinlock_unlock(&hw->lock); 2063 2064 return 0; 2065 } 2066 2067 int 2068 hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) 2069 { 2070 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 2071 struct hns3_hw *hw = &hns->hw; 2072 struct rte_ether_addr *addr; 2073 int ret = 0; 2074 int i; 2075 2076 for (i = 0; i < hw->mc_addrs_num; i++) { 2077 addr = &hw->mc_addrs[i]; 2078 if (!rte_is_multicast_ether_addr(addr)) 2079 continue; 2080 if (del) 2081 ret = hw->ops.del_mc_mac_addr(hw, addr); 2082 else 2083 ret = hw->ops.add_mc_mac_addr(hw, addr); 2084 if (ret) { 2085 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2086 addr); 2087 hns3_dbg(hw, "failed to %s mc mac addr: %s ret = %d", 2088 del ? 
"Remove" : "Restore", mac_str, ret); 2089 } 2090 } 2091 return ret; 2092 } 2093 2094 static int 2095 hns3_check_mq_mode(struct rte_eth_dev *dev) 2096 { 2097 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 2098 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 2099 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2100 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2101 struct rte_eth_dcb_rx_conf *dcb_rx_conf; 2102 struct rte_eth_dcb_tx_conf *dcb_tx_conf; 2103 uint8_t num_tc; 2104 int max_tc = 0; 2105 int i; 2106 2107 if ((rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) || 2108 (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB || 2109 tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) { 2110 hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.", 2111 rx_mq_mode, tx_mq_mode); 2112 return -EOPNOTSUPP; 2113 } 2114 2115 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 2116 dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; 2117 if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) { 2118 if (dcb_rx_conf->nb_tcs > pf->tc_max) { 2119 hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.", 2120 dcb_rx_conf->nb_tcs, pf->tc_max); 2121 return -EINVAL; 2122 } 2123 2124 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS || 2125 dcb_rx_conf->nb_tcs == HNS3_8_TCS)) { 2126 hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, " 2127 "nb_tcs(%d) != %d or %d in rx direction.", 2128 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS); 2129 return -EINVAL; 2130 } 2131 2132 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) { 2133 hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)", 2134 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs); 2135 return -EINVAL; 2136 } 2137 2138 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) { 2139 if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) { 2140 hns3_err(hw, "dcb_tc[%d] = %u in rx direction, " 2141 "is not equal to one in tx direction.", 2142 i, dcb_rx_conf->dcb_tc[i]); 2143 return -EINVAL; 2144 } 2145 if (dcb_rx_conf->dcb_tc[i] > max_tc) 2146 max_tc = dcb_rx_conf->dcb_tc[i]; 2147 } 2148 2149 num_tc = max_tc + 1; 2150 if (num_tc > dcb_rx_conf->nb_tcs) { 2151 hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)", 2152 num_tc, dcb_rx_conf->nb_tcs); 2153 return -EINVAL; 2154 } 2155 } 2156 2157 return 0; 2158 } 2159 2160 static int 2161 hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en, 2162 enum hns3_ring_type queue_type, uint16_t queue_id) 2163 { 2164 struct hns3_cmd_desc desc; 2165 struct hns3_ctrl_vector_chain_cmd *req = 2166 (struct hns3_ctrl_vector_chain_cmd *)desc.data; 2167 enum hns3_opcode_type op; 2168 uint16_t tqp_type_and_id = 0; 2169 uint16_t type; 2170 uint16_t gl; 2171 int ret; 2172 2173 op = en ? 
HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; 2174 hns3_cmd_setup_basic_desc(&desc, op, false); 2175 req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M, 2176 HNS3_TQP_INT_ID_L_S); 2177 req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M, 2178 HNS3_TQP_INT_ID_H_S); 2179 2180 if (queue_type == HNS3_RING_TYPE_RX) 2181 gl = HNS3_RING_GL_RX; 2182 else 2183 gl = HNS3_RING_GL_TX; 2184 2185 type = queue_type; 2186 2187 hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S, 2188 type); 2189 hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id); 2190 hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S, 2191 gl); 2192 req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); 2193 req->int_cause_num = 1; 2194 ret = hns3_cmd_send(hw, &desc, 1); 2195 if (ret) { 2196 hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.", 2197 en ? "Map" : "Unmap", queue_id, vector_id, ret); 2198 return ret; 2199 } 2200 2201 return 0; 2202 } 2203 2204 static int 2205 hns3_init_ring_with_vector(struct hns3_hw *hw) 2206 { 2207 uint16_t vec; 2208 int ret; 2209 int i; 2210 2211 /* 2212 * In hns3 network engine, vector 0 is always the misc interrupt of this 2213 * function, vector 1~N can be used respectively for the queues of the 2214 * function. Tx and Rx queues with the same number share the interrupt 2215 * vector. In the initialization, clearing all the hardware mapping 2216 * relationship configurations between queues and interrupt vectors is 2217 * needed, so errors caused by residual configurations, such as 2218 * an unexpected Tx interrupt, can be avoided. 2219 */ 2220 vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ 2221 if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE) 2222 vec = vec - 1; /* the last interrupt is reserved */ 2223 hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); 2224 for (i = 0; i < hw->intr_tqps_num; i++) { 2225 /* 2226 * Set gap limiter/rate limiter/quantity limiter algorithm 2227 * configuration for interrupt coalescing of the queue's interrupt. 2228 */ 2229 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, 2230 HNS3_TQP_INTR_GL_DEFAULT); 2231 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, 2232 HNS3_TQP_INTR_GL_DEFAULT); 2233 hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); 2234 /* 2235 * QL(quantity limiter) is not used currently, just set 0 to 2236 * close it.
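 * Assuming HNS3_TQP_INTR_QL_DEFAULT is defined as 0, passing it below
 * therefore effectively disables QL.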
2237 */ 2238 hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); 2239 2240 ret = hns3_bind_ring_with_vector(hw, vec, false, 2241 HNS3_RING_TYPE_TX, i); 2242 if (ret) { 2243 PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with " 2244 "vector: %u, ret=%d", i, vec, ret); 2245 return ret; 2246 } 2247 2248 ret = hns3_bind_ring_with_vector(hw, vec, false, 2249 HNS3_RING_TYPE_RX, i); 2250 if (ret) { 2251 PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with " 2252 "vector: %u, ret=%d", i, vec, ret); 2253 return ret; 2254 } 2255 } 2256 2257 return 0; 2258 } 2259 2260 static int 2261 hns3_setup_dcb(struct rte_eth_dev *dev) 2262 { 2263 struct hns3_adapter *hns = dev->data->dev_private; 2264 struct hns3_hw *hw = &hns->hw; 2265 int ret; 2266 2267 if (!hns3_dev_get_support(hw, DCB)) { 2268 hns3_err(hw, "this port does not support dcb configurations."); 2269 return -EOPNOTSUPP; 2270 } 2271 2272 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) { 2273 hns3_err(hw, "MAC pause enabled, cannot config dcb info."); 2274 return -EOPNOTSUPP; 2275 } 2276 2277 ret = hns3_dcb_configure(hns); 2278 if (ret) 2279 hns3_err(hw, "failed to config dcb: %d", ret); 2280 2281 return ret; 2282 } 2283 2284 static int 2285 hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds) 2286 { 2287 int ret; 2288 2289 /* 2290 * Some hardware doesn't support auto-negotiation, but users may not 2291 * configure link_speeds (default 0), which means auto-negotiation. 2292 * In this case, it should return success. 2293 */ 2294 if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG && 2295 hw->mac.support_autoneg == 0) 2296 return 0; 2297 2298 if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) { 2299 ret = hns3_check_port_speed(hw, link_speeds); 2300 if (ret) 2301 return ret; 2302 } 2303 2304 return 0; 2305 } 2306 2307 static int 2308 hns3_check_dev_conf(struct rte_eth_dev *dev) 2309 { 2310 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2311 struct rte_eth_conf *conf = &dev->data->dev_conf; 2312 int ret; 2313 2314 ret = hns3_check_mq_mode(dev); 2315 if (ret) 2316 return ret; 2317 2318 return hns3_check_link_speed(hw, conf->link_speeds); 2319 } 2320 2321 static int 2322 hns3_dev_configure(struct rte_eth_dev *dev) 2323 { 2324 struct hns3_adapter *hns = dev->data->dev_private; 2325 struct rte_eth_conf *conf = &dev->data->dev_conf; 2326 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; 2327 struct hns3_hw *hw = &hns->hw; 2328 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2329 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2330 struct rte_eth_rss_conf rss_conf; 2331 bool gro_en; 2332 int ret; 2333 2334 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); 2335 2336 /* 2337 * Some versions of hardware network engine does not support 2338 * individually enable/disable/reset the Tx or Rx queue. These devices 2339 * must enable/disable/reset Tx and Rx queues at the same time. When the 2340 * numbers of Tx queues allocated by upper applications are not equal to 2341 * the numbers of Rx queues, driver needs to setup fake Tx or Rx queues 2342 * to adjust numbers of Tx/Rx queues. otherwise, network engine can not 2343 * work as usual. But these fake queues are imperceptible, and can not 2344 * be used by upper applications. 
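 * For example (illustrative, assuming the counts are equalised to the
 * larger of the two): with 8 Rx queues and 4 Tx queues requested, 4 fake
 * Tx queues would be set up; they carry no traffic and are not visible
 * to the application.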
2345 */ 2346 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); 2347 if (ret) { 2348 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret); 2349 hw->cfg_max_queues = 0; 2350 return ret; 2351 } 2352 2353 hw->adapter_state = HNS3_NIC_CONFIGURING; 2354 ret = hns3_check_dev_conf(dev); 2355 if (ret) 2356 goto cfg_err; 2357 2358 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) { 2359 ret = hns3_setup_dcb(dev); 2360 if (ret) 2361 goto cfg_err; 2362 } 2363 2364 /* When RSS is not configured, redirect the packet queue 0 */ 2365 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 2366 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2367 rss_conf = conf->rx_adv_conf.rss_conf; 2368 hw->rss_dis_flag = false; 2369 ret = hns3_dev_rss_hash_update(dev, &rss_conf); 2370 if (ret) 2371 goto cfg_err; 2372 } 2373 2374 ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu); 2375 if (ret != 0) 2376 goto cfg_err; 2377 2378 ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf); 2379 if (ret) 2380 goto cfg_err; 2381 2382 ret = hns3_dev_configure_vlan(dev); 2383 if (ret) 2384 goto cfg_err; 2385 2386 /* config hardware GRO */ 2387 gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false; 2388 ret = hns3_config_gro(hw, gro_en); 2389 if (ret) 2390 goto cfg_err; 2391 2392 hns3_init_rx_ptype_tble(dev); 2393 hw->adapter_state = HNS3_NIC_CONFIGURED; 2394 2395 return 0; 2396 2397 cfg_err: 2398 hw->cfg_max_queues = 0; 2399 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); 2400 hw->adapter_state = HNS3_NIC_INITIALIZED; 2401 2402 return ret; 2403 } 2404 2405 static int 2406 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) 2407 { 2408 struct hns3_config_max_frm_size_cmd *req; 2409 struct hns3_cmd_desc desc; 2410 2411 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false); 2412 2413 req = (struct hns3_config_max_frm_size_cmd *)desc.data; 2414 req->max_frm_size = rte_cpu_to_le_16(new_mps); 2415 req->min_frm_size = RTE_ETHER_MIN_LEN; 2416 2417 return hns3_cmd_send(hw, &desc, 1); 2418 } 2419 2420 static int 2421 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) 2422 { 2423 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2424 uint16_t original_mps = hns->pf.mps; 2425 int err; 2426 int ret; 2427 2428 ret = hns3_set_mac_mtu(hw, mps); 2429 if (ret) { 2430 hns3_err(hw, "failed to set mtu, ret = %d", ret); 2431 return ret; 2432 } 2433 2434 hns->pf.mps = mps; 2435 ret = hns3_buffer_alloc(hw); 2436 if (ret) { 2437 hns3_err(hw, "failed to allocate buffer, ret = %d", ret); 2438 goto rollback; 2439 } 2440 2441 return 0; 2442 2443 rollback: 2444 err = hns3_set_mac_mtu(hw, original_mps); 2445 if (err) { 2446 hns3_err(hw, "fail to rollback MTU, err = %d", err); 2447 return ret; 2448 } 2449 hns->pf.mps = original_mps; 2450 2451 return ret; 2452 } 2453 2454 static int 2455 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 2456 { 2457 struct hns3_adapter *hns = dev->data->dev_private; 2458 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; 2459 struct hns3_hw *hw = &hns->hw; 2460 int ret; 2461 2462 if (dev->data->dev_started) { 2463 hns3_err(hw, "Failed to set mtu, port %u must be stopped " 2464 "before configuration", dev->data->port_id); 2465 return -EBUSY; 2466 } 2467 2468 rte_spinlock_lock(&hw->lock); 2469 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN); 2470 2471 /* 2472 * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely 2473 * assign to "uint16_t" type variable. 
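 * (frame_size has already been clamped to at least HNS3_DEFAULT_FRAME_LEN
 * above, and HNS3_MAX_FRAME_LEN is well below UINT16_MAX.)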
2474 */ 2475 ret = hns3_config_mtu(hw, (uint16_t)frame_size); 2476 if (ret) { 2477 rte_spinlock_unlock(&hw->lock); 2478 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d", 2479 dev->data->port_id, mtu, ret); 2480 return ret; 2481 } 2482 2483 rte_spinlock_unlock(&hw->lock); 2484 2485 return 0; 2486 } 2487 2488 static uint32_t 2489 hns3_get_copper_port_speed_capa(uint32_t supported_speed) 2490 { 2491 uint32_t speed_capa = 0; 2492 2493 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT) 2494 speed_capa |= RTE_ETH_LINK_SPEED_10M_HD; 2495 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT) 2496 speed_capa |= RTE_ETH_LINK_SPEED_10M; 2497 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT) 2498 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD; 2499 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT) 2500 speed_capa |= RTE_ETH_LINK_SPEED_100M; 2501 if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT) 2502 speed_capa |= RTE_ETH_LINK_SPEED_1G; 2503 2504 return speed_capa; 2505 } 2506 2507 static uint32_t 2508 hns3_get_firber_port_speed_capa(uint32_t supported_speed) 2509 { 2510 uint32_t speed_capa = 0; 2511 2512 if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT) 2513 speed_capa |= RTE_ETH_LINK_SPEED_1G; 2514 if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT) 2515 speed_capa |= RTE_ETH_LINK_SPEED_10G; 2516 if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT) 2517 speed_capa |= RTE_ETH_LINK_SPEED_25G; 2518 if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT) 2519 speed_capa |= RTE_ETH_LINK_SPEED_40G; 2520 if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT) 2521 speed_capa |= RTE_ETH_LINK_SPEED_50G; 2522 if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT) 2523 speed_capa |= RTE_ETH_LINK_SPEED_100G; 2524 if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT) 2525 speed_capa |= RTE_ETH_LINK_SPEED_200G; 2526 2527 return speed_capa; 2528 } 2529 2530 static uint32_t 2531 hns3_get_speed_capa(struct hns3_hw *hw) 2532 { 2533 struct hns3_mac *mac = &hw->mac; 2534 uint32_t speed_capa; 2535 2536 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 2537 speed_capa = 2538 hns3_get_copper_port_speed_capa(mac->supported_speed); 2539 else 2540 speed_capa = 2541 hns3_get_firber_port_speed_capa(mac->supported_speed); 2542 2543 if (mac->support_autoneg == 0) 2544 speed_capa |= RTE_ETH_LINK_SPEED_FIXED; 2545 2546 return speed_capa; 2547 } 2548 2549 int 2550 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) 2551 { 2552 struct hns3_adapter *hns = eth_dev->data->dev_private; 2553 struct hns3_hw *hw = &hns->hw; 2554 uint16_t queue_num = hw->tqps_num; 2555 2556 /* 2557 * In interrupt mode, 'max_rx_queues' is set based on the number of 2558 * MSI-X interrupt resources of the hardware. 
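 * That is, 'max_rx_queues' is limited to hw->intr_tqps_num computed in
 * hns3_init_ring_with_vector(), which may be smaller than the total
 * tqps_num.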
2559 */ 2560 if (hw->data->dev_conf.intr_conf.rxq == 1) 2561 queue_num = hw->intr_tqps_num; 2562 2563 info->max_rx_queues = queue_num; 2564 info->max_tx_queues = hw->tqps_num; 2565 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ 2566 info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; 2567 info->max_mac_addrs = HNS3_UC_MACADDR_NUM; 2568 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; 2569 info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; 2570 info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 2571 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 2572 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 2573 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | 2574 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 2575 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | 2576 RTE_ETH_RX_OFFLOAD_KEEP_CRC | 2577 RTE_ETH_RX_OFFLOAD_SCATTER | 2578 RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 2579 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | 2580 RTE_ETH_RX_OFFLOAD_RSS_HASH | 2581 RTE_ETH_RX_OFFLOAD_TCP_LRO); 2582 info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | 2583 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | 2584 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | 2585 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | 2586 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | 2587 RTE_ETH_TX_OFFLOAD_MULTI_SEGS | 2588 RTE_ETH_TX_OFFLOAD_TCP_TSO | 2589 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | 2590 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | 2591 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | 2592 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | 2593 hns3_txvlan_cap_get(hw)); 2594 2595 if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) 2596 info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; 2597 2598 if (hns3_dev_get_support(hw, INDEP_TXRX)) 2599 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 2600 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 2601 info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; 2602 2603 if (hns3_dev_get_support(hw, PTP)) 2604 info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP; 2605 2606 info->rx_desc_lim = (struct rte_eth_desc_lim) { 2607 .nb_max = HNS3_MAX_RING_DESC, 2608 .nb_min = HNS3_MIN_RING_DESC, 2609 .nb_align = HNS3_ALIGN_RING_DESC, 2610 }; 2611 2612 info->tx_desc_lim = (struct rte_eth_desc_lim) { 2613 .nb_max = HNS3_MAX_RING_DESC, 2614 .nb_min = HNS3_MIN_RING_DESC, 2615 .nb_align = HNS3_ALIGN_RING_DESC, 2616 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, 2617 .nb_mtu_seg_max = hw->max_non_tso_bd_num, 2618 }; 2619 2620 info->speed_capa = hns3_get_speed_capa(hw); 2621 info->default_rxconf = (struct rte_eth_rxconf) { 2622 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, 2623 /* 2624 * If there are no available Rx buffer descriptors, incoming 2625 * packets are always dropped by hardware based on hns3 network 2626 * engine. 
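 * The default rx_drop_en = 1 below therefore only mirrors this fixed
 * hardware behaviour.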
2627 */ 2628 .rx_drop_en = 1, 2629 .offloads = 0, 2630 }; 2631 info->default_txconf = (struct rte_eth_txconf) { 2632 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, 2633 .offloads = 0, 2634 }; 2635 2636 info->reta_size = hw->rss_ind_tbl_size; 2637 info->hash_key_size = HNS3_RSS_KEY_SIZE; 2638 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; 2639 2640 info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2641 info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2642 info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2643 info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2644 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2645 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2646 2647 return 0; 2648 } 2649 2650 static int 2651 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, 2652 size_t fw_size) 2653 { 2654 struct hns3_adapter *hns = eth_dev->data->dev_private; 2655 struct hns3_hw *hw = &hns->hw; 2656 uint32_t version = hw->fw_version; 2657 int ret; 2658 2659 ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", 2660 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, 2661 HNS3_FW_VERSION_BYTE3_S), 2662 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, 2663 HNS3_FW_VERSION_BYTE2_S), 2664 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, 2665 HNS3_FW_VERSION_BYTE1_S), 2666 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, 2667 HNS3_FW_VERSION_BYTE0_S)); 2668 if (ret < 0) 2669 return -EINVAL; 2670 2671 ret += 1; /* add the size of '\0' */ 2672 if (fw_size < (size_t)ret) 2673 return ret; 2674 else 2675 return 0; 2676 } 2677 2678 static int 2679 hns3_update_port_link_info(struct rte_eth_dev *eth_dev) 2680 { 2681 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2682 int ret; 2683 2684 (void)hns3_update_link_status(hw); 2685 2686 ret = hns3_update_link_info(eth_dev); 2687 if (ret) 2688 hw->mac.link_status = RTE_ETH_LINK_DOWN; 2689 2690 return ret; 2691 } 2692 2693 static void 2694 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev, 2695 struct rte_eth_link *new_link) 2696 { 2697 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2698 struct hns3_mac *mac = &hw->mac; 2699 2700 switch (mac->link_speed) { 2701 case RTE_ETH_SPEED_NUM_10M: 2702 case RTE_ETH_SPEED_NUM_100M: 2703 case RTE_ETH_SPEED_NUM_1G: 2704 case RTE_ETH_SPEED_NUM_10G: 2705 case RTE_ETH_SPEED_NUM_25G: 2706 case RTE_ETH_SPEED_NUM_40G: 2707 case RTE_ETH_SPEED_NUM_50G: 2708 case RTE_ETH_SPEED_NUM_100G: 2709 case RTE_ETH_SPEED_NUM_200G: 2710 if (mac->link_status) 2711 new_link->link_speed = mac->link_speed; 2712 break; 2713 default: 2714 if (mac->link_status) 2715 new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; 2716 break; 2717 } 2718 2719 if (!mac->link_status) 2720 new_link->link_speed = RTE_ETH_SPEED_NUM_NONE; 2721 2722 new_link->link_duplex = mac->link_duplex; 2723 new_link->link_status = mac->link_status ? 
RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; 2724 new_link->link_autoneg = mac->link_autoneg; 2725 } 2726 2727 static int 2728 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) 2729 { 2730 #define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */ 2731 #define HNS3_MAX_LINK_CHECK_TIMES 20 /* 2s (100 * 20ms) in total */ 2732 2733 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2734 uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES; 2735 struct hns3_mac *mac = &hw->mac; 2736 struct rte_eth_link new_link; 2737 int ret; 2738 2739 /* When port is stopped, report link down. */ 2740 if (eth_dev->data->dev_started == 0) { 2741 new_link.link_autoneg = mac->link_autoneg; 2742 new_link.link_duplex = mac->link_duplex; 2743 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE; 2744 new_link.link_status = RTE_ETH_LINK_DOWN; 2745 goto out; 2746 } 2747 2748 do { 2749 ret = hns3_update_port_link_info(eth_dev); 2750 if (ret) { 2751 hns3_err(hw, "failed to get port link info, ret = %d.", 2752 ret); 2753 break; 2754 } 2755 2756 if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP) 2757 break; 2758 2759 rte_delay_ms(HNS3_LINK_CHECK_INTERVAL); 2760 } while (retry_cnt--); 2761 2762 memset(&new_link, 0, sizeof(new_link)); 2763 hns3_setup_linkstatus(eth_dev, &new_link); 2764 2765 out: 2766 return rte_eth_linkstatus_set(eth_dev, &new_link); 2767 } 2768 2769 static int 2770 hns3_dev_set_link_up(struct rte_eth_dev *dev) 2771 { 2772 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2773 int ret; 2774 2775 /* 2776 * The "tx_pkt_burst" will be restored. But the secondary process does 2777 * not support the mechanism for notifying the primary process. 2778 */ 2779 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2780 hns3_err(hw, "secondary process does not support to set link up."); 2781 return -ENOTSUP; 2782 } 2783 2784 /* 2785 * If device isn't started Rx/Tx function is still disabled, setting 2786 * link up is not allowed. But it is probably better to return success 2787 * to reduce the impact on the upper layer. 2788 */ 2789 if (hw->adapter_state != HNS3_NIC_STARTED) { 2790 hns3_info(hw, "device isn't started, can't set link up."); 2791 return 0; 2792 } 2793 2794 if (!hw->set_link_down) 2795 return 0; 2796 2797 rte_spinlock_lock(&hw->lock); 2798 ret = hns3_cfg_mac_mode(hw, true); 2799 if (ret) { 2800 rte_spinlock_unlock(&hw->lock); 2801 hns3_err(hw, "failed to set link up, ret = %d", ret); 2802 return ret; 2803 } 2804 2805 hw->set_link_down = false; 2806 hns3_start_tx_datapath(dev); 2807 rte_spinlock_unlock(&hw->lock); 2808 2809 return 0; 2810 } 2811 2812 static int 2813 hns3_dev_set_link_down(struct rte_eth_dev *dev) 2814 { 2815 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2816 int ret; 2817 2818 /* 2819 * The "tx_pkt_burst" will be set to dummy function. But the secondary 2820 * process does not support the mechanism for notifying the primary 2821 * process. 2822 */ 2823 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2824 hns3_err(hw, "secondary process does not support to set link down."); 2825 return -ENOTSUP; 2826 } 2827 2828 /* 2829 * If device isn't started or the API has been called, link status is 2830 * down, return success. 
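 * (hw->set_link_down, which is set at the end of this function, records
 * that a link-down request was already handled, so repeated calls become
 * no-ops.)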
2831 */ 2832 if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down) 2833 return 0; 2834 2835 rte_spinlock_lock(&hw->lock); 2836 hns3_stop_tx_datapath(dev); 2837 ret = hns3_cfg_mac_mode(hw, false); 2838 if (ret) { 2839 hns3_start_tx_datapath(dev); 2840 rte_spinlock_unlock(&hw->lock); 2841 hns3_err(hw, "failed to set link down, ret = %d", ret); 2842 return ret; 2843 } 2844 2845 hw->set_link_down = true; 2846 rte_spinlock_unlock(&hw->lock); 2847 2848 return 0; 2849 } 2850 2851 static int 2852 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status) 2853 { 2854 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2855 struct hns3_pf *pf = &hns->pf; 2856 2857 if (!(status->pf_state & HNS3_PF_STATE_DONE)) 2858 return -EINVAL; 2859 2860 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false; 2861 2862 return 0; 2863 } 2864 2865 static int 2866 hns3_query_function_status(struct hns3_hw *hw) 2867 { 2868 #define HNS3_QUERY_MAX_CNT 10 2869 #define HNS3_QUERY_SLEEP_MSCOEND 1 2870 struct hns3_func_status_cmd *req; 2871 struct hns3_cmd_desc desc; 2872 int timeout = 0; 2873 int ret; 2874 2875 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true); 2876 req = (struct hns3_func_status_cmd *)desc.data; 2877 2878 do { 2879 ret = hns3_cmd_send(hw, &desc, 1); 2880 if (ret) { 2881 PMD_INIT_LOG(ERR, "query function status failed %d", 2882 ret); 2883 return ret; 2884 } 2885 2886 /* Check pf reset is done */ 2887 if (req->pf_state) 2888 break; 2889 2890 rte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND); 2891 } while (timeout++ < HNS3_QUERY_MAX_CNT); 2892 2893 return hns3_parse_func_status(hw, req); 2894 } 2895 2896 static int 2897 hns3_get_pf_max_tqp_num(struct hns3_hw *hw) 2898 { 2899 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2900 struct hns3_pf *pf = &hns->pf; 2901 2902 if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) { 2903 /* 2904 * The total_tqps_num obtained from firmware is maximum tqp 2905 * numbers of this port, which should be used for PF and VFs. 2906 * There is no need for pf to have so many tqp numbers in 2907 * most cases. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2908 * coming from config file, is assigned to maximum queue number 2909 * for the PF of this port by user. So users can modify the 2910 * maximum queue number of PF according to their own application 2911 * scenarios, which is more flexible to use. In addition, many 2912 * memories can be saved due to allocating queue statistics 2913 * room according to the actual number of queues required. The 2914 * maximum queue number of PF for network engine with 2915 * revision_id greater than 0x30 is assigned by config file. 2916 */ 2917 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) { 2918 hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) " 2919 "must be greater than 0.", 2920 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF); 2921 return -EINVAL; 2922 } 2923 2924 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2925 hw->total_tqps_num); 2926 } else { 2927 /* 2928 * Due to the limitation on the number of PF interrupts 2929 * available, the maximum queue number assigned to PF on 2930 * the network engine with revision_id 0x21 is 64. 
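 * That limit corresponds to HNS3_MAX_TQP_NUM_HIP08_PF used below.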
2931 */ 2932 hw->tqps_num = RTE_MIN(hw->total_tqps_num, 2933 HNS3_MAX_TQP_NUM_HIP08_PF); 2934 } 2935 2936 return 0; 2937 } 2938 2939 static int 2940 hns3_query_pf_resource(struct hns3_hw *hw) 2941 { 2942 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2943 struct hns3_pf *pf = &hns->pf; 2944 struct hns3_pf_res_cmd *req; 2945 struct hns3_cmd_desc desc; 2946 int ret; 2947 2948 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); 2949 ret = hns3_cmd_send(hw, &desc, 1); 2950 if (ret) { 2951 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); 2952 return ret; 2953 } 2954 2955 req = (struct hns3_pf_res_cmd *)desc.data; 2956 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) + 2957 rte_le_to_cpu_16(req->ext_tqp_num); 2958 ret = hns3_get_pf_max_tqp_num(hw); 2959 if (ret) 2960 return ret; 2961 2962 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; 2963 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); 2964 2965 if (req->tx_buf_size) 2966 pf->tx_buf_size = 2967 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; 2968 else 2969 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; 2970 2971 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); 2972 2973 if (req->dv_buf_size) 2974 pf->dv_buf_size = 2975 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; 2976 else 2977 pf->dv_buf_size = HNS3_DEFAULT_DV; 2978 2979 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); 2980 2981 hw->num_msi = 2982 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), 2983 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); 2984 2985 return 0; 2986 } 2987 2988 static void 2989 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) 2990 { 2991 struct hns3_cfg_param_cmd *req; 2992 uint64_t mac_addr_tmp_high; 2993 uint8_t ext_rss_size_max; 2994 uint64_t mac_addr_tmp; 2995 uint32_t i; 2996 2997 req = (struct hns3_cfg_param_cmd *)desc[0].data; 2998 2999 /* get the configuration */ 3000 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3001 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); 3002 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3003 HNS3_CFG_TQP_DESC_N_M, 3004 HNS3_CFG_TQP_DESC_N_S); 3005 3006 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3007 HNS3_CFG_PHY_ADDR_M, 3008 HNS3_CFG_PHY_ADDR_S); 3009 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3010 HNS3_CFG_MEDIA_TP_M, 3011 HNS3_CFG_MEDIA_TP_S); 3012 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3013 HNS3_CFG_RX_BUF_LEN_M, 3014 HNS3_CFG_RX_BUF_LEN_S); 3015 /* get mac address */ 3016 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); 3017 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3018 HNS3_CFG_MAC_ADDR_H_M, 3019 HNS3_CFG_MAC_ADDR_H_S); 3020 3021 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 3022 3023 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3024 HNS3_CFG_DEFAULT_SPEED_M, 3025 HNS3_CFG_DEFAULT_SPEED_S); 3026 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3027 HNS3_CFG_RSS_SIZE_M, 3028 HNS3_CFG_RSS_SIZE_S); 3029 3030 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 3031 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 3032 3033 req = (struct hns3_cfg_param_cmd *)desc[1].data; 3034 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); 3035 3036 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3037 HNS3_CFG_SPEED_ABILITY_M, 3038 HNS3_CFG_SPEED_ABILITY_S); 3039 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3040 
HNS3_CFG_UMV_TBL_SPACE_M, 3041 HNS3_CFG_UMV_TBL_SPACE_S); 3042 if (!cfg->umv_space) 3043 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF; 3044 3045 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]), 3046 HNS3_CFG_EXT_RSS_SIZE_M, 3047 HNS3_CFG_EXT_RSS_SIZE_S); 3048 /* 3049 * Field ext_rss_size_max obtained from firmware will be more flexible 3050 * for future changes and expansions, which is an exponent of 2, instead 3051 * of reading out directly. If this field is not zero, hns3 PF PMD 3052 * driver uses it as rss_size_max under one TC. Device, whose revision 3053 * id is greater than or equal to PCI_REVISION_ID_HIP09_A, obtains the 3054 * maximum number of queues supported under a TC through this field. 3055 */ 3056 if (ext_rss_size_max) 3057 cfg->rss_size_max = 1U << ext_rss_size_max; 3058 } 3059 3060 /* hns3_get_board_cfg: query the static parameter from NCL_config file in flash 3061 * @hw: pointer to struct hns3_hw 3062 * @hcfg: the config structure to be getted 3063 */ 3064 static int 3065 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg) 3066 { 3067 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM]; 3068 struct hns3_cfg_param_cmd *req; 3069 uint32_t offset; 3070 uint32_t i; 3071 int ret; 3072 3073 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) { 3074 offset = 0; 3075 req = (struct hns3_cfg_param_cmd *)desc[i].data; 3076 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM, 3077 true); 3078 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S, 3079 i * HNS3_CFG_RD_LEN_BYTES); 3080 /* Len should be divided by 4 when send to hardware */ 3081 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S, 3082 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT); 3083 req->offset = rte_cpu_to_le_32(offset); 3084 } 3085 3086 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM); 3087 if (ret) { 3088 PMD_INIT_LOG(ERR, "get config failed %d.", ret); 3089 return ret; 3090 } 3091 3092 hns3_parse_cfg(hcfg, desc); 3093 3094 return 0; 3095 } 3096 3097 static int 3098 hns3_parse_speed(int speed_cmd, uint32_t *speed) 3099 { 3100 switch (speed_cmd) { 3101 case HNS3_CFG_SPEED_10M: 3102 *speed = RTE_ETH_SPEED_NUM_10M; 3103 break; 3104 case HNS3_CFG_SPEED_100M: 3105 *speed = RTE_ETH_SPEED_NUM_100M; 3106 break; 3107 case HNS3_CFG_SPEED_1G: 3108 *speed = RTE_ETH_SPEED_NUM_1G; 3109 break; 3110 case HNS3_CFG_SPEED_10G: 3111 *speed = RTE_ETH_SPEED_NUM_10G; 3112 break; 3113 case HNS3_CFG_SPEED_25G: 3114 *speed = RTE_ETH_SPEED_NUM_25G; 3115 break; 3116 case HNS3_CFG_SPEED_40G: 3117 *speed = RTE_ETH_SPEED_NUM_40G; 3118 break; 3119 case HNS3_CFG_SPEED_50G: 3120 *speed = RTE_ETH_SPEED_NUM_50G; 3121 break; 3122 case HNS3_CFG_SPEED_100G: 3123 *speed = RTE_ETH_SPEED_NUM_100G; 3124 break; 3125 case HNS3_CFG_SPEED_200G: 3126 *speed = RTE_ETH_SPEED_NUM_200G; 3127 break; 3128 default: 3129 return -EINVAL; 3130 } 3131 3132 return 0; 3133 } 3134 3135 static void 3136 hns3_set_default_dev_specifications(struct hns3_hw *hw) 3137 { 3138 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; 3139 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; 3140 hw->rss_key_size = HNS3_RSS_KEY_SIZE; 3141 hw->max_tm_rate = HNS3_ETHER_MAX_RATE; 3142 hw->intr.int_ql_max = HNS3_INTR_QL_NONE; 3143 } 3144 3145 static void 3146 hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) 3147 { 3148 struct hns3_dev_specs_0_cmd *req0; 3149 3150 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; 3151 3152 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; 3153 hw->rss_ind_tbl_size = 
rte_le_to_cpu_16(req0->rss_ind_tbl_size); 3154 hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); 3155 hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); 3156 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); 3157 } 3158 3159 static int 3160 hns3_check_dev_specifications(struct hns3_hw *hw) 3161 { 3162 if (hw->rss_ind_tbl_size == 0 || 3163 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { 3164 hns3_err(hw, "the size of hash lookup table configured (%u)" 3165 " exceeds the maximum(%u)", hw->rss_ind_tbl_size, 3166 HNS3_RSS_IND_TBL_SIZE_MAX); 3167 return -EINVAL; 3168 } 3169 3170 return 0; 3171 } 3172 3173 static int 3174 hns3_query_dev_specifications(struct hns3_hw *hw) 3175 { 3176 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; 3177 int ret; 3178 int i; 3179 3180 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3181 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, 3182 true); 3183 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3184 } 3185 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); 3186 3187 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); 3188 if (ret) 3189 return ret; 3190 3191 hns3_parse_dev_specifications(hw, desc); 3192 3193 return hns3_check_dev_specifications(hw); 3194 } 3195 3196 static int 3197 hns3_get_capability(struct hns3_hw *hw) 3198 { 3199 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3200 struct rte_pci_device *pci_dev; 3201 struct hns3_pf *pf = &hns->pf; 3202 struct rte_eth_dev *eth_dev; 3203 uint16_t device_id; 3204 uint8_t revision; 3205 int ret; 3206 3207 eth_dev = &rte_eth_devices[hw->data->port_id]; 3208 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3209 device_id = pci_dev->id.device_id; 3210 3211 if (device_id == HNS3_DEV_ID_25GE_RDMA || 3212 device_id == HNS3_DEV_ID_50GE_RDMA || 3213 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || 3214 device_id == HNS3_DEV_ID_200G_RDMA) 3215 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); 3216 3217 /* Get PCI revision id */ 3218 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, 3219 HNS3_PCI_REVISION_ID); 3220 if (ret != HNS3_PCI_REVISION_ID_LEN) { 3221 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", 3222 ret); 3223 return -EIO; 3224 } 3225 hw->revision = revision; 3226 3227 if (revision < PCI_REVISION_ID_HIP09_A) { 3228 hns3_set_default_dev_specifications(hw); 3229 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; 3230 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; 3231 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; 3232 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE; 3233 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1; 3234 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; 3235 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE; 3236 hw->rss_info.ipv6_sctp_offload_supported = false; 3237 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; 3238 pf->support_multi_tc_pause = false; 3239 return 0; 3240 } 3241 3242 ret = hns3_query_dev_specifications(hw); 3243 if (ret) { 3244 PMD_INIT_LOG(ERR, 3245 "failed to query dev specifications, ret = %d", 3246 ret); 3247 return ret; 3248 } 3249 3250 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 3251 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 3252 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 3253 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; 3254 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; 3255 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; 3256 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; 3257 
hw->rss_info.ipv6_sctp_offload_supported = true; 3258 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; 3259 pf->support_multi_tc_pause = true; 3260 3261 return 0; 3262 } 3263 3264 static int 3265 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) 3266 { 3267 int ret; 3268 3269 switch (media_type) { 3270 case HNS3_MEDIA_TYPE_COPPER: 3271 if (!hns3_dev_get_support(hw, COPPER)) { 3272 PMD_INIT_LOG(ERR, 3273 "Media type is copper, not supported."); 3274 ret = -EOPNOTSUPP; 3275 } else { 3276 ret = 0; 3277 } 3278 break; 3279 case HNS3_MEDIA_TYPE_FIBER: 3280 ret = 0; 3281 break; 3282 case HNS3_MEDIA_TYPE_BACKPLANE: 3283 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported."); 3284 ret = -EOPNOTSUPP; 3285 break; 3286 default: 3287 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); 3288 ret = -EINVAL; 3289 break; 3290 } 3291 3292 return ret; 3293 } 3294 3295 static int 3296 hns3_get_board_configuration(struct hns3_hw *hw) 3297 { 3298 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3299 struct hns3_pf *pf = &hns->pf; 3300 struct hns3_cfg cfg; 3301 int ret; 3302 3303 ret = hns3_get_board_cfg(hw, &cfg); 3304 if (ret) { 3305 PMD_INIT_LOG(ERR, "get board config failed %d", ret); 3306 return ret; 3307 } 3308 3309 ret = hns3_check_media_type(hw, cfg.media_type); 3310 if (ret) 3311 return ret; 3312 3313 hw->mac.media_type = cfg.media_type; 3314 hw->rss_size_max = cfg.rss_size_max; 3315 hw->rss_dis_flag = false; 3316 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); 3317 hw->mac.phy_addr = cfg.phy_addr; 3318 hw->num_tx_desc = cfg.tqp_desc_num; 3319 hw->num_rx_desc = cfg.tqp_desc_num; 3320 hw->dcb_info.num_pg = 1; 3321 hw->dcb_info.hw_pfc_map = 0; 3322 3323 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); 3324 if (ret) { 3325 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d", 3326 cfg.default_speed, ret); 3327 return ret; 3328 } 3329 3330 pf->tc_max = cfg.tc_num; 3331 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { 3332 PMD_INIT_LOG(WARNING, 3333 "Get TC num(%u) from flash, set TC num to 1", 3334 pf->tc_max); 3335 pf->tc_max = 1; 3336 } 3337 3338 /* Dev does not support DCB */ 3339 if (!hns3_dev_get_support(hw, DCB)) { 3340 pf->tc_max = 1; 3341 pf->pfc_max = 0; 3342 } else 3343 pf->pfc_max = pf->tc_max; 3344 3345 hw->dcb_info.num_tc = 1; 3346 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, 3347 hw->tqps_num / hw->dcb_info.num_tc); 3348 hns3_set_bit(hw->hw_tc_map, 0, 1); 3349 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; 3350 3351 pf->wanted_umv_size = cfg.umv_space; 3352 3353 return ret; 3354 } 3355 3356 static int 3357 hns3_get_configuration(struct hns3_hw *hw) 3358 { 3359 int ret; 3360 3361 ret = hns3_query_function_status(hw); 3362 if (ret) { 3363 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); 3364 return ret; 3365 } 3366 3367 /* Get device capability */ 3368 ret = hns3_get_capability(hw); 3369 if (ret) { 3370 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 3371 return ret; 3372 } 3373 3374 /* Get pf resource */ 3375 ret = hns3_query_pf_resource(hw); 3376 if (ret) { 3377 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); 3378 return ret; 3379 } 3380 3381 ret = hns3_get_board_configuration(hw); 3382 if (ret) { 3383 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); 3384 return ret; 3385 } 3386 3387 ret = hns3_query_dev_fec_info(hw); 3388 if (ret) 3389 PMD_INIT_LOG(ERR, 3390 "failed to query FEC information, ret = %d", ret); 3391 3392 return ret; 3393 } 3394 3395 static int 3396 
hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid, 3397 uint16_t tqp_vid, bool is_pf) 3398 { 3399 struct hns3_tqp_map_cmd *req; 3400 struct hns3_cmd_desc desc; 3401 int ret; 3402 3403 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); 3404 3405 req = (struct hns3_tqp_map_cmd *)desc.data; 3406 req->tqp_id = rte_cpu_to_le_16(tqp_pid); 3407 req->tqp_vf = func_id; 3408 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; 3409 if (!is_pf) 3410 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); 3411 req->tqp_vid = rte_cpu_to_le_16(tqp_vid); 3412 3413 ret = hns3_cmd_send(hw, &desc, 1); 3414 if (ret) 3415 PMD_INIT_LOG(ERR, "TQP map failed %d", ret); 3416 3417 return ret; 3418 } 3419 3420 static int 3421 hns3_map_tqp(struct hns3_hw *hw) 3422 { 3423 int ret; 3424 int i; 3425 3426 /* 3427 * In current version, VF is not supported when PF is driven by DPDK 3428 * driver, so we assign total tqps_num tqps allocated to this port 3429 * to PF. 3430 */ 3431 for (i = 0; i < hw->total_tqps_num; i++) { 3432 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true); 3433 if (ret) 3434 return ret; 3435 } 3436 3437 return 0; 3438 } 3439 3440 static int 3441 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 3442 { 3443 struct hns3_config_mac_speed_dup_cmd *req; 3444 struct hns3_cmd_desc desc; 3445 int ret; 3446 3447 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; 3448 3449 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); 3450 3451 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0); 3452 3453 switch (speed) { 3454 case RTE_ETH_SPEED_NUM_10M: 3455 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3456 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); 3457 break; 3458 case RTE_ETH_SPEED_NUM_100M: 3459 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3460 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); 3461 break; 3462 case RTE_ETH_SPEED_NUM_1G: 3463 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3464 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); 3465 break; 3466 case RTE_ETH_SPEED_NUM_10G: 3467 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3468 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); 3469 break; 3470 case RTE_ETH_SPEED_NUM_25G: 3471 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3472 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); 3473 break; 3474 case RTE_ETH_SPEED_NUM_40G: 3475 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3476 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); 3477 break; 3478 case RTE_ETH_SPEED_NUM_50G: 3479 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3480 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); 3481 break; 3482 case RTE_ETH_SPEED_NUM_100G: 3483 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3484 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); 3485 break; 3486 case RTE_ETH_SPEED_NUM_200G: 3487 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3488 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); 3489 break; 3490 default: 3491 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); 3492 return -EINVAL; 3493 } 3494 3495 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); 3496 3497 ret = hns3_cmd_send(hw, &desc, 1); 3498 if (ret) 3499 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); 3500 3501 return ret; 3502 } 3503 3504 static int 3505 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3506 { 3507 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3508 struct hns3_pf *pf = &hns->pf; 3509 struct hns3_priv_buf *priv; 3510 uint32_t i, total_size; 3511 3512 total_size = pf->pkt_buf_size; 3513 3514 /* 
alloc tx buffer for all enabled tc */ 3515 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3516 priv = &buf_alloc->priv_buf[i]; 3517 3518 if (hw->hw_tc_map & BIT(i)) { 3519 if (total_size < pf->tx_buf_size) 3520 return -ENOMEM; 3521 3522 priv->tx_buf_size = pf->tx_buf_size; 3523 } else 3524 priv->tx_buf_size = 0; 3525 3526 total_size -= priv->tx_buf_size; 3527 } 3528 3529 return 0; 3530 } 3531 3532 static int 3533 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3534 { 3535 /* TX buffer size is unit by 128 byte */ 3536 #define HNS3_BUF_SIZE_UNIT_SHIFT 7 3537 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) 3538 struct hns3_tx_buff_alloc_cmd *req; 3539 struct hns3_cmd_desc desc; 3540 uint32_t buf_size; 3541 uint32_t i; 3542 int ret; 3543 3544 req = (struct hns3_tx_buff_alloc_cmd *)desc.data; 3545 3546 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); 3547 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3548 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 3549 3550 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; 3551 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | 3552 HNS3_BUF_SIZE_UPDATE_EN_MSK); 3553 } 3554 3555 ret = hns3_cmd_send(hw, &desc, 1); 3556 if (ret) 3557 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); 3558 3559 return ret; 3560 } 3561 3562 static int 3563 hns3_get_tc_num(struct hns3_hw *hw) 3564 { 3565 int cnt = 0; 3566 uint8_t i; 3567 3568 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3569 if (hw->hw_tc_map & BIT(i)) 3570 cnt++; 3571 return cnt; 3572 } 3573 3574 static uint32_t 3575 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3576 { 3577 struct hns3_priv_buf *priv; 3578 uint32_t rx_priv = 0; 3579 int i; 3580 3581 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3582 priv = &buf_alloc->priv_buf[i]; 3583 if (priv->enable) 3584 rx_priv += priv->buf_size; 3585 } 3586 return rx_priv; 3587 } 3588 3589 static uint32_t 3590 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3591 { 3592 uint32_t total_tx_size = 0; 3593 uint32_t i; 3594 3595 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3596 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 3597 3598 return total_tx_size; 3599 } 3600 3601 /* Get the number of pfc enabled TCs, which have private buffer */ 3602 static int 3603 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3604 { 3605 struct hns3_priv_buf *priv; 3606 int cnt = 0; 3607 uint8_t i; 3608 3609 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3610 priv = &buf_alloc->priv_buf[i]; 3611 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3612 cnt++; 3613 } 3614 3615 return cnt; 3616 } 3617 3618 /* Get the number of pfc disabled TCs, which have private buffer */ 3619 static int 3620 hns3_get_no_pfc_priv_num(struct hns3_hw *hw, 3621 struct hns3_pkt_buf_alloc *buf_alloc) 3622 { 3623 struct hns3_priv_buf *priv; 3624 int cnt = 0; 3625 uint8_t i; 3626 3627 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3628 priv = &buf_alloc->priv_buf[i]; 3629 if (hw->hw_tc_map & BIT(i) && 3630 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3631 cnt++; 3632 } 3633 3634 return cnt; 3635 } 3636 3637 static bool 3638 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, 3639 uint32_t rx_all) 3640 { 3641 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 3642 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3643 struct hns3_pf *pf = &hns->pf; 3644 uint32_t shared_buf, aligned_mps; 3645 uint32_t rx_priv; 3646 uint8_t tc_num; 3647 uint8_t i; 3648 3649 tc_num = hns3_get_tc_num(hw); 3650 
aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3651 3652 if (hns3_dev_get_support(hw, DCB)) 3653 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + 3654 pf->dv_buf_size; 3655 else 3656 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF 3657 + pf->dv_buf_size; 3658 3659 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 3660 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), 3661 HNS3_BUF_SIZE_UNIT); 3662 3663 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); 3664 if (rx_all < rx_priv + shared_std) 3665 return false; 3666 3667 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); 3668 buf_alloc->s_buf.buf_size = shared_buf; 3669 if (hns3_dev_get_support(hw, DCB)) { 3670 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; 3671 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 3672 - roundup(aligned_mps / HNS3_BUF_DIV_BY, 3673 HNS3_BUF_SIZE_UNIT); 3674 } else { 3675 buf_alloc->s_buf.self.high = 3676 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3677 buf_alloc->s_buf.self.low = aligned_mps; 3678 } 3679 3680 if (hns3_dev_get_support(hw, DCB)) { 3681 hi_thrd = shared_buf - pf->dv_buf_size; 3682 3683 if (tc_num <= NEED_RESERVE_TC_NUM) 3684 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT / 3685 BUF_MAX_PERCENT; 3686 3687 if (tc_num) 3688 hi_thrd = hi_thrd / tc_num; 3689 3690 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); 3691 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); 3692 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; 3693 } else { 3694 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3695 lo_thrd = aligned_mps; 3696 } 3697 3698 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3699 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 3700 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 3701 } 3702 3703 return true; 3704 } 3705 3706 static bool 3707 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, 3708 struct hns3_pkt_buf_alloc *buf_alloc) 3709 { 3710 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3711 struct hns3_pf *pf = &hns->pf; 3712 struct hns3_priv_buf *priv; 3713 uint32_t aligned_mps; 3714 uint32_t rx_all; 3715 uint8_t i; 3716 3717 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3718 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3719 3720 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3721 priv = &buf_alloc->priv_buf[i]; 3722 3723 priv->enable = 0; 3724 priv->wl.low = 0; 3725 priv->wl.high = 0; 3726 priv->buf_size = 0; 3727 3728 if (!(hw->hw_tc_map & BIT(i))) 3729 continue; 3730 3731 priv->enable = 1; 3732 if (hw->dcb_info.hw_pfc_map & BIT(i)) { 3733 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT; 3734 priv->wl.high = roundup(priv->wl.low + aligned_mps, 3735 HNS3_BUF_SIZE_UNIT); 3736 } else { 3737 priv->wl.low = 0; 3738 priv->wl.high = max ? 
(aligned_mps * HNS3_BUF_MUL_BY) : 3739 aligned_mps; 3740 } 3741 3742 priv->buf_size = priv->wl.high + pf->dv_buf_size; 3743 } 3744 3745 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3746 } 3747 3748 static bool 3749 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw, 3750 struct hns3_pkt_buf_alloc *buf_alloc) 3751 { 3752 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3753 struct hns3_pf *pf = &hns->pf; 3754 struct hns3_priv_buf *priv; 3755 int no_pfc_priv_num; 3756 uint32_t rx_all; 3757 uint8_t mask; 3758 int i; 3759 3760 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3761 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc); 3762 3763 /* let the last to be cleared first */ 3764 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3765 priv = &buf_alloc->priv_buf[i]; 3766 mask = BIT((uint8_t)i); 3767 if (hw->hw_tc_map & mask && 3768 !(hw->dcb_info.hw_pfc_map & mask)) { 3769 /* Clear the no pfc TC private buffer */ 3770 priv->wl.low = 0; 3771 priv->wl.high = 0; 3772 priv->buf_size = 0; 3773 priv->enable = 0; 3774 no_pfc_priv_num--; 3775 } 3776 3777 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3778 no_pfc_priv_num == 0) 3779 break; 3780 } 3781 3782 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3783 } 3784 3785 static bool 3786 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, 3787 struct hns3_pkt_buf_alloc *buf_alloc) 3788 { 3789 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3790 struct hns3_pf *pf = &hns->pf; 3791 struct hns3_priv_buf *priv; 3792 uint32_t rx_all; 3793 int pfc_priv_num; 3794 uint8_t mask; 3795 int i; 3796 3797 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3798 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc); 3799 3800 /* let the last to be cleared first */ 3801 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3802 priv = &buf_alloc->priv_buf[i]; 3803 mask = BIT((uint8_t)i); 3804 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) { 3805 /* Reduce the number of pfc TC with private buffer */ 3806 priv->wl.low = 0; 3807 priv->enable = 0; 3808 priv->wl.high = 0; 3809 priv->buf_size = 0; 3810 pfc_priv_num--; 3811 } 3812 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3813 pfc_priv_num == 0) 3814 break; 3815 } 3816 3817 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3818 } 3819 3820 static bool 3821 hns3_only_alloc_priv_buff(struct hns3_hw *hw, 3822 struct hns3_pkt_buf_alloc *buf_alloc) 3823 { 3824 #define COMPENSATE_BUFFER 0x3C00 3825 #define COMPENSATE_HALF_MPS_NUM 5 3826 #define PRIV_WL_GAP 0x1800 3827 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3828 struct hns3_pf *pf = &hns->pf; 3829 uint32_t tc_num = hns3_get_tc_num(hw); 3830 uint32_t half_mps = pf->mps >> 1; 3831 struct hns3_priv_buf *priv; 3832 uint32_t min_rx_priv; 3833 uint32_t rx_priv; 3834 uint8_t i; 3835 3836 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3837 if (tc_num) 3838 rx_priv = rx_priv / tc_num; 3839 3840 if (tc_num <= NEED_RESERVE_TC_NUM) 3841 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 3842 3843 /* 3844 * Minimum value of private buffer in rx direction (min_rx_priv) is 3845 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private 3846 * buffer if rx_priv is greater than min_rx_priv. 
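 * In the code below the 15KB term is COMPENSATE_BUFFER (0x3C00 = 15360
 * bytes), the 2.5 * MPS term is COMPENSATE_HALF_MPS_NUM (5) * half_mps
 * (MPS / 2), and DV is pf->dv_buf_size.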
3847 */ 3848 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER + 3849 COMPENSATE_HALF_MPS_NUM * half_mps; 3850 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT); 3851 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT); 3852 if (rx_priv < min_rx_priv) 3853 return false; 3854 3855 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3856 priv = &buf_alloc->priv_buf[i]; 3857 priv->enable = 0; 3858 priv->wl.low = 0; 3859 priv->wl.high = 0; 3860 priv->buf_size = 0; 3861 3862 if (!(hw->hw_tc_map & BIT(i))) 3863 continue; 3864 3865 priv->enable = 1; 3866 priv->buf_size = rx_priv; 3867 priv->wl.high = rx_priv - pf->dv_buf_size; 3868 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 3869 } 3870 3871 buf_alloc->s_buf.buf_size = 0; 3872 3873 return true; 3874 } 3875 3876 /* 3877 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs 3878 * @hw: pointer to struct hns3_hw 3879 * @buf_alloc: pointer to buffer calculation data 3880 * @return: 0: calculation successful, negative: fail 3881 */ 3882 static int 3883 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3884 { 3885 /* When DCB is not supported, rx private buffer is not allocated. */ 3886 if (!hns3_dev_get_support(hw, DCB)) { 3887 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3888 struct hns3_pf *pf = &hns->pf; 3889 uint32_t rx_all = pf->pkt_buf_size; 3890 3891 rx_all -= hns3_get_tx_buff_alloced(buf_alloc); 3892 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all)) 3893 return -ENOMEM; 3894 3895 return 0; 3896 } 3897 3898 /* 3899 * Try to allocate private packet buffer for all TCs without shared 3900 * buffer. 3901 */ 3902 if (hns3_only_alloc_priv_buff(hw, buf_alloc)) 3903 return 0; 3904 3905 /* 3906 * Try to allocate private packet buffer for all TCs with shared 3907 * buffer. 3908 */ 3909 if (hns3_rx_buf_calc_all(hw, true, buf_alloc)) 3910 return 0; 3911 3912 /* 3913 * For different application scenarios, the enabled port number, TC number 3914 * and no_drop TC number are different. In order to obtain better 3915 * performance, software could allocate the buffer size and configure 3916 * the waterline by trying to decrease the private buffer size according 3917 * to the order, namely, waterline of valid tc, pfc disabled tc, pfc 3918 * enabled tc.
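 * The fallback calls below follow this order: hns3_rx_buf_calc_all() with max set to false, then hns3_drop_nopfc_buf_till_fit(), and finally hns3_drop_pfc_buf_till_fit().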
3919 */ 3920 if (hns3_rx_buf_calc_all(hw, false, buf_alloc)) 3921 return 0; 3922 3923 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc)) 3924 return 0; 3925 3926 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc)) 3927 return 0; 3928 3929 return -ENOMEM; 3930 } 3931 3932 static int 3933 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3934 { 3935 struct hns3_rx_priv_buff_cmd *req; 3936 struct hns3_cmd_desc desc; 3937 uint32_t buf_size; 3938 int ret; 3939 int i; 3940 3941 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false); 3942 req = (struct hns3_rx_priv_buff_cmd *)desc.data; 3943 3944 /* Alloc private buffer TCs */ 3945 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3946 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i]; 3947 3948 req->buf_num[i] = 3949 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S); 3950 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B); 3951 } 3952 3953 buf_size = buf_alloc->s_buf.buf_size; 3954 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) | 3955 (1 << HNS3_TC0_PRI_BUF_EN_B)); 3956 3957 ret = hns3_cmd_send(hw, &desc, 1); 3958 if (ret) 3959 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret); 3960 3961 return ret; 3962 } 3963 3964 static int 3965 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3966 { 3967 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2 3968 struct hns3_rx_priv_wl_buf *req; 3969 struct hns3_priv_buf *priv; 3970 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM]; 3971 int i, j; 3972 int ret; 3973 3974 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) { 3975 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC, 3976 false); 3977 req = (struct hns3_rx_priv_wl_buf *)desc[i].data; 3978 3979 /* The first descriptor set the NEXT bit to 1 */ 3980 if (i == 0) 3981 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3982 else 3983 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3984 3985 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 3986 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j; 3987 3988 priv = &buf_alloc->priv_buf[idx]; 3989 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >> 3990 HNS3_BUF_UNIT_S); 3991 req->tc_wl[j].high |= 3992 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3993 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >> 3994 HNS3_BUF_UNIT_S); 3995 req->tc_wl[j].low |= 3996 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3997 } 3998 } 3999 4000 /* Send 2 descriptor at one time */ 4001 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM); 4002 if (ret) 4003 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d", 4004 ret); 4005 return ret; 4006 } 4007 4008 static int 4009 hns3_common_thrd_config(struct hns3_hw *hw, 4010 struct hns3_pkt_buf_alloc *buf_alloc) 4011 { 4012 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2 4013 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf; 4014 struct hns3_rx_com_thrd *req; 4015 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM]; 4016 struct hns3_tc_thrd *tc; 4017 int tc_idx; 4018 int i, j; 4019 int ret; 4020 4021 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) { 4022 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC, 4023 false); 4024 req = (struct hns3_rx_com_thrd *)&desc[i].data; 4025 4026 /* The first descriptor set the NEXT bit to 1 */ 4027 if (i == 0) 4028 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4029 else 4030 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4031 4032 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 4033 tc_idx 
= i * HNS3_TC_NUM_ONE_DESC + j; 4034 tc = &s_buf->tc_thrd[tc_idx]; 4035 4036 req->com_thrd[j].high = 4037 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); 4038 req->com_thrd[j].high |= 4039 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4040 req->com_thrd[j].low = 4041 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); 4042 req->com_thrd[j].low |= 4043 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4044 } 4045 } 4046 4047 /* Send 2 descriptors at one time */ 4048 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); 4049 if (ret) 4050 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); 4051 4052 return ret; 4053 } 4054 4055 static int 4056 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4057 { 4058 struct hns3_shared_buf *buf = &buf_alloc->s_buf; 4059 struct hns3_rx_com_wl *req; 4060 struct hns3_cmd_desc desc; 4061 int ret; 4062 4063 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); 4064 4065 req = (struct hns3_rx_com_wl *)desc.data; 4066 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); 4067 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4068 4069 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); 4070 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4071 4072 ret = hns3_cmd_send(hw, &desc, 1); 4073 if (ret) 4074 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); 4075 4076 return ret; 4077 } 4078 4079 int 4080 hns3_buffer_alloc(struct hns3_hw *hw) 4081 { 4082 struct hns3_pkt_buf_alloc pkt_buf; 4083 int ret; 4084 4085 memset(&pkt_buf, 0, sizeof(pkt_buf)); 4086 ret = hns3_tx_buffer_calc(hw, &pkt_buf); 4087 if (ret) { 4088 PMD_INIT_LOG(ERR, 4089 "could not calc tx buffer size for all TCs %d", 4090 ret); 4091 return ret; 4092 } 4093 4094 ret = hns3_tx_buffer_alloc(hw, &pkt_buf); 4095 if (ret) { 4096 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); 4097 return ret; 4098 } 4099 4100 ret = hns3_rx_buffer_calc(hw, &pkt_buf); 4101 if (ret) { 4102 PMD_INIT_LOG(ERR, 4103 "could not calc rx priv buffer size for all TCs %d", 4104 ret); 4105 return ret; 4106 } 4107 4108 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); 4109 if (ret) { 4110 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); 4111 return ret; 4112 } 4113 4114 if (hns3_dev_get_support(hw, DCB)) { 4115 ret = hns3_rx_priv_wl_config(hw, &pkt_buf); 4116 if (ret) { 4117 PMD_INIT_LOG(ERR, 4118 "could not configure rx private waterline %d", 4119 ret); 4120 return ret; 4121 } 4122 4123 ret = hns3_common_thrd_config(hw, &pkt_buf); 4124 if (ret) { 4125 PMD_INIT_LOG(ERR, 4126 "could not configure common threshold %d", 4127 ret); 4128 return ret; 4129 } 4130 } 4131 4132 ret = hns3_common_wl_config(hw, &pkt_buf); 4133 if (ret) 4134 PMD_INIT_LOG(ERR, "could not configure common waterline %d", 4135 ret); 4136 4137 return ret; 4138 } 4139 4140 static int 4141 hns3_mac_init(struct hns3_hw *hw) 4142 { 4143 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4144 struct hns3_mac *mac = &hw->mac; 4145 struct hns3_pf *pf = &hns->pf; 4146 int ret; 4147 4148 pf->support_sfp_query = true; 4149 mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 4150 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); 4151 if (ret) { 4152 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); 4153 return ret; 4154 } 4155 4156 mac->link_status = RTE_ETH_LINK_DOWN; 4157 4158 return hns3_config_mtu(hw, pf->mps); 4159 } 4160 4161 static int 4162 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t 
resp_code) 4163 { 4164 #define HNS3_ETHERTYPE_SUCCESS_ADD 0 4165 #define HNS3_ETHERTYPE_ALREADY_ADD 1 4166 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 4167 #define HNS3_ETHERTYPE_KEY_CONFLICT 3 4168 int return_status; 4169 4170 if (cmdq_resp) { 4171 PMD_INIT_LOG(ERR, 4172 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", 4173 cmdq_resp); 4174 return -EIO; 4175 } 4176 4177 switch (resp_code) { 4178 case HNS3_ETHERTYPE_SUCCESS_ADD: 4179 case HNS3_ETHERTYPE_ALREADY_ADD: 4180 return_status = 0; 4181 break; 4182 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: 4183 PMD_INIT_LOG(ERR, 4184 "add mac ethertype failed for manager table overflow."); 4185 return_status = -EIO; 4186 break; 4187 case HNS3_ETHERTYPE_KEY_CONFLICT: 4188 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); 4189 return_status = -EIO; 4190 break; 4191 default: 4192 PMD_INIT_LOG(ERR, 4193 "add mac ethertype failed for undefined, code=%u.", 4194 resp_code); 4195 return_status = -EIO; 4196 break; 4197 } 4198 4199 return return_status; 4200 } 4201 4202 static int 4203 hns3_add_mgr_tbl(struct hns3_hw *hw, 4204 const struct hns3_mac_mgr_tbl_entry_cmd *req) 4205 { 4206 struct hns3_cmd_desc desc; 4207 uint8_t resp_code; 4208 uint16_t retval; 4209 int ret; 4210 4211 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); 4212 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); 4213 4214 ret = hns3_cmd_send(hw, &desc, 1); 4215 if (ret) { 4216 PMD_INIT_LOG(ERR, 4217 "add mac ethertype failed for cmd_send, ret =%d.", 4218 ret); 4219 return ret; 4220 } 4221 4222 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 4223 retval = rte_le_to_cpu_16(desc.retval); 4224 4225 return hns3_get_mac_ethertype_cmd_status(retval, resp_code); 4226 } 4227 4228 static void 4229 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, 4230 int *table_item_num) 4231 { 4232 struct hns3_mac_mgr_tbl_entry_cmd *tbl; 4233 4234 /* 4235 * In current version, we add one item in management table as below: 4236 * 0x0180C200000E -- LLDP MC address 4237 */ 4238 tbl = mgr_table; 4239 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; 4240 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); 4241 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); 4242 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); 4243 tbl->i_port_bitmap = 0x1; 4244 *table_item_num = 1; 4245 } 4246 4247 static int 4248 hns3_init_mgr_tbl(struct hns3_hw *hw) 4249 { 4250 #define HNS_MAC_MGR_TBL_MAX_SIZE 16 4251 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; 4252 int table_item_num; 4253 int ret; 4254 int i; 4255 4256 memset(mgr_table, 0, sizeof(mgr_table)); 4257 hns3_prepare_mgr_tbl(mgr_table, &table_item_num); 4258 for (i = 0; i < table_item_num; i++) { 4259 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); 4260 if (ret) { 4261 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d", 4262 ret); 4263 return ret; 4264 } 4265 } 4266 4267 return 0; 4268 } 4269 4270 static void 4271 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, 4272 bool en_mc, bool en_bc, int vport_id) 4273 { 4274 if (!param) 4275 return; 4276 4277 memset(param, 0, sizeof(struct hns3_promisc_param)); 4278 if (en_uc) 4279 param->enable = HNS3_PROMISC_EN_UC; 4280 if (en_mc) 4281 param->enable |= HNS3_PROMISC_EN_MC; 4282 if (en_bc) 4283 param->enable |= HNS3_PROMISC_EN_BC; 4284 param->vf_id = vport_id; 4285 } 4286 4287 static int 4288 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) 4289 { 4290 struct 
hns3_promisc_cfg_cmd *req; 4291 struct hns3_cmd_desc desc; 4292 int ret; 4293 4294 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false); 4295 4296 req = (struct hns3_promisc_cfg_cmd *)desc.data; 4297 req->vf_id = param->vf_id; 4298 req->flag = (param->enable << HNS3_PROMISC_EN_B) | 4299 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B; 4300 4301 ret = hns3_cmd_send(hw, &desc, 1); 4302 if (ret) 4303 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret); 4304 4305 return ret; 4306 } 4307 4308 static int 4309 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) 4310 { 4311 struct hns3_promisc_param param; 4312 bool en_bc_pmc = true; 4313 uint8_t vf_id; 4314 4315 /* 4316 * In the current version VF is not supported when PF is driven by DPDK 4317 * driver, so only the PF vport parameters need to be configured. 4318 */ 4319 vf_id = HNS3_PF_FUNC_ID; 4320 4321 hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); 4322 return hns3_cmd_set_promisc_mode(hw, &param); 4323 } 4324 4325 static int 4326 hns3_promisc_init(struct hns3_hw *hw) 4327 { 4328 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4329 struct hns3_pf *pf = &hns->pf; 4330 struct hns3_promisc_param param; 4331 uint16_t func_id; 4332 int ret; 4333 4334 ret = hns3_set_promisc_mode(hw, false, false); 4335 if (ret) { 4336 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret); 4337 return ret; 4338 } 4339 4340 /* 4341 * In the current version VFs are not supported when PF is driven by DPDK 4342 * driver. After PF has been taken over by DPDK, the original VF will 4343 * be invalid. So, there is a possibility of entry residues. The VFs' 4344 * promisc mode should be cleared to avoid unnecessary bandwidth usage 4345 * during init. 4346 */ 4347 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) { 4348 hns3_promisc_param_init(&param, false, false, false, func_id); 4349 ret = hns3_cmd_set_promisc_mode(hw, &param); 4350 if (ret) { 4351 PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode," 4352 " ret = %d", func_id, ret); 4353 return ret; 4354 } 4355 } 4356 4357 return 0; 4358 } 4359 4360 static void 4361 hns3_promisc_uninit(struct hns3_hw *hw) 4362 { 4363 struct hns3_promisc_param param; 4364 uint16_t func_id; 4365 int ret; 4366 4367 func_id = HNS3_PF_FUNC_ID; 4368 4369 /* 4370 * In the current version VFs are not supported when PF is driven by 4371 * DPDK driver, and VFs' promisc mode status has been cleared during 4372 * init and their status will not change. So just clear PF's promisc 4373 * mode status during uninit. 4374 */ 4375 hns3_promisc_param_init(&param, false, false, false, func_id); 4376 ret = hns3_cmd_set_promisc_mode(hw, &param); 4377 if (ret) 4378 PMD_INIT_LOG(ERR, "failed to clear promisc status during" 4379 " uninit, ret = %d", ret); 4380 } 4381 4382 static int 4383 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) 4384 { 4385 bool allmulti = dev->data->all_multicast ? true : false; 4386 struct hns3_adapter *hns = dev->data->dev_private; 4387 struct hns3_hw *hw = &hns->hw; 4388 uint64_t offloads; 4389 int err; 4390 int ret; 4391 4392 rte_spinlock_lock(&hw->lock); 4393 ret = hns3_set_promisc_mode(hw, true, true); 4394 if (ret) { 4395 rte_spinlock_unlock(&hw->lock); 4396 hns3_err(hw, "failed to enable promiscuous mode, ret = %d", 4397 ret); 4398 return ret; 4399 } 4400 4401 /* 4402 * When promiscuous mode is enabled, disable the vlan filter to let 4403 * all packets in the receiving direction come in.
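 * The vlan filter is turned back on in hns3_dev_promiscuous_disable() once promiscuous mode is disabled.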
4404 */ 4405 offloads = dev->data->dev_conf.rxmode.offloads; 4406 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 4407 ret = hns3_enable_vlan_filter(hns, false); 4408 if (ret) { 4409 hns3_err(hw, "failed to enable promiscuous mode due to " 4410 "failure to disable vlan filter, ret = %d", 4411 ret); 4412 err = hns3_set_promisc_mode(hw, false, allmulti); 4413 if (err) 4414 hns3_err(hw, "failed to restore promiscuous " 4415 "status after disable vlan filter " 4416 "failed during enabling promiscuous " 4417 "mode, ret = %d", ret); 4418 } 4419 } 4420 4421 rte_spinlock_unlock(&hw->lock); 4422 4423 return ret; 4424 } 4425 4426 static int 4427 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) 4428 { 4429 bool allmulti = dev->data->all_multicast ? true : false; 4430 struct hns3_adapter *hns = dev->data->dev_private; 4431 struct hns3_hw *hw = &hns->hw; 4432 uint64_t offloads; 4433 int err; 4434 int ret; 4435 4436 /* If now in all_multicast mode, must remain in all_multicast mode. */ 4437 rte_spinlock_lock(&hw->lock); 4438 ret = hns3_set_promisc_mode(hw, false, allmulti); 4439 if (ret) { 4440 rte_spinlock_unlock(&hw->lock); 4441 hns3_err(hw, "failed to disable promiscuous mode, ret = %d", 4442 ret); 4443 return ret; 4444 } 4445 /* when promiscuous mode was disabled, restore the vlan filter status */ 4446 offloads = dev->data->dev_conf.rxmode.offloads; 4447 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 4448 ret = hns3_enable_vlan_filter(hns, true); 4449 if (ret) { 4450 hns3_err(hw, "failed to disable promiscuous mode due to" 4451 " failure to restore vlan filter, ret = %d", 4452 ret); 4453 err = hns3_set_promisc_mode(hw, true, true); 4454 if (err) 4455 hns3_err(hw, "failed to restore promiscuous " 4456 "status after enabling vlan filter " 4457 "failed during disabling promiscuous " 4458 "mode, ret = %d", ret); 4459 } 4460 } 4461 rte_spinlock_unlock(&hw->lock); 4462 4463 return ret; 4464 } 4465 4466 static int 4467 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) 4468 { 4469 struct hns3_adapter *hns = dev->data->dev_private; 4470 struct hns3_hw *hw = &hns->hw; 4471 int ret; 4472 4473 if (dev->data->promiscuous) 4474 return 0; 4475 4476 rte_spinlock_lock(&hw->lock); 4477 ret = hns3_set_promisc_mode(hw, false, true); 4478 rte_spinlock_unlock(&hw->lock); 4479 if (ret) 4480 hns3_err(hw, "failed to enable allmulticast mode, ret = %d", 4481 ret); 4482 4483 return ret; 4484 } 4485 4486 static int 4487 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) 4488 { 4489 struct hns3_adapter *hns = dev->data->dev_private; 4490 struct hns3_hw *hw = &hns->hw; 4491 int ret; 4492 4493 /* If now in promiscuous mode, must remain in all_multicast mode. */ 4494 if (dev->data->promiscuous) 4495 return 0; 4496 4497 rte_spinlock_lock(&hw->lock); 4498 ret = hns3_set_promisc_mode(hw, false, false); 4499 rte_spinlock_unlock(&hw->lock); 4500 if (ret) 4501 hns3_err(hw, "failed to disable allmulticast mode, ret = %d", 4502 ret); 4503 4504 return ret; 4505 } 4506 4507 static int 4508 hns3_dev_promisc_restore(struct hns3_adapter *hns) 4509 { 4510 struct hns3_hw *hw = &hns->hw; 4511 bool allmulti = hw->data->all_multicast ? 
true : false; 4512 int ret; 4513 4514 if (hw->data->promiscuous) { 4515 ret = hns3_set_promisc_mode(hw, true, true); 4516 if (ret) 4517 hns3_err(hw, "failed to restore promiscuous mode, " 4518 "ret = %d", ret); 4519 return ret; 4520 } 4521 4522 ret = hns3_set_promisc_mode(hw, false, allmulti); 4523 if (ret) 4524 hns3_err(hw, "failed to restore allmulticast mode, ret = %d", 4525 ret); 4526 return ret; 4527 } 4528 4529 static int 4530 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info) 4531 { 4532 struct hns3_sfp_info_cmd *resp; 4533 struct hns3_cmd_desc desc; 4534 int ret; 4535 4536 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true); 4537 resp = (struct hns3_sfp_info_cmd *)desc.data; 4538 resp->query_type = HNS3_ACTIVE_QUERY; 4539 4540 ret = hns3_cmd_send(hw, &desc, 1); 4541 if (ret == -EOPNOTSUPP) { 4542 hns3_warn(hw, "firmware does not support get SFP info," 4543 " ret = %d.", ret); 4544 return ret; 4545 } else if (ret) { 4546 hns3_err(hw, "get sfp info failed, ret = %d.", ret); 4547 return ret; 4548 } 4549 4550 /* 4551 * In some cases, the speed of MAC obtained from firmware may be 0; it 4552 * shouldn't be set to mac->speed. 4553 */ 4554 if (!rte_le_to_cpu_32(resp->sfp_speed)) 4555 return 0; 4556 4557 mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed); 4558 /* 4559 * If resp->supported_speed is 0, it means the firmware is an old 4560 * version; do not update these params. 4561 */ 4562 if (resp->supported_speed) { 4563 mac_info->query_type = HNS3_ACTIVE_QUERY; 4564 mac_info->supported_speed = 4565 rte_le_to_cpu_32(resp->supported_speed); 4566 mac_info->support_autoneg = resp->autoneg_ability; 4567 mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED 4568 : RTE_ETH_LINK_AUTONEG; 4569 } else { 4570 mac_info->query_type = HNS3_DEFAULT_QUERY; 4571 } 4572 4573 return 0; 4574 } 4575 4576 static uint8_t 4577 hns3_check_speed_dup(uint8_t duplex, uint32_t speed) 4578 { 4579 if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M)) 4580 duplex = RTE_ETH_LINK_FULL_DUPLEX; 4581 4582 return duplex; 4583 } 4584 4585 static int 4586 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 4587 { 4588 struct hns3_mac *mac = &hw->mac; 4589 int ret; 4590 4591 duplex = hns3_check_speed_dup(duplex, speed); 4592 if (mac->link_speed == speed && mac->link_duplex == duplex) 4593 return 0; 4594 4595 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex); 4596 if (ret) 4597 return ret; 4598 4599 ret = hns3_port_shaper_update(hw, speed); 4600 if (ret) 4601 return ret; 4602 4603 mac->link_speed = speed; 4604 mac->link_duplex = duplex; 4605 4606 return 0; 4607 } 4608 4609 static int 4610 hns3_update_fiber_link_info(struct hns3_hw *hw) 4611 { 4612 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 4613 struct hns3_mac *mac = &hw->mac; 4614 struct hns3_mac mac_info; 4615 int ret; 4616 4617 /* If the firmware does not support getting SFP/qSFP speed, return directly */ 4618 if (!pf->support_sfp_query) 4619 return 0; 4620 4621 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4622 ret = hns3_get_sfp_info(hw, &mac_info); 4623 if (ret == -EOPNOTSUPP) { 4624 pf->support_sfp_query = false; 4625 return ret; 4626 } else if (ret) 4627 return ret; 4628 4629 /* Do nothing if no SFP */ 4630 if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE) 4631 return 0; 4632 4633 /* 4634 * If query_type is HNS3_ACTIVE_QUERY, there is no need 4635 * to reconfigure the speed of MAC.
Otherwise, it indicates 4636 * that the current firmware only supports to obtain the 4637 * speed of the SFP, and the speed of MAC needs to reconfigure. 4638 */ 4639 mac->query_type = mac_info.query_type; 4640 if (mac->query_type == HNS3_ACTIVE_QUERY) { 4641 if (mac_info.link_speed != mac->link_speed) { 4642 ret = hns3_port_shaper_update(hw, mac_info.link_speed); 4643 if (ret) 4644 return ret; 4645 } 4646 4647 mac->link_speed = mac_info.link_speed; 4648 mac->supported_speed = mac_info.supported_speed; 4649 mac->support_autoneg = mac_info.support_autoneg; 4650 mac->link_autoneg = mac_info.link_autoneg; 4651 4652 return 0; 4653 } 4654 4655 /* Config full duplex for SFP */ 4656 return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed, 4657 RTE_ETH_LINK_FULL_DUPLEX); 4658 } 4659 4660 static void 4661 hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac) 4662 { 4663 #define HNS3_PHY_SUPPORTED_SPEED_MASK 0x2f 4664 4665 struct hns3_phy_params_bd0_cmd *req; 4666 uint32_t supported; 4667 4668 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 4669 mac->link_speed = rte_le_to_cpu_32(req->speed); 4670 mac->link_duplex = hns3_get_bit(req->duplex, 4671 HNS3_PHY_DUPLEX_CFG_B); 4672 mac->link_autoneg = hns3_get_bit(req->autoneg, 4673 HNS3_PHY_AUTONEG_CFG_B); 4674 mac->advertising = rte_le_to_cpu_32(req->advertising); 4675 mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising); 4676 supported = rte_le_to_cpu_32(req->supported); 4677 mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK; 4678 mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT); 4679 } 4680 4681 static int 4682 hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac) 4683 { 4684 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 4685 uint16_t i; 4686 int ret; 4687 4688 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 4689 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 4690 true); 4691 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4692 } 4693 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true); 4694 4695 ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 4696 if (ret) { 4697 hns3_err(hw, "get phy parameters failed, ret = %d.", ret); 4698 return ret; 4699 } 4700 4701 hns3_parse_copper_phy_params(desc, mac); 4702 4703 return 0; 4704 } 4705 4706 static int 4707 hns3_update_copper_link_info(struct hns3_hw *hw) 4708 { 4709 struct hns3_mac *mac = &hw->mac; 4710 struct hns3_mac mac_info; 4711 int ret; 4712 4713 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4714 ret = hns3_get_copper_phy_params(hw, &mac_info); 4715 if (ret) 4716 return ret; 4717 4718 if (mac_info.link_speed != mac->link_speed) { 4719 ret = hns3_port_shaper_update(hw, mac_info.link_speed); 4720 if (ret) 4721 return ret; 4722 } 4723 4724 mac->link_speed = mac_info.link_speed; 4725 mac->link_duplex = mac_info.link_duplex; 4726 mac->link_autoneg = mac_info.link_autoneg; 4727 mac->supported_speed = mac_info.supported_speed; 4728 mac->advertising = mac_info.advertising; 4729 mac->lp_advertising = mac_info.lp_advertising; 4730 mac->support_autoneg = mac_info.support_autoneg; 4731 4732 return 0; 4733 } 4734 4735 static int 4736 hns3_update_link_info(struct rte_eth_dev *eth_dev) 4737 { 4738 struct hns3_adapter *hns = eth_dev->data->dev_private; 4739 struct hns3_hw *hw = &hns->hw; 4740 int ret = 0; 4741 4742 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) 4743 ret = hns3_update_copper_link_info(hw); 4744 else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) 4745 ret = 
hns3_update_fiber_link_info(hw); 4746 4747 return ret; 4748 } 4749 4750 static int 4751 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) 4752 { 4753 struct hns3_config_mac_mode_cmd *req; 4754 struct hns3_cmd_desc desc; 4755 uint32_t loop_en = 0; 4756 uint8_t val = 0; 4757 int ret; 4758 4759 req = (struct hns3_config_mac_mode_cmd *)desc.data; 4760 4761 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); 4762 if (enable) 4763 val = 1; 4764 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); 4765 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); 4766 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); 4767 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); 4768 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); 4769 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); 4770 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); 4771 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); 4772 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); 4773 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); 4774 4775 /* 4776 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC 4777 * when receiving frames. Otherwise, CRC will be stripped. 4778 */ 4779 if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) 4780 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); 4781 else 4782 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); 4783 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); 4784 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); 4785 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); 4786 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); 4787 4788 ret = hns3_cmd_send(hw, &desc, 1); 4789 if (ret) 4790 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret); 4791 4792 return ret; 4793 } 4794 4795 static int 4796 hns3_get_mac_link_status(struct hns3_hw *hw) 4797 { 4798 struct hns3_link_status_cmd *req; 4799 struct hns3_cmd_desc desc; 4800 int link_status; 4801 int ret; 4802 4803 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); 4804 ret = hns3_cmd_send(hw, &desc, 1); 4805 if (ret) { 4806 hns3_err(hw, "get link status cmd failed %d", ret); 4807 return RTE_ETH_LINK_DOWN; 4808 } 4809 4810 req = (struct hns3_link_status_cmd *)desc.data; 4811 link_status = req->status & HNS3_LINK_STATUS_UP_M; 4812 4813 return !!link_status; 4814 } 4815 4816 static bool 4817 hns3_update_link_status(struct hns3_hw *hw) 4818 { 4819 int state; 4820 4821 state = hns3_get_mac_link_status(hw); 4822 if (state != hw->mac.link_status) { 4823 hw->mac.link_status = state; 4824 hns3_warn(hw, "Link status change to %s!", state ? 
"up" : "down"); 4825 return true; 4826 } 4827 4828 return false; 4829 } 4830 4831 void 4832 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query) 4833 { 4834 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 4835 struct rte_eth_link new_link; 4836 int ret; 4837 4838 if (query) 4839 hns3_update_port_link_info(dev); 4840 4841 memset(&new_link, 0, sizeof(new_link)); 4842 hns3_setup_linkstatus(dev, &new_link); 4843 4844 ret = rte_eth_linkstatus_set(dev, &new_link); 4845 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0) 4846 hns3_start_report_lse(dev); 4847 } 4848 4849 static void 4850 hns3_service_handler(void *param) 4851 { 4852 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 4853 struct hns3_adapter *hns = eth_dev->data->dev_private; 4854 struct hns3_hw *hw = &hns->hw; 4855 4856 if (!hns3_is_reset_pending(hns)) 4857 hns3_update_linkstatus_and_event(hw, true); 4858 else 4859 hns3_warn(hw, "Cancel the query when reset is pending"); 4860 4861 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); 4862 } 4863 4864 static int 4865 hns3_init_hardware(struct hns3_adapter *hns) 4866 { 4867 struct hns3_hw *hw = &hns->hw; 4868 int ret; 4869 4870 ret = hns3_map_tqp(hw); 4871 if (ret) { 4872 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); 4873 return ret; 4874 } 4875 4876 ret = hns3_init_umv_space(hw); 4877 if (ret) { 4878 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); 4879 return ret; 4880 } 4881 4882 ret = hns3_mac_init(hw); 4883 if (ret) { 4884 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); 4885 goto err_mac_init; 4886 } 4887 4888 ret = hns3_init_mgr_tbl(hw); 4889 if (ret) { 4890 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); 4891 goto err_mac_init; 4892 } 4893 4894 ret = hns3_promisc_init(hw); 4895 if (ret) { 4896 PMD_INIT_LOG(ERR, "Failed to init promisc: %d", 4897 ret); 4898 goto err_mac_init; 4899 } 4900 4901 ret = hns3_init_vlan_config(hns); 4902 if (ret) { 4903 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); 4904 goto err_mac_init; 4905 } 4906 4907 ret = hns3_dcb_init(hw); 4908 if (ret) { 4909 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); 4910 goto err_mac_init; 4911 } 4912 4913 ret = hns3_init_fd_config(hns); 4914 if (ret) { 4915 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); 4916 goto err_mac_init; 4917 } 4918 4919 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); 4920 if (ret) { 4921 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); 4922 goto err_mac_init; 4923 } 4924 4925 ret = hns3_config_gro(hw, false); 4926 if (ret) { 4927 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 4928 goto err_mac_init; 4929 } 4930 4931 /* 4932 * In the initialization clearing the all hardware mapping relationship 4933 * configurations between queues and interrupt vectors is needed, so 4934 * some error caused by the residual configurations, such as the 4935 * unexpected interrupt, can be avoid. 
4936 */ 4937 ret = hns3_init_ring_with_vector(hw); 4938 if (ret) { 4939 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); 4940 goto err_mac_init; 4941 } 4942 4943 return 0; 4944 4945 err_mac_init: 4946 hns3_uninit_umv_space(hw); 4947 return ret; 4948 } 4949 4950 static int 4951 hns3_clear_hw(struct hns3_hw *hw) 4952 { 4953 struct hns3_cmd_desc desc; 4954 int ret; 4955 4956 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false); 4957 4958 ret = hns3_cmd_send(hw, &desc, 1); 4959 if (ret && ret != -EOPNOTSUPP) 4960 return ret; 4961 4962 return 0; 4963 } 4964 4965 static void 4966 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable) 4967 { 4968 uint32_t val; 4969 4970 /* 4971 * The new firmware supports reporting more hardware error types by 4972 * msix mode. These errors are defined as RAS errors in hardware 4973 * and belong to a different type from the MSI-x errors processed 4974 * by the network driver. 4975 * 4976 * The network driver should enable this new error report during initialization. 4977 */ 4978 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 4979 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0); 4980 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); 4981 } 4982 4983 static uint32_t 4984 hns3_set_firber_default_support_speed(struct hns3_hw *hw) 4985 { 4986 struct hns3_mac *mac = &hw->mac; 4987 4988 switch (mac->link_speed) { 4989 case RTE_ETH_SPEED_NUM_1G: 4990 return HNS3_FIBER_LINK_SPEED_1G_BIT; 4991 case RTE_ETH_SPEED_NUM_10G: 4992 return HNS3_FIBER_LINK_SPEED_10G_BIT; 4993 case RTE_ETH_SPEED_NUM_25G: 4994 return HNS3_FIBER_LINK_SPEED_25G_BIT; 4995 case RTE_ETH_SPEED_NUM_40G: 4996 return HNS3_FIBER_LINK_SPEED_40G_BIT; 4997 case RTE_ETH_SPEED_NUM_50G: 4998 return HNS3_FIBER_LINK_SPEED_50G_BIT; 4999 case RTE_ETH_SPEED_NUM_100G: 5000 return HNS3_FIBER_LINK_SPEED_100G_BIT; 5001 case RTE_ETH_SPEED_NUM_200G: 5002 return HNS3_FIBER_LINK_SPEED_200G_BIT; 5003 default: 5004 hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed); 5005 return 0; 5006 } 5007 } 5008 5009 /* 5010 * Validity of supported_speed for fiber and copper media types can be 5011 * guaranteed by the following policy: 5012 * Copper: 5013 * Although the initialization of the phy in the firmware may not be 5014 * completed, the firmware can guarantee that the supported_speed is 5015 * a valid value. 5016 * Fiber: 5017 * If the version of firmware supports the active query way of the 5018 * HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained 5019 * through it. If unsupported, use the SFP's speed as the value of the 5020 * supported_speed. 5021 */ 5022 static int 5023 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev) 5024 { 5025 struct hns3_adapter *hns = eth_dev->data->dev_private; 5026 struct hns3_hw *hw = &hns->hw; 5027 struct hns3_mac *mac = &hw->mac; 5028 int ret; 5029 5030 ret = hns3_update_link_info(eth_dev); 5031 if (ret) 5032 return ret; 5033 5034 if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) { 5035 /* 5036 * Some firmware does not support the report of supported_speed, 5037 * and only reports the effective speed of the SFP. In this case, it 5038 * is necessary to use the SFP's speed as the supported_speed.
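 * When the active query is unavailable, hns3_set_firber_default_support_speed() below maps the current SFP link speed to the corresponding HNS3_FIBER_LINK_SPEED_*_BIT.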
5039 */ 5040 if (mac->supported_speed == 0) 5041 mac->supported_speed = 5042 hns3_set_firber_default_support_speed(hw); 5043 } 5044 5045 return 0; 5046 } 5047 5048 static void 5049 hns3_get_fc_autoneg_capability(struct hns3_adapter *hns) 5050 { 5051 struct hns3_mac *mac = &hns->hw.mac; 5052 5053 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) { 5054 hns->pf.support_fc_autoneg = true; 5055 return; 5056 } 5057 5058 /* 5059 * Flow control auto-negotiation requires the cooperation of the driver 5060 * and firmware. Currently, the optical port does not support flow 5061 * control auto-negotiation. 5062 */ 5063 hns->pf.support_fc_autoneg = false; 5064 } 5065 5066 static int 5067 hns3_init_pf(struct rte_eth_dev *eth_dev) 5068 { 5069 struct rte_device *dev = eth_dev->device; 5070 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5071 struct hns3_adapter *hns = eth_dev->data->dev_private; 5072 struct hns3_hw *hw = &hns->hw; 5073 int ret; 5074 5075 PMD_INIT_FUNC_TRACE(); 5076 5077 /* Get hardware io base address from pcie BAR2 IO space */ 5078 hw->io_base = pci_dev->mem_resource[2].addr; 5079 5080 /* Firmware command queue initialize */ 5081 ret = hns3_cmd_init_queue(hw); 5082 if (ret) { 5083 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); 5084 goto err_cmd_init_queue; 5085 } 5086 5087 hns3_clear_all_event_cause(hw); 5088 5089 /* Firmware command initialize */ 5090 ret = hns3_cmd_init(hw); 5091 if (ret) { 5092 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); 5093 goto err_cmd_init; 5094 } 5095 5096 hns3_tx_push_init(eth_dev); 5097 5098 /* 5099 * To ensure that the hardware environment is clean during 5100 * initialization, the driver actively clear the hardware environment 5101 * during initialization, including PF and corresponding VFs' vlan, mac, 5102 * flow table configurations, etc. 5103 */ 5104 ret = hns3_clear_hw(hw); 5105 if (ret) { 5106 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret); 5107 goto err_cmd_init; 5108 } 5109 5110 /* Hardware statistics of imissed registers cleared. 
*/ 5111 ret = hns3_update_imissed_stats(hw, true); 5112 if (ret) { 5113 hns3_err(hw, "clear imissed stats failed, ret = %d", ret); 5114 goto err_cmd_init; 5115 } 5116 5117 hns3_config_all_msix_error(hw, true); 5118 5119 ret = rte_intr_callback_register(pci_dev->intr_handle, 5120 hns3_interrupt_handler, 5121 eth_dev); 5122 if (ret) { 5123 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); 5124 goto err_intr_callback_register; 5125 } 5126 5127 ret = hns3_ptp_init(hw); 5128 if (ret) 5129 goto err_get_config; 5130 5131 /* Enable interrupt */ 5132 rte_intr_enable(pci_dev->intr_handle); 5133 hns3_pf_enable_irq0(hw); 5134 5135 /* Get configuration */ 5136 ret = hns3_get_configuration(hw); 5137 if (ret) { 5138 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); 5139 goto err_get_config; 5140 } 5141 5142 ret = hns3_tqp_stats_init(hw); 5143 if (ret) 5144 goto err_get_config; 5145 5146 ret = hns3_init_hardware(hns); 5147 if (ret) { 5148 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret); 5149 goto err_init_hw; 5150 } 5151 5152 /* Initialize flow director filter list & hash */ 5153 ret = hns3_fdir_filter_init(hns); 5154 if (ret) { 5155 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret); 5156 goto err_fdir; 5157 } 5158 5159 hns3_rss_set_default_args(hw); 5160 5161 ret = hns3_enable_hw_error_intr(hns, true); 5162 if (ret) { 5163 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", 5164 ret); 5165 goto err_enable_intr; 5166 } 5167 5168 ret = hns3_get_port_supported_speed(eth_dev); 5169 if (ret) { 5170 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported " 5171 "by device, ret = %d.", ret); 5172 goto err_supported_speed; 5173 } 5174 5175 hns3_get_fc_autoneg_capability(hns); 5176 5177 hns3_tm_conf_init(eth_dev); 5178 5179 return 0; 5180 5181 err_supported_speed: 5182 (void)hns3_enable_hw_error_intr(hns, false); 5183 err_enable_intr: 5184 hns3_fdir_filter_uninit(hns); 5185 err_fdir: 5186 hns3_uninit_umv_space(hw); 5187 err_init_hw: 5188 hns3_tqp_stats_uninit(hw); 5189 err_get_config: 5190 hns3_pf_disable_irq0(hw); 5191 rte_intr_disable(pci_dev->intr_handle); 5192 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler, 5193 eth_dev); 5194 err_intr_callback_register: 5195 err_cmd_init: 5196 hns3_cmd_uninit(hw); 5197 hns3_cmd_destroy_queue(hw); 5198 err_cmd_init_queue: 5199 hw->io_base = NULL; 5200 5201 return ret; 5202 } 5203 5204 static void 5205 hns3_uninit_pf(struct rte_eth_dev *eth_dev) 5206 { 5207 struct hns3_adapter *hns = eth_dev->data->dev_private; 5208 struct rte_device *dev = eth_dev->device; 5209 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5210 struct hns3_hw *hw = &hns->hw; 5211 5212 PMD_INIT_FUNC_TRACE(); 5213 5214 hns3_tm_conf_uninit(eth_dev); 5215 hns3_enable_hw_error_intr(hns, false); 5216 hns3_rss_uninit(hns); 5217 (void)hns3_config_gro(hw, false); 5218 hns3_promisc_uninit(hw); 5219 hns3_flow_uninit(eth_dev); 5220 hns3_fdir_filter_uninit(hns); 5221 hns3_uninit_umv_space(hw); 5222 hns3_tqp_stats_uninit(hw); 5223 hns3_config_mac_tnl_int(hw, false); 5224 hns3_pf_disable_irq0(hw); 5225 rte_intr_disable(pci_dev->intr_handle); 5226 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler, 5227 eth_dev); 5228 hns3_config_all_msix_error(hw, false); 5229 hns3_cmd_uninit(hw); 5230 hns3_cmd_destroy_queue(hw); 5231 hw->io_base = NULL; 5232 } 5233 5234 static uint32_t 5235 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds) 5236 { 5237 uint32_t speed_bit; 5238 5239 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) { 5240 
case RTE_ETH_LINK_SPEED_10M: 5241 speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT; 5242 break; 5243 case RTE_ETH_LINK_SPEED_10M_HD: 5244 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT; 5245 break; 5246 case RTE_ETH_LINK_SPEED_100M: 5247 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT; 5248 break; 5249 case RTE_ETH_LINK_SPEED_100M_HD: 5250 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT; 5251 break; 5252 case RTE_ETH_LINK_SPEED_1G: 5253 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT; 5254 break; 5255 default: 5256 speed_bit = 0; 5257 break; 5258 } 5259 5260 return speed_bit; 5261 } 5262 5263 static uint32_t 5264 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds) 5265 { 5266 uint32_t speed_bit; 5267 5268 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) { 5269 case RTE_ETH_LINK_SPEED_1G: 5270 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT; 5271 break; 5272 case RTE_ETH_LINK_SPEED_10G: 5273 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT; 5274 break; 5275 case RTE_ETH_LINK_SPEED_25G: 5276 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT; 5277 break; 5278 case RTE_ETH_LINK_SPEED_40G: 5279 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT; 5280 break; 5281 case RTE_ETH_LINK_SPEED_50G: 5282 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT; 5283 break; 5284 case RTE_ETH_LINK_SPEED_100G: 5285 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT; 5286 break; 5287 case RTE_ETH_LINK_SPEED_200G: 5288 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT; 5289 break; 5290 default: 5291 speed_bit = 0; 5292 break; 5293 } 5294 5295 return speed_bit; 5296 } 5297 5298 static int 5299 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) 5300 { 5301 struct hns3_mac *mac = &hw->mac; 5302 uint32_t supported_speed = mac->supported_speed; 5303 uint32_t speed_bit = 0; 5304 5305 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 5306 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds); 5307 else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) 5308 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds); 5309 5310 if (!(speed_bit & supported_speed)) { 5311 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.", 5312 link_speeds); 5313 return -EINVAL; 5314 } 5315 5316 return 0; 5317 } 5318 5319 static inline uint32_t 5320 hns3_get_link_speed(uint32_t link_speeds) 5321 { 5322 uint32_t speed = RTE_ETH_SPEED_NUM_NONE; 5323 5324 if (link_speeds & RTE_ETH_LINK_SPEED_10M || 5325 link_speeds & RTE_ETH_LINK_SPEED_10M_HD) 5326 speed = RTE_ETH_SPEED_NUM_10M; 5327 if (link_speeds & RTE_ETH_LINK_SPEED_100M || 5328 link_speeds & RTE_ETH_LINK_SPEED_100M_HD) 5329 speed = RTE_ETH_SPEED_NUM_100M; 5330 if (link_speeds & RTE_ETH_LINK_SPEED_1G) 5331 speed = RTE_ETH_SPEED_NUM_1G; 5332 if (link_speeds & RTE_ETH_LINK_SPEED_10G) 5333 speed = RTE_ETH_SPEED_NUM_10G; 5334 if (link_speeds & RTE_ETH_LINK_SPEED_25G) 5335 speed = RTE_ETH_SPEED_NUM_25G; 5336 if (link_speeds & RTE_ETH_LINK_SPEED_40G) 5337 speed = RTE_ETH_SPEED_NUM_40G; 5338 if (link_speeds & RTE_ETH_LINK_SPEED_50G) 5339 speed = RTE_ETH_SPEED_NUM_50G; 5340 if (link_speeds & RTE_ETH_LINK_SPEED_100G) 5341 speed = RTE_ETH_SPEED_NUM_100G; 5342 if (link_speeds & RTE_ETH_LINK_SPEED_200G) 5343 speed = RTE_ETH_SPEED_NUM_200G; 5344 5345 return speed; 5346 } 5347 5348 static uint8_t 5349 hns3_get_link_duplex(uint32_t link_speeds) 5350 { 5351 if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) || 5352 (link_speeds & RTE_ETH_LINK_SPEED_100M_HD)) 5353 return RTE_ETH_LINK_HALF_DUPLEX; 5354 else 5355 return RTE_ETH_LINK_FULL_DUPLEX; 5356 } 5357 5358 static int 5359 hns3_set_copper_port_link_speed(struct hns3_hw *hw, 5360 
struct hns3_set_link_speed_cfg *cfg) 5361 { 5362 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 5363 struct hns3_phy_params_bd0_cmd *req; 5364 uint16_t i; 5365 5366 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 5367 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 5368 false); 5369 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 5370 } 5371 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false); 5372 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 5373 req->autoneg = cfg->autoneg; 5374 5375 /* 5376 * The full speed capability is used to negotiate when 5377 * auto-negotiation is enabled. 5378 */ 5379 if (cfg->autoneg) { 5380 req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT | 5381 HNS3_PHY_LINK_SPEED_10M_HD_BIT | 5382 HNS3_PHY_LINK_SPEED_100M_BIT | 5383 HNS3_PHY_LINK_SPEED_100M_HD_BIT | 5384 HNS3_PHY_LINK_SPEED_1000M_BIT; 5385 } else { 5386 req->speed = cfg->speed; 5387 req->duplex = cfg->duplex; 5388 } 5389 5390 return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 5391 } 5392 5393 static int 5394 hns3_set_autoneg(struct hns3_hw *hw, bool enable) 5395 { 5396 struct hns3_config_auto_neg_cmd *req; 5397 struct hns3_cmd_desc desc; 5398 uint32_t flag = 0; 5399 int ret; 5400 5401 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false); 5402 5403 req = (struct hns3_config_auto_neg_cmd *)desc.data; 5404 if (enable) 5405 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1); 5406 req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag); 5407 5408 ret = hns3_cmd_send(hw, &desc, 1); 5409 if (ret) 5410 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret); 5411 5412 return ret; 5413 } 5414 5415 static int 5416 hns3_set_fiber_port_link_speed(struct hns3_hw *hw, 5417 struct hns3_set_link_speed_cfg *cfg) 5418 { 5419 int ret; 5420 5421 if (hw->mac.support_autoneg) { 5422 ret = hns3_set_autoneg(hw, cfg->autoneg); 5423 if (ret) { 5424 hns3_err(hw, "failed to configure auto-negotiation."); 5425 return ret; 5426 } 5427 5428 /* 5429 * To enable auto-negotiation, we only need to open the switch 5430 * of auto-negotiation, then firmware sets all speed 5431 * capabilities. 5432 */ 5433 if (cfg->autoneg) 5434 return 0; 5435 } 5436 5437 /* 5438 * Some hardware doesn't support auto-negotiation, but users may not 5439 * configure link_speeds (default 0), which means auto-negotiation. 5440 * In this case, a warning message need to be printed, instead of 5441 * an error. 
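 * (A link_speeds value of 0 is RTE_ETH_LINK_SPEED_AUTONEG; see hns3_apply_link_speed() below.)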
5442 */ 5443 if (cfg->autoneg) { 5444 hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!"); 5445 return 0; 5446 } 5447 5448 return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex); 5449 } 5450 5451 static int 5452 hns3_set_port_link_speed(struct hns3_hw *hw, 5453 struct hns3_set_link_speed_cfg *cfg) 5454 { 5455 int ret; 5456 5457 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) { 5458 #if defined(RTE_HNS3_ONLY_1630_FPGA) 5459 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 5460 if (pf->is_tmp_phy) 5461 return 0; 5462 #endif 5463 5464 ret = hns3_set_copper_port_link_speed(hw, cfg); 5465 if (ret) { 5466 hns3_err(hw, "failed to set copper port link speed," 5467 "ret = %d.", ret); 5468 return ret; 5469 } 5470 } else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) { 5471 ret = hns3_set_fiber_port_link_speed(hw, cfg); 5472 if (ret) { 5473 hns3_err(hw, "failed to set fiber port link speed," 5474 "ret = %d.", ret); 5475 return ret; 5476 } 5477 } 5478 5479 return 0; 5480 } 5481 5482 static int 5483 hns3_apply_link_speed(struct hns3_hw *hw) 5484 { 5485 struct rte_eth_conf *conf = &hw->data->dev_conf; 5486 struct hns3_set_link_speed_cfg cfg; 5487 5488 memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg)); 5489 cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ? 5490 RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED; 5491 if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) { 5492 cfg.speed = hns3_get_link_speed(conf->link_speeds); 5493 cfg.duplex = hns3_get_link_duplex(conf->link_speeds); 5494 } 5495 5496 return hns3_set_port_link_speed(hw, &cfg); 5497 } 5498 5499 static int 5500 hns3_do_start(struct hns3_adapter *hns, bool reset_queue) 5501 { 5502 struct hns3_hw *hw = &hns->hw; 5503 bool link_en; 5504 int ret; 5505 5506 ret = hns3_update_queue_map_configure(hns); 5507 if (ret) { 5508 hns3_err(hw, "failed to update queue mapping configuration, ret = %d", 5509 ret); 5510 return ret; 5511 } 5512 5513 /* Note: hns3_tm_conf_update must be called after configuring DCB. */ 5514 ret = hns3_tm_conf_update(hw); 5515 if (ret) { 5516 PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret); 5517 return ret; 5518 } 5519 5520 hns3_enable_rxd_adv_layout(hw); 5521 5522 ret = hns3_init_queues(hns, reset_queue); 5523 if (ret) { 5524 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret); 5525 return ret; 5526 } 5527 5528 link_en = hw->set_link_down ? false : true; 5529 ret = hns3_cfg_mac_mode(hw, link_en); 5530 if (ret) { 5531 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret); 5532 goto err_config_mac_mode; 5533 } 5534 5535 ret = hns3_apply_link_speed(hw); 5536 if (ret) 5537 goto err_set_link_speed; 5538 5539 return 0; 5540 5541 err_set_link_speed: 5542 (void)hns3_cfg_mac_mode(hw, false); 5543 5544 err_config_mac_mode: 5545 hns3_dev_release_mbufs(hns); 5546 /* 5547 * Here is exception handling, hns3_reset_all_tqps will have the 5548 * corresponding error message if it is handled incorrectly, so it is 5549 * not necessary to check hns3_reset_all_tqps return value, here keep 5550 * ret as the error code causing the exception. 
5551 */ 5552 (void)hns3_reset_all_tqps(hns); 5553 return ret; 5554 } 5555 5556 static int 5557 hns3_map_rx_interrupt(struct rte_eth_dev *dev) 5558 { 5559 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5560 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5561 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5562 uint16_t base = RTE_INTR_VEC_ZERO_OFFSET; 5563 uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5564 uint32_t intr_vector; 5565 uint16_t q_id; 5566 int ret; 5567 5568 /* 5569 * hns3 needs a separate interrupt to be used as the event interrupt, 5570 * which cannot be shared with the task queue pairs, so multiple 5571 * interrupt vectors need to be supported. 5572 */ 5573 if (dev->data->dev_conf.intr_conf.rxq == 0 || 5574 !rte_intr_cap_multiple(intr_handle)) 5575 return 0; 5576 5577 rte_intr_disable(intr_handle); 5578 intr_vector = hw->used_rx_queues; 5579 /* creates event fd for each intr vector when MSIX is used */ 5580 if (rte_intr_efd_enable(intr_handle, intr_vector)) 5581 return -EINVAL; 5582 5583 /* Allocate vector list */ 5584 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 5585 hw->used_rx_queues)) { 5586 hns3_err(hw, "failed to allocate %u rx_queues intr_vec", 5587 hw->used_rx_queues); 5588 ret = -ENOMEM; 5589 goto alloc_intr_vec_error; 5590 } 5591 5592 if (rte_intr_allow_others(intr_handle)) { 5593 vec = RTE_INTR_VEC_RXTX_OFFSET; 5594 base = RTE_INTR_VEC_RXTX_OFFSET; 5595 } 5596 5597 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5598 ret = hns3_bind_ring_with_vector(hw, vec, true, 5599 HNS3_RING_TYPE_RX, q_id); 5600 if (ret) 5601 goto bind_vector_error; 5602 5603 if (rte_intr_vec_list_index_set(intr_handle, q_id, vec)) 5604 goto bind_vector_error; 5605 /* 5606 * If there are not enough efds (e.g. not enough interrupts), the 5607 * remaining queues will be bound to the last interrupt. 5608 */ 5609 if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1) 5610 vec++; 5611 } 5612 rte_intr_enable(intr_handle); 5613 return 0; 5614 5615 bind_vector_error: 5616 rte_intr_vec_list_free(intr_handle); 5617 alloc_intr_vec_error: 5618 rte_intr_efd_disable(intr_handle); 5619 return ret; 5620 } 5621 5622 static int 5623 hns3_restore_rx_interrupt(struct hns3_hw *hw) 5624 { 5625 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 5626 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5627 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5628 uint16_t q_id; 5629 int ret; 5630 5631 if (dev->data->dev_conf.intr_conf.rxq == 0) 5632 return 0; 5633 5634 if (rte_intr_dp_is_en(intr_handle)) { 5635 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5636 ret = hns3_bind_ring_with_vector(hw, 5637 rte_intr_vec_list_index_get(intr_handle, 5638 q_id), 5639 true, HNS3_RING_TYPE_RX, q_id); 5640 if (ret) 5641 return ret; 5642 } 5643 } 5644 5645 return 0; 5646 } 5647 5648 static void 5649 hns3_restore_filter(struct rte_eth_dev *dev) 5650 { 5651 hns3_restore_rss_filter(dev); 5652 } 5653 5654 static int 5655 hns3_dev_start(struct rte_eth_dev *dev) 5656 { 5657 struct hns3_adapter *hns = dev->data->dev_private; 5658 struct hns3_hw *hw = &hns->hw; 5659 bool old_state = hw->set_link_down; 5660 int ret; 5661 5662 PMD_INIT_FUNC_TRACE(); 5663 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) 5664 return -EBUSY; 5665 5666 rte_spinlock_lock(&hw->lock); 5667 hw->adapter_state = HNS3_NIC_STARTING; 5668 5669 /* 5670 * If the dev_set_link_down() API has been called, the "set_link_down" 5671 * flag can be cleared by dev_start() API.
In addition, the flag should 5672 * also be cleared before calling hns3_do_start() so that MAC can be 5673 * enabled in dev_start stage. 5674 */ 5675 hw->set_link_down = false; 5676 ret = hns3_do_start(hns, true); 5677 if (ret) 5678 goto do_start_fail; 5679 5680 ret = hns3_map_rx_interrupt(dev); 5681 if (ret) 5682 goto map_rx_inter_err; 5683 5684 /* 5685 * There are three registers used to control the status of a TQP 5686 * (contains a pair of Tx queue and Rx queue) in the new version network 5687 * engine. One is used to control the enabling of Tx queue, the other is 5688 * used to control the enabling of Rx queue, and the last is the master 5689 * switch used to control the enabling of the tqp. The Tx register and 5690 * TQP register must be enabled at the same time to enable a Tx queue. 5691 * The same applies to the Rx queue. For the older network engine, this 5692 * function only refreshes the enabled flag, and it is used to update the 5693 * queue status in the dpdk framework. 5694 */ 5695 ret = hns3_start_all_txqs(dev); 5696 if (ret) 5697 goto map_rx_inter_err; 5698 5699 ret = hns3_start_all_rxqs(dev); 5700 if (ret) 5701 goto start_all_rxqs_fail; 5702 5703 hw->adapter_state = HNS3_NIC_STARTED; 5704 rte_spinlock_unlock(&hw->lock); 5705 5706 hns3_rx_scattered_calc(dev); 5707 hns3_set_rxtx_function(dev); 5708 hns3_mp_req_start_rxtx(dev); 5709 5710 hns3_restore_filter(dev); 5711 5712 /* Enable interrupt of all rx queues before enabling queues */ 5713 hns3_dev_all_rx_queue_intr_enable(hw, true); 5714 5715 /* 5716 * After the initialization is finished, enable tqps to receive/transmit 5717 * packets and refresh all queue status. 5718 */ 5719 hns3_start_tqps(hw); 5720 5721 hns3_tm_dev_start_proc(hw); 5722 5723 if (dev->data->dev_conf.intr_conf.lsc != 0) 5724 hns3_dev_link_update(dev, 0); 5725 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); 5726 5727 hns3_info(hw, "hns3 dev start successful!"); 5728 5729 return 0; 5730 5731 start_all_rxqs_fail: 5732 hns3_stop_all_txqs(dev); 5733 map_rx_inter_err: 5734 (void)hns3_do_stop(hns); 5735 do_start_fail: 5736 hw->set_link_down = old_state; 5737 hw->adapter_state = HNS3_NIC_CONFIGURED; 5738 rte_spinlock_unlock(&hw->lock); 5739 5740 return ret; 5741 } 5742 5743 static int 5744 hns3_do_stop(struct hns3_adapter *hns) 5745 { 5746 struct hns3_hw *hw = &hns->hw; 5747 int ret; 5748 5749 /* 5750 * The "hns3_do_stop" function will also be called by .stop_service to 5751 * prepare reset. At the time of global or IMP reset, the command cannot 5752 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be 5753 * accessed during the reset process. So the mbuf cannot be released 5754 * during reset and is required to be released after the reset is 5755 * completed.
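 * This is why the hns3_dev_release_mbufs() call below is skipped while hw->reset.resetting is set.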
5756 */ 5757 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) 5758 hns3_dev_release_mbufs(hns); 5759 5760 ret = hns3_cfg_mac_mode(hw, false); 5761 if (ret) 5762 return ret; 5763 hw->mac.link_status = RTE_ETH_LINK_DOWN; 5764 5765 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { 5766 hns3_configure_all_mac_addr(hns, true); 5767 ret = hns3_reset_all_tqps(hns); 5768 if (ret) { 5769 hns3_err(hw, "failed to reset all queues ret = %d.", 5770 ret); 5771 return ret; 5772 } 5773 } 5774 5775 return 0; 5776 } 5777 5778 static void 5779 hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) 5780 { 5781 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5782 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5783 struct hns3_adapter *hns = dev->data->dev_private; 5784 struct hns3_hw *hw = &hns->hw; 5785 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; 5786 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5787 uint16_t q_id; 5788 5789 if (dev->data->dev_conf.intr_conf.rxq == 0) 5790 return; 5791 5792 /* unmap the ring with vector */ 5793 if (rte_intr_allow_others(intr_handle)) { 5794 vec = RTE_INTR_VEC_RXTX_OFFSET; 5795 base = RTE_INTR_VEC_RXTX_OFFSET; 5796 } 5797 if (rte_intr_dp_is_en(intr_handle)) { 5798 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5799 (void)hns3_bind_ring_with_vector(hw, vec, false, 5800 HNS3_RING_TYPE_RX, 5801 q_id); 5802 if (vec < base + rte_intr_nb_efd_get(intr_handle) 5803 - 1) 5804 vec++; 5805 } 5806 } 5807 /* Clean datapath event and queue/vec mapping */ 5808 rte_intr_efd_disable(intr_handle); 5809 rte_intr_vec_list_free(intr_handle); 5810 } 5811 5812 static int 5813 hns3_dev_stop(struct rte_eth_dev *dev) 5814 { 5815 struct hns3_adapter *hns = dev->data->dev_private; 5816 struct hns3_hw *hw = &hns->hw; 5817 5818 PMD_INIT_FUNC_TRACE(); 5819 dev->data->dev_started = 0; 5820 5821 hw->adapter_state = HNS3_NIC_STOPPING; 5822 hns3_set_rxtx_function(dev); 5823 rte_wmb(); 5824 /* Disable datapath on secondary process. */ 5825 hns3_mp_req_stop_rxtx(dev); 5826 /* Prevent crashes when queues are still in use. 
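 * The rte_delay_ms(hw->cfg_max_queues) call below waits roughly one millisecond per configured queue, intended to let Rx/Tx burst calls still running on other cores return before the queues are stopped.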
*/ 5827 rte_delay_ms(hw->cfg_max_queues); 5828 5829 rte_spinlock_lock(&hw->lock); 5830 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { 5831 hns3_tm_dev_stop_proc(hw); 5832 hns3_config_mac_tnl_int(hw, false); 5833 hns3_stop_tqps(hw); 5834 hns3_do_stop(hns); 5835 hns3_unmap_rx_interrupt(dev); 5836 hw->adapter_state = HNS3_NIC_CONFIGURED; 5837 } 5838 hns3_rx_scattered_reset(dev); 5839 rte_eal_alarm_cancel(hns3_service_handler, dev); 5840 hns3_stop_report_lse(dev); 5841 rte_spinlock_unlock(&hw->lock); 5842 5843 return 0; 5844 } 5845 5846 static int 5847 hns3_dev_close(struct rte_eth_dev *eth_dev) 5848 { 5849 struct hns3_adapter *hns = eth_dev->data->dev_private; 5850 struct hns3_hw *hw = &hns->hw; 5851 int ret = 0; 5852 5853 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 5854 hns3_mp_uninit(eth_dev); 5855 return 0; 5856 } 5857 5858 if (hw->adapter_state == HNS3_NIC_STARTED) 5859 ret = hns3_dev_stop(eth_dev); 5860 5861 hw->adapter_state = HNS3_NIC_CLOSING; 5862 hns3_reset_abort(hns); 5863 hw->adapter_state = HNS3_NIC_CLOSED; 5864 5865 hns3_configure_all_mc_mac_addr(hns, true); 5866 hns3_remove_all_vlan_table(hns); 5867 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0); 5868 hns3_uninit_pf(eth_dev); 5869 hns3_free_all_queues(eth_dev); 5870 rte_free(hw->reset.wait_data); 5871 hns3_mp_uninit(eth_dev); 5872 hns3_warn(hw, "Close port %u finished", hw->data->port_id); 5873 5874 return ret; 5875 } 5876 5877 static void 5878 hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause, 5879 bool *tx_pause) 5880 { 5881 struct hns3_mac *mac = &hw->mac; 5882 uint32_t advertising = mac->advertising; 5883 uint32_t lp_advertising = mac->lp_advertising; 5884 *rx_pause = false; 5885 *tx_pause = false; 5886 5887 if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) { 5888 *rx_pause = true; 5889 *tx_pause = true; 5890 } else if (advertising & lp_advertising & 5891 HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) { 5892 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5893 *rx_pause = true; 5894 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5895 *tx_pause = true; 5896 } 5897 } 5898 5899 static enum hns3_fc_mode 5900 hns3_get_autoneg_fc_mode(struct hns3_hw *hw) 5901 { 5902 enum hns3_fc_mode current_mode; 5903 bool rx_pause = false; 5904 bool tx_pause = false; 5905 5906 switch (hw->mac.media_type) { 5907 case HNS3_MEDIA_TYPE_COPPER: 5908 hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause); 5909 break; 5910 5911 /* 5912 * Flow control auto-negotiation is not supported for fiber and 5913 * backpalne media type. 
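 * For those media types the cases below therefore fall back to the flow
 * control mode that was last requested via hw->requested_fc_mode.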
5914 */ 5915 case HNS3_MEDIA_TYPE_FIBER: 5916 case HNS3_MEDIA_TYPE_BACKPLANE: 5917 hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled."); 5918 current_mode = hw->requested_fc_mode; 5919 goto out; 5920 default: 5921 hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).", 5922 hw->mac.media_type); 5923 current_mode = HNS3_FC_NONE; 5924 goto out; 5925 } 5926 5927 if (rx_pause && tx_pause) 5928 current_mode = HNS3_FC_FULL; 5929 else if (rx_pause) 5930 current_mode = HNS3_FC_RX_PAUSE; 5931 else if (tx_pause) 5932 current_mode = HNS3_FC_TX_PAUSE; 5933 else 5934 current_mode = HNS3_FC_NONE; 5935 5936 out: 5937 return current_mode; 5938 } 5939 5940 static enum hns3_fc_mode 5941 hns3_get_current_fc_mode(struct rte_eth_dev *dev) 5942 { 5943 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5944 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5945 struct hns3_mac *mac = &hw->mac; 5946 5947 /* 5948 * When the flow control mode is obtained, the device may not complete 5949 * auto-negotiation. It is necessary to wait for link establishment. 5950 */ 5951 (void)hns3_dev_link_update(dev, 1); 5952 5953 /* 5954 * If the link auto-negotiation of the nic is disabled, or the flow 5955 * control auto-negotiation is not supported, the forced flow control 5956 * mode is used. 5957 */ 5958 if (mac->link_autoneg == 0 || !pf->support_fc_autoneg) 5959 return hw->requested_fc_mode; 5960 5961 return hns3_get_autoneg_fc_mode(hw); 5962 } 5963 5964 static int 5965 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5966 { 5967 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5968 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5969 enum hns3_fc_mode current_mode; 5970 5971 current_mode = hns3_get_current_fc_mode(dev); 5972 switch (current_mode) { 5973 case HNS3_FC_FULL: 5974 fc_conf->mode = RTE_ETH_FC_FULL; 5975 break; 5976 case HNS3_FC_TX_PAUSE: 5977 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 5978 break; 5979 case HNS3_FC_RX_PAUSE: 5980 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 5981 break; 5982 case HNS3_FC_NONE: 5983 default: 5984 fc_conf->mode = RTE_ETH_FC_NONE; 5985 break; 5986 } 5987 5988 fc_conf->pause_time = pf->pause_time; 5989 fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0; 5990 5991 return 0; 5992 } 5993 5994 static int 5995 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) 5996 { 5997 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 5998 5999 if (!pf->support_fc_autoneg) { 6000 if (autoneg != 0) { 6001 hns3_err(hw, "unsupported fc auto-negotiation setting."); 6002 return -EOPNOTSUPP; 6003 } 6004 6005 /* 6006 * Flow control auto-negotiation of the NIC is not supported, 6007 * but other auto-negotiation features may be supported. 6008 */ 6009 if (autoneg != hw->mac.link_autoneg) { 6010 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!"); 6011 return -EOPNOTSUPP; 6012 } 6013 6014 return 0; 6015 } 6016 6017 /* 6018 * If flow control auto-negotiation of the NIC is supported, all 6019 * auto-negotiation features are supported. 
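 * The autoneg switch itself is still driven through the 'link_speeds'
 * field of struct rte_eth_conf, so the check below only accepts an
 * fc_conf->autoneg value that matches the current hw->mac.link_autoneg
 * state.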
6020 */ 6021 if (autoneg != hw->mac.link_autoneg) { 6022 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!"); 6023 return -EOPNOTSUPP; 6024 } 6025 6026 return 0; 6027 } 6028 6029 static int 6030 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 6031 { 6032 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6033 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6034 int ret; 6035 6036 if (fc_conf->high_water || fc_conf->low_water || 6037 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) { 6038 hns3_err(hw, "Unsupported flow control settings specified, " 6039 "high_water(%u), low_water(%u), send_xon(%u) and " 6040 "mac_ctrl_frame_fwd(%u) must be set to '0'", 6041 fc_conf->high_water, fc_conf->low_water, 6042 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd); 6043 return -EINVAL; 6044 } 6045 6046 ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg); 6047 if (ret) 6048 return ret; 6049 6050 if (!fc_conf->pause_time) { 6051 hns3_err(hw, "Invalid pause time %u setting.", 6052 fc_conf->pause_time); 6053 return -EINVAL; 6054 } 6055 6056 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 6057 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) { 6058 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. " 6059 "current_fc_status = %d", hw->current_fc_status); 6060 return -EOPNOTSUPP; 6061 } 6062 6063 if (hw->num_tc > 1 && !pf->support_multi_tc_pause) { 6064 hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported."); 6065 return -EOPNOTSUPP; 6066 } 6067 6068 rte_spinlock_lock(&hw->lock); 6069 ret = hns3_fc_enable(dev, fc_conf); 6070 rte_spinlock_unlock(&hw->lock); 6071 6072 return ret; 6073 } 6074 6075 static int 6076 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, 6077 struct rte_eth_pfc_conf *pfc_conf) 6078 { 6079 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6080 int ret; 6081 6082 if (!hns3_dev_get_support(hw, DCB)) { 6083 hns3_err(hw, "This port does not support dcb configurations."); 6084 return -EOPNOTSUPP; 6085 } 6086 6087 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water || 6088 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) { 6089 hns3_err(hw, "Unsupported flow control settings specified, " 6090 "high_water(%u), low_water(%u), send_xon(%u) and " 6091 "mac_ctrl_frame_fwd(%u) must be set to '0'", 6092 pfc_conf->fc.high_water, pfc_conf->fc.low_water, 6093 pfc_conf->fc.send_xon, 6094 pfc_conf->fc.mac_ctrl_frame_fwd); 6095 return -EINVAL; 6096 } 6097 if (pfc_conf->fc.autoneg) { 6098 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 6099 return -EINVAL; 6100 } 6101 if (pfc_conf->fc.pause_time == 0) { 6102 hns3_err(hw, "Invalid pause time %u setting.", 6103 pfc_conf->fc.pause_time); 6104 return -EINVAL; 6105 } 6106 6107 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 6108 hw->current_fc_status == HNS3_FC_STATUS_PFC)) { 6109 hns3_err(hw, "MAC pause is enabled. Cannot set PFC." 
6110 "current_fc_status = %d", hw->current_fc_status); 6111 return -EOPNOTSUPP; 6112 } 6113 6114 rte_spinlock_lock(&hw->lock); 6115 ret = hns3_dcb_pfc_enable(dev, pfc_conf); 6116 rte_spinlock_unlock(&hw->lock); 6117 6118 return ret; 6119 } 6120 6121 static int 6122 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) 6123 { 6124 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6125 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6126 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; 6127 int i; 6128 6129 rte_spinlock_lock(&hw->lock); 6130 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) 6131 dcb_info->nb_tcs = pf->local_max_tc; 6132 else 6133 dcb_info->nb_tcs = 1; 6134 6135 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) 6136 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; 6137 for (i = 0; i < dcb_info->nb_tcs; i++) 6138 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; 6139 6140 for (i = 0; i < hw->num_tc; i++) { 6141 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; 6142 dcb_info->tc_queue.tc_txq[0][i].base = 6143 hw->tc_queue[i].tqp_offset; 6144 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; 6145 dcb_info->tc_queue.tc_txq[0][i].nb_queue = 6146 hw->tc_queue[i].tqp_count; 6147 } 6148 rte_spinlock_unlock(&hw->lock); 6149 6150 return 0; 6151 } 6152 6153 static int 6154 hns3_reinit_dev(struct hns3_adapter *hns) 6155 { 6156 struct hns3_hw *hw = &hns->hw; 6157 int ret; 6158 6159 ret = hns3_cmd_init(hw); 6160 if (ret) { 6161 hns3_err(hw, "Failed to init cmd: %d", ret); 6162 return ret; 6163 } 6164 6165 ret = hns3_reset_all_tqps(hns); 6166 if (ret) { 6167 hns3_err(hw, "Failed to reset all queues: %d", ret); 6168 return ret; 6169 } 6170 6171 ret = hns3_init_hardware(hns); 6172 if (ret) { 6173 hns3_err(hw, "Failed to init hardware: %d", ret); 6174 return ret; 6175 } 6176 6177 ret = hns3_enable_hw_error_intr(hns, true); 6178 if (ret) { 6179 hns3_err(hw, "fail to enable hw error interrupts: %d", 6180 ret); 6181 return ret; 6182 } 6183 hns3_info(hw, "Reset done, driver initialization finished."); 6184 6185 return 0; 6186 } 6187 6188 static bool 6189 is_pf_reset_done(struct hns3_hw *hw) 6190 { 6191 uint32_t val, reg, reg_bit; 6192 6193 switch (hw->reset.level) { 6194 case HNS3_IMP_RESET: 6195 reg = HNS3_GLOBAL_RESET_REG; 6196 reg_bit = HNS3_IMP_RESET_BIT; 6197 break; 6198 case HNS3_GLOBAL_RESET: 6199 reg = HNS3_GLOBAL_RESET_REG; 6200 reg_bit = HNS3_GLOBAL_RESET_BIT; 6201 break; 6202 case HNS3_FUNC_RESET: 6203 reg = HNS3_FUN_RST_ING; 6204 reg_bit = HNS3_FUN_RST_ING_B; 6205 break; 6206 case HNS3_FLR_RESET: 6207 default: 6208 hns3_err(hw, "Wait for unsupported reset level: %d", 6209 hw->reset.level); 6210 return true; 6211 } 6212 val = hns3_read_dev(hw, reg); 6213 if (hns3_get_bit(val, reg_bit)) 6214 return false; 6215 else 6216 return true; 6217 } 6218 6219 bool 6220 hns3_is_reset_pending(struct hns3_adapter *hns) 6221 { 6222 struct hns3_hw *hw = &hns->hw; 6223 enum hns3_reset_level reset; 6224 6225 hns3_check_event_cause(hns, NULL); 6226 reset = hns3_get_reset_level(hns, &hw->reset.pending); 6227 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6228 hw->reset.level < reset) { 6229 hns3_warn(hw, "High level reset %d is pending", reset); 6230 return true; 6231 } 6232 reset = hns3_get_reset_level(hns, &hw->reset.request); 6233 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6234 hw->reset.level < reset) { 6235 hns3_warn(hw, "High level reset %d is 
request", reset); 6236 return true; 6237 } 6238 return false; 6239 } 6240 6241 static int 6242 hns3_wait_hardware_ready(struct hns3_adapter *hns) 6243 { 6244 struct hns3_hw *hw = &hns->hw; 6245 struct hns3_wait_data *wait_data = hw->reset.wait_data; 6246 struct timeval tv; 6247 6248 if (wait_data->result == HNS3_WAIT_SUCCESS) 6249 return 0; 6250 else if (wait_data->result == HNS3_WAIT_TIMEOUT) { 6251 hns3_clock_gettime(&tv); 6252 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", 6253 tv.tv_sec, tv.tv_usec); 6254 return -ETIME; 6255 } else if (wait_data->result == HNS3_WAIT_REQUEST) 6256 return -EAGAIN; 6257 6258 wait_data->hns = hns; 6259 wait_data->check_completion = is_pf_reset_done; 6260 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT * 6261 HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms(); 6262 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC; 6263 wait_data->count = HNS3_RESET_WAIT_CNT; 6264 wait_data->result = HNS3_WAIT_REQUEST; 6265 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); 6266 return -EAGAIN; 6267 } 6268 6269 static int 6270 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) 6271 { 6272 struct hns3_cmd_desc desc; 6273 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data; 6274 6275 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); 6276 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1); 6277 req->fun_reset_vfid = func_id; 6278 6279 return hns3_cmd_send(hw, &desc, 1); 6280 } 6281 6282 static int 6283 hns3_imp_reset_cmd(struct hns3_hw *hw) 6284 { 6285 struct hns3_cmd_desc desc; 6286 6287 hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); 6288 desc.data[0] = 0xeedd; 6289 6290 return hns3_cmd_send(hw, &desc, 1); 6291 } 6292 6293 static void 6294 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) 6295 { 6296 struct hns3_hw *hw = &hns->hw; 6297 struct timeval tv; 6298 uint32_t val; 6299 6300 hns3_clock_gettime(&tv); 6301 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) || 6302 hns3_read_dev(hw, HNS3_FUN_RST_ING)) { 6303 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld", 6304 tv.tv_sec, tv.tv_usec); 6305 return; 6306 } 6307 6308 switch (reset_level) { 6309 case HNS3_IMP_RESET: 6310 hns3_imp_reset_cmd(hw); 6311 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", 6312 tv.tv_sec, tv.tv_usec); 6313 break; 6314 case HNS3_GLOBAL_RESET: 6315 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG); 6316 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1); 6317 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val); 6318 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld", 6319 tv.tv_sec, tv.tv_usec); 6320 break; 6321 case HNS3_FUNC_RESET: 6322 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld", 6323 tv.tv_sec, tv.tv_usec); 6324 /* schedule again to check later */ 6325 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending); 6326 hns3_schedule_reset(hns); 6327 break; 6328 default: 6329 hns3_warn(hw, "Unsupported reset level: %d", reset_level); 6330 return; 6331 } 6332 hns3_atomic_clear_bit(reset_level, &hw->reset.request); 6333 } 6334 6335 static enum hns3_reset_level 6336 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) 6337 { 6338 struct hns3_hw *hw = &hns->hw; 6339 enum hns3_reset_level reset_level = HNS3_NONE_RESET; 6340 6341 /* Return the highest priority reset level amongst all */ 6342 if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels)) 6343 reset_level = HNS3_IMP_RESET; 6344 else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels)) 6345 reset_level = 
HNS3_GLOBAL_RESET; 6346 else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels)) 6347 reset_level = HNS3_FUNC_RESET; 6348 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels)) 6349 reset_level = HNS3_FLR_RESET; 6350 6351 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level) 6352 return HNS3_NONE_RESET; 6353 6354 return reset_level; 6355 } 6356 6357 static void 6358 hns3_record_imp_error(struct hns3_adapter *hns) 6359 { 6360 struct hns3_hw *hw = &hns->hw; 6361 uint32_t reg_val; 6362 6363 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 6364 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) { 6365 hns3_warn(hw, "Detected IMP RD poison!"); 6366 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0); 6367 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 6368 } 6369 6370 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) { 6371 hns3_warn(hw, "Detected IMP CMDQ error!"); 6372 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0); 6373 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 6374 } 6375 } 6376 6377 static int 6378 hns3_prepare_reset(struct hns3_adapter *hns) 6379 { 6380 struct hns3_hw *hw = &hns->hw; 6381 uint32_t reg_val; 6382 int ret; 6383 6384 switch (hw->reset.level) { 6385 case HNS3_FUNC_RESET: 6386 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID); 6387 if (ret) 6388 return ret; 6389 6390 /* 6391 * After performaning pf reset, it is not necessary to do the 6392 * mailbox handling or send any command to firmware, because 6393 * any mailbox handling or command to firmware is only valid 6394 * after hns3_cmd_init is called. 6395 */ 6396 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 6397 hw->reset.stats.request_cnt++; 6398 break; 6399 case HNS3_IMP_RESET: 6400 hns3_record_imp_error(hns); 6401 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 6402 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val | 6403 BIT(HNS3_VECTOR0_IMP_RESET_INT_B)); 6404 break; 6405 default: 6406 break; 6407 } 6408 return 0; 6409 } 6410 6411 static int 6412 hns3_set_rst_done(struct hns3_hw *hw) 6413 { 6414 struct hns3_pf_rst_done_cmd *req; 6415 struct hns3_cmd_desc desc; 6416 6417 req = (struct hns3_pf_rst_done_cmd *)desc.data; 6418 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false); 6419 req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT; 6420 return hns3_cmd_send(hw, &desc, 1); 6421 } 6422 6423 static int 6424 hns3_stop_service(struct hns3_adapter *hns) 6425 { 6426 struct hns3_hw *hw = &hns->hw; 6427 struct rte_eth_dev *eth_dev; 6428 6429 eth_dev = &rte_eth_devices[hw->data->port_id]; 6430 hw->mac.link_status = RTE_ETH_LINK_DOWN; 6431 if (hw->adapter_state == HNS3_NIC_STARTED) { 6432 rte_eal_alarm_cancel(hns3_service_handler, eth_dev); 6433 hns3_update_linkstatus_and_event(hw, false); 6434 } 6435 6436 hns3_set_rxtx_function(eth_dev); 6437 rte_wmb(); 6438 /* Disable datapath on secondary process. */ 6439 hns3_mp_req_stop_rxtx(eth_dev); 6440 rte_delay_ms(hw->cfg_max_queues); 6441 6442 rte_spinlock_lock(&hw->lock); 6443 if (hns->hw.adapter_state == HNS3_NIC_STARTED || 6444 hw->adapter_state == HNS3_NIC_STOPPING) { 6445 hns3_enable_all_queues(hw, false); 6446 hns3_do_stop(hns); 6447 hw->reset.mbuf_deferred_free = true; 6448 } else 6449 hw->reset.mbuf_deferred_free = false; 6450 6451 /* 6452 * It is cumbersome for hardware to pick-and-choose entries for deletion 6453 * from table space. 
Hence, for function reset software intervention is 6454 * required to delete the entries 6455 */ 6456 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) 6457 hns3_configure_all_mc_mac_addr(hns, true); 6458 rte_spinlock_unlock(&hw->lock); 6459 6460 return 0; 6461 } 6462 6463 static int 6464 hns3_start_service(struct hns3_adapter *hns) 6465 { 6466 struct hns3_hw *hw = &hns->hw; 6467 struct rte_eth_dev *eth_dev; 6468 6469 if (hw->reset.level == HNS3_IMP_RESET || 6470 hw->reset.level == HNS3_GLOBAL_RESET) 6471 hns3_set_rst_done(hw); 6472 eth_dev = &rte_eth_devices[hw->data->port_id]; 6473 hns3_set_rxtx_function(eth_dev); 6474 hns3_mp_req_start_rxtx(eth_dev); 6475 if (hw->adapter_state == HNS3_NIC_STARTED) { 6476 /* 6477 * This API parent function already hold the hns3_hw.lock, the 6478 * hns3_service_handler may report lse, in bonding application 6479 * it will call driver's ops which may acquire the hns3_hw.lock 6480 * again, thus lead to deadlock. 6481 * We defer calls hns3_service_handler to avoid the deadlock. 6482 */ 6483 rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL, 6484 hns3_service_handler, eth_dev); 6485 6486 /* Enable interrupt of all rx queues before enabling queues */ 6487 hns3_dev_all_rx_queue_intr_enable(hw, true); 6488 /* 6489 * Enable state of each rxq and txq will be recovered after 6490 * reset, so we need to restore them before enable all tqps; 6491 */ 6492 hns3_restore_tqp_enable_state(hw); 6493 /* 6494 * When finished the initialization, enable queues to receive 6495 * and transmit packets. 6496 */ 6497 hns3_enable_all_queues(hw, true); 6498 } 6499 6500 return 0; 6501 } 6502 6503 static int 6504 hns3_restore_conf(struct hns3_adapter *hns) 6505 { 6506 struct hns3_hw *hw = &hns->hw; 6507 int ret; 6508 6509 ret = hns3_configure_all_mac_addr(hns, false); 6510 if (ret) 6511 return ret; 6512 6513 ret = hns3_configure_all_mc_mac_addr(hns, false); 6514 if (ret) 6515 goto err_mc_mac; 6516 6517 ret = hns3_dev_promisc_restore(hns); 6518 if (ret) 6519 goto err_promisc; 6520 6521 ret = hns3_restore_vlan_table(hns); 6522 if (ret) 6523 goto err_promisc; 6524 6525 ret = hns3_restore_vlan_conf(hns); 6526 if (ret) 6527 goto err_promisc; 6528 6529 ret = hns3_restore_all_fdir_filter(hns); 6530 if (ret) 6531 goto err_promisc; 6532 6533 ret = hns3_restore_ptp(hns); 6534 if (ret) 6535 goto err_promisc; 6536 6537 ret = hns3_restore_rx_interrupt(hw); 6538 if (ret) 6539 goto err_promisc; 6540 6541 ret = hns3_restore_gro_conf(hw); 6542 if (ret) 6543 goto err_promisc; 6544 6545 ret = hns3_restore_fec(hw); 6546 if (ret) 6547 goto err_promisc; 6548 6549 if (hns->hw.adapter_state == HNS3_NIC_STARTED) { 6550 ret = hns3_do_start(hns, false); 6551 if (ret) 6552 goto err_promisc; 6553 hns3_info(hw, "hns3 dev restart successful!"); 6554 } else if (hw->adapter_state == HNS3_NIC_STOPPING) 6555 hw->adapter_state = HNS3_NIC_CONFIGURED; 6556 return 0; 6557 6558 err_promisc: 6559 hns3_configure_all_mc_mac_addr(hns, true); 6560 err_mc_mac: 6561 hns3_configure_all_mac_addr(hns, true); 6562 return ret; 6563 } 6564 6565 static void 6566 hns3_reset_service(void *param) 6567 { 6568 struct hns3_adapter *hns = (struct hns3_adapter *)param; 6569 struct hns3_hw *hw = &hns->hw; 6570 enum hns3_reset_level reset_level; 6571 struct timeval tv_delta; 6572 struct timeval tv_start; 6573 struct timeval tv; 6574 uint64_t msec; 6575 int ret; 6576 6577 /* 6578 * The interrupt is not triggered within the delay time. 6579 * The interrupt may have been lost. 
It is necessary to handle 6580 * the interrupt to recover from the error. 6581 */ 6582 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 6583 SCHEDULE_DEFERRED) { 6584 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, 6585 __ATOMIC_RELAXED); 6586 hns3_err(hw, "Handling interrupts in delayed tasks"); 6587 hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]); 6588 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 6589 if (reset_level == HNS3_NONE_RESET) { 6590 hns3_err(hw, "No reset level is set, try IMP reset"); 6591 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); 6592 } 6593 } 6594 __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED); 6595 6596 /* 6597 * Check if there is any ongoing reset in the hardware. This status can 6598 * be checked from reset_pending. If there is then, we need to wait for 6599 * hardware to complete reset. 6600 * a. If we are able to figure out in reasonable time that hardware 6601 * has fully resetted then, we can proceed with driver, client 6602 * reset. 6603 * b. else, we can come back later to check this status so re-sched 6604 * now. 6605 */ 6606 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 6607 if (reset_level != HNS3_NONE_RESET) { 6608 hns3_clock_gettime(&tv_start); 6609 ret = hns3_reset_process(hns, reset_level); 6610 hns3_clock_gettime(&tv); 6611 timersub(&tv, &tv_start, &tv_delta); 6612 msec = hns3_clock_calctime_ms(&tv_delta); 6613 if (msec > HNS3_RESET_PROCESS_MS) 6614 hns3_err(hw, "%d handle long time delta %" PRIu64 6615 " ms time=%ld.%.6ld", 6616 hw->reset.level, msec, 6617 tv.tv_sec, tv.tv_usec); 6618 if (ret == -EAGAIN) 6619 return; 6620 } 6621 6622 /* Check if we got any *new* reset requests to be honored */ 6623 reset_level = hns3_get_reset_level(hns, &hw->reset.request); 6624 if (reset_level != HNS3_NONE_RESET) 6625 hns3_msix_process(hns, reset_level); 6626 } 6627 6628 static unsigned int 6629 hns3_get_speed_capa_num(uint16_t device_id) 6630 { 6631 unsigned int num; 6632 6633 switch (device_id) { 6634 case HNS3_DEV_ID_25GE: 6635 case HNS3_DEV_ID_25GE_RDMA: 6636 num = 2; 6637 break; 6638 case HNS3_DEV_ID_100G_RDMA_MACSEC: 6639 case HNS3_DEV_ID_200G_RDMA: 6640 num = 1; 6641 break; 6642 default: 6643 num = 0; 6644 break; 6645 } 6646 6647 return num; 6648 } 6649 6650 static int 6651 hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa, 6652 uint16_t device_id) 6653 { 6654 switch (device_id) { 6655 case HNS3_DEV_ID_25GE: 6656 /* fallthrough */ 6657 case HNS3_DEV_ID_25GE_RDMA: 6658 speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed; 6659 speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa; 6660 6661 /* In HNS3 device, the 25G NIC is compatible with 10G rate */ 6662 speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed; 6663 speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa; 6664 break; 6665 case HNS3_DEV_ID_100G_RDMA_MACSEC: 6666 speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed; 6667 speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa; 6668 break; 6669 case HNS3_DEV_ID_200G_RDMA: 6670 speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed; 6671 speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa; 6672 break; 6673 default: 6674 return -ENOTSUP; 6675 } 6676 6677 return 0; 6678 } 6679 6680 static int 6681 hns3_fec_get_capability(struct rte_eth_dev *dev, 6682 struct rte_eth_fec_capa *speed_fec_capa, 6683 unsigned int num) 6684 { 6685 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6686 struct rte_pci_device *pci_dev = 
RTE_ETH_DEV_TO_PCI(dev); 6687 uint16_t device_id = pci_dev->id.device_id; 6688 unsigned int capa_num; 6689 int ret; 6690 6691 capa_num = hns3_get_speed_capa_num(device_id); 6692 if (capa_num == 0) { 6693 hns3_err(hw, "device(0x%x) is not supported by hns3 PMD", 6694 device_id); 6695 return -ENOTSUP; 6696 } 6697 6698 if (speed_fec_capa == NULL || num < capa_num) 6699 return capa_num; 6700 6701 ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id); 6702 if (ret) 6703 return -ENOTSUP; 6704 6705 return capa_num; 6706 } 6707 6708 static int 6709 get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) 6710 { 6711 struct hns3_config_fec_cmd *req; 6712 struct hns3_cmd_desc desc; 6713 int ret; 6714 6715 /* 6716 * CMD(HNS3_OPC_CONFIG_FEC_MODE) read is not supported 6717 * in device of link speed 6718 * below 10 Gbps. 6719 */ 6720 if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) { 6721 *state = 0; 6722 return 0; 6723 } 6724 6725 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true); 6726 req = (struct hns3_config_fec_cmd *)desc.data; 6727 ret = hns3_cmd_send(hw, &desc, 1); 6728 if (ret) { 6729 hns3_err(hw, "get current fec auto state failed, ret = %d", 6730 ret); 6731 return ret; 6732 } 6733 6734 *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B); 6735 return 0; 6736 } 6737 6738 static int 6739 hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) 6740 { 6741 struct hns3_sfp_info_cmd *resp; 6742 uint32_t tmp_fec_capa; 6743 uint8_t auto_state; 6744 struct hns3_cmd_desc desc; 6745 int ret; 6746 6747 /* 6748 * If link is down and AUTO is enabled, AUTO is returned, otherwise, 6749 * configured FEC mode is returned. 6750 * If link is up, current FEC mode is returned. 6751 */ 6752 if (hw->mac.link_status == RTE_ETH_LINK_DOWN) { 6753 ret = get_current_fec_auto_state(hw, &auto_state); 6754 if (ret) 6755 return ret; 6756 6757 if (auto_state == 0x1) { 6758 *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO); 6759 return 0; 6760 } 6761 } 6762 6763 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true); 6764 resp = (struct hns3_sfp_info_cmd *)desc.data; 6765 resp->query_type = HNS3_ACTIVE_QUERY; 6766 6767 ret = hns3_cmd_send(hw, &desc, 1); 6768 if (ret == -EOPNOTSUPP) { 6769 hns3_err(hw, "IMP do not support get FEC, ret = %d", ret); 6770 return ret; 6771 } else if (ret) { 6772 hns3_err(hw, "get FEC failed, ret = %d", ret); 6773 return ret; 6774 } 6775 6776 /* 6777 * FEC mode order defined in hns3 hardware is inconsistend with 6778 * that defined in the ethdev library. So the sequence needs 6779 * to be converted. 
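 * The switch below translates the hardware encoding reported in
 * resp->active_fec into the corresponding RTE_ETH_FEC_MODE_CAPA_MASK()
 * bit expected by the ethdev API.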
6780 */ 6781 switch (resp->active_fec) { 6782 case HNS3_HW_FEC_MODE_NOFEC: 6783 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 6784 break; 6785 case HNS3_HW_FEC_MODE_BASER: 6786 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER); 6787 break; 6788 case HNS3_HW_FEC_MODE_RS: 6789 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS); 6790 break; 6791 default: 6792 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 6793 break; 6794 } 6795 6796 *fec_capa = tmp_fec_capa; 6797 return 0; 6798 } 6799 6800 static int 6801 hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa) 6802 { 6803 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6804 6805 return hns3_fec_get_internal(hw, fec_capa); 6806 } 6807 6808 static int 6809 hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) 6810 { 6811 struct hns3_config_fec_cmd *req; 6812 struct hns3_cmd_desc desc; 6813 int ret; 6814 6815 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false); 6816 6817 req = (struct hns3_config_fec_cmd *)desc.data; 6818 switch (mode) { 6819 case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC): 6820 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6821 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF); 6822 break; 6823 case RTE_ETH_FEC_MODE_CAPA_MASK(BASER): 6824 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6825 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER); 6826 break; 6827 case RTE_ETH_FEC_MODE_CAPA_MASK(RS): 6828 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6829 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS); 6830 break; 6831 case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO): 6832 hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1); 6833 break; 6834 default: 6835 return 0; 6836 } 6837 ret = hns3_cmd_send(hw, &desc, 1); 6838 if (ret) 6839 hns3_err(hw, "set fec mode failed, ret = %d", ret); 6840 6841 return ret; 6842 } 6843 6844 static uint32_t 6845 get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa) 6846 { 6847 struct hns3_mac *mac = &hw->mac; 6848 uint32_t cur_capa; 6849 6850 switch (mac->link_speed) { 6851 case RTE_ETH_SPEED_NUM_10G: 6852 cur_capa = fec_capa[1].capa; 6853 break; 6854 case RTE_ETH_SPEED_NUM_25G: 6855 case RTE_ETH_SPEED_NUM_100G: 6856 case RTE_ETH_SPEED_NUM_200G: 6857 cur_capa = fec_capa[0].capa; 6858 break; 6859 default: 6860 cur_capa = 0; 6861 break; 6862 } 6863 6864 return cur_capa; 6865 } 6866 6867 static bool 6868 is_fec_mode_one_bit_set(uint32_t mode) 6869 { 6870 int cnt = 0; 6871 uint8_t i; 6872 6873 for (i = 0; i < sizeof(mode); i++) 6874 if (mode >> i & 0x1) 6875 cnt++; 6876 6877 return cnt == 1 ? true : false; 6878 } 6879 6880 static int 6881 hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) 6882 { 6883 #define FEC_CAPA_NUM 2 6884 struct hns3_adapter *hns = dev->data->dev_private; 6885 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 6886 struct hns3_pf *pf = &hns->pf; 6887 6888 struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM]; 6889 uint32_t cur_capa; 6890 uint32_t num = FEC_CAPA_NUM; 6891 int ret; 6892 6893 ret = hns3_fec_get_capability(dev, fec_capa, num); 6894 if (ret < 0) 6895 return ret; 6896 6897 /* HNS3 PMD driver only support one bit set mode, e.g. 0x1, 0x4 */ 6898 if (!is_fec_mode_one_bit_set(mode)) { 6899 hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, " 6900 "FEC mode should be only one bit set", mode); 6901 return -EINVAL; 6902 } 6903 6904 /* 6905 * Check whether the configured mode is within the FEC capability. 6906 * If not, the configured mode will not be supported. 
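 * The capability compared against is the one matching the current link
 * speed (see get_current_speed_fec_cap() above), so a mode that the port
 * cannot run at its present speed is rejected with -EINVAL below.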
6907 */ 6908 cur_capa = get_current_speed_fec_cap(hw, fec_capa); 6909 if (!(cur_capa & mode)) { 6910 hns3_err(hw, "unsupported FEC mode = 0x%x", mode); 6911 return -EINVAL; 6912 } 6913 6914 rte_spinlock_lock(&hw->lock); 6915 ret = hns3_set_fec_hw(hw, mode); 6916 if (ret) { 6917 rte_spinlock_unlock(&hw->lock); 6918 return ret; 6919 } 6920 6921 pf->fec_mode = mode; 6922 rte_spinlock_unlock(&hw->lock); 6923 6924 return 0; 6925 } 6926 6927 static int 6928 hns3_restore_fec(struct hns3_hw *hw) 6929 { 6930 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 6931 struct hns3_pf *pf = &hns->pf; 6932 uint32_t mode = pf->fec_mode; 6933 int ret; 6934 6935 ret = hns3_set_fec_hw(hw, mode); 6936 if (ret) 6937 hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d", 6938 mode, ret); 6939 6940 return ret; 6941 } 6942 6943 static int 6944 hns3_query_dev_fec_info(struct hns3_hw *hw) 6945 { 6946 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 6947 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns); 6948 int ret; 6949 6950 ret = hns3_fec_get_internal(hw, &pf->fec_mode); 6951 if (ret) 6952 hns3_err(hw, "query device FEC info failed, ret = %d", ret); 6953 6954 return ret; 6955 } 6956 6957 static bool 6958 hns3_optical_module_existed(struct hns3_hw *hw) 6959 { 6960 struct hns3_cmd_desc desc; 6961 bool existed; 6962 int ret; 6963 6964 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true); 6965 ret = hns3_cmd_send(hw, &desc, 1); 6966 if (ret) { 6967 hns3_err(hw, 6968 "fail to get optical module exist state, ret = %d.\n", 6969 ret); 6970 return false; 6971 } 6972 existed = !!desc.data[0]; 6973 6974 return existed; 6975 } 6976 6977 static int 6978 hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, 6979 uint32_t len, uint8_t *data) 6980 { 6981 #define HNS3_SFP_INFO_CMD_NUM 6 6982 #define HNS3_SFP_INFO_MAX_LEN \ 6983 (HNS3_SFP_INFO_BD0_LEN + \ 6984 (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN) 6985 struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM]; 6986 struct hns3_sfp_info_bd0_cmd *sfp_info_bd0; 6987 uint16_t read_len; 6988 uint16_t copy_len; 6989 int ret; 6990 int i; 6991 6992 for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) { 6993 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM, 6994 true); 6995 if (i < HNS3_SFP_INFO_CMD_NUM - 1) 6996 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 6997 } 6998 6999 sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data; 7000 sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset); 7001 read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN); 7002 sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len); 7003 7004 ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); 7005 if (ret) { 7006 hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n", 7007 ret); 7008 return ret; 7009 } 7010 7011 /* The data format in BD0 is different with the others. 
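 * BD0 also carries the request fields (offset, read_len), so it holds at
 * most HNS3_SFP_INFO_BD0_LEN bytes of EEPROM data, while each following
 * descriptor carries up to HNS3_SFP_INFO_BDX_LEN raw bytes; hence BD0 is
 * copied separately below.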
*/ 7012 copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN); 7013 memcpy(data, sfp_info_bd0->data, copy_len); 7014 read_len = copy_len; 7015 7016 for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) { 7017 if (read_len >= len) 7018 break; 7019 7020 copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN); 7021 memcpy(data + read_len, desc[i].data, copy_len); 7022 read_len += copy_len; 7023 } 7024 7025 return (int)read_len; 7026 } 7027 7028 static int 7029 hns3_get_module_eeprom(struct rte_eth_dev *dev, 7030 struct rte_dev_eeprom_info *info) 7031 { 7032 struct hns3_adapter *hns = dev->data->dev_private; 7033 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7034 uint32_t offset = info->offset; 7035 uint32_t len = info->length; 7036 uint8_t *data = info->data; 7037 uint32_t read_len = 0; 7038 7039 if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER) 7040 return -ENOTSUP; 7041 7042 if (!hns3_optical_module_existed(hw)) { 7043 hns3_err(hw, "fail to read module EEPROM: no module is connected.\n"); 7044 return -EIO; 7045 } 7046 7047 while (read_len < len) { 7048 int ret; 7049 ret = hns3_get_module_eeprom_data(hw, offset + read_len, 7050 len - read_len, 7051 data + read_len); 7052 if (ret < 0) 7053 return -EIO; 7054 read_len += ret; 7055 } 7056 7057 return 0; 7058 } 7059 7060 static int 7061 hns3_get_module_info(struct rte_eth_dev *dev, 7062 struct rte_eth_dev_module_info *modinfo) 7063 { 7064 #define HNS3_SFF8024_ID_SFP 0x03 7065 #define HNS3_SFF8024_ID_QSFP_8438 0x0c 7066 #define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d 7067 #define HNS3_SFF8024_ID_QSFP28_8636 0x11 7068 #define HNS3_SFF_8636_V1_3 0x03 7069 struct hns3_adapter *hns = dev->data->dev_private; 7070 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7071 struct rte_dev_eeprom_info info; 7072 struct hns3_sfp_type sfp_type; 7073 int ret; 7074 7075 memset(&sfp_type, 0, sizeof(sfp_type)); 7076 memset(&info, 0, sizeof(info)); 7077 info.data = (uint8_t *)&sfp_type; 7078 info.length = sizeof(sfp_type); 7079 ret = hns3_get_module_eeprom(dev, &info); 7080 if (ret) 7081 return ret; 7082 7083 switch (sfp_type.type) { 7084 case HNS3_SFF8024_ID_SFP: 7085 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7086 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7087 break; 7088 case HNS3_SFF8024_ID_QSFP_8438: 7089 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7090 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7091 break; 7092 case HNS3_SFF8024_ID_QSFP_8436_8636: 7093 if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) { 7094 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7095 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7096 } else { 7097 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7098 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7099 } 7100 break; 7101 case HNS3_SFF8024_ID_QSFP28_8636: 7102 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7103 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7104 break; 7105 default: 7106 hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n", 7107 sfp_type.type, sfp_type.ext_type); 7108 return -EINVAL; 7109 } 7110 7111 return 0; 7112 } 7113 7114 void 7115 hns3_clock_gettime(struct timeval *tv) 7116 { 7117 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 7118 #define CLOCK_TYPE CLOCK_MONOTONIC_RAW 7119 #else 7120 #define CLOCK_TYPE CLOCK_MONOTONIC 7121 #endif 7122 #define NSEC_TO_USEC_DIV 1000 7123 7124 struct timespec spec; 7125 (void)clock_gettime(CLOCK_TYPE, &spec); 7126 7127 tv->tv_sec = spec.tv_sec; 7128 tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV; 7129 } 7130 7131 uint64_t 7132 hns3_clock_calctime_ms(struct 
timeval *tv) 7133 { 7134 return (uint64_t)tv->tv_sec * MSEC_PER_SEC + 7135 tv->tv_usec / USEC_PER_MSEC; 7136 } 7137 7138 uint64_t 7139 hns3_clock_gettime_ms(void) 7140 { 7141 struct timeval tv; 7142 7143 hns3_clock_gettime(&tv); 7144 return hns3_clock_calctime_ms(&tv); 7145 } 7146 7147 static int 7148 hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) 7149 { 7150 uint32_t hint = HNS3_IO_FUNC_HINT_NONE; 7151 7152 RTE_SET_USED(key); 7153 7154 if (strcmp(value, "vec") == 0) 7155 hint = HNS3_IO_FUNC_HINT_VEC; 7156 else if (strcmp(value, "sve") == 0) 7157 hint = HNS3_IO_FUNC_HINT_SVE; 7158 else if (strcmp(value, "simple") == 0) 7159 hint = HNS3_IO_FUNC_HINT_SIMPLE; 7160 else if (strcmp(value, "common") == 0) 7161 hint = HNS3_IO_FUNC_HINT_COMMON; 7162 7163 /* If the hint is valid then update output parameters */ 7164 if (hint != HNS3_IO_FUNC_HINT_NONE) 7165 *(uint32_t *)extra_args = hint; 7166 7167 return 0; 7168 } 7169 7170 static const char * 7171 hns3_get_io_hint_func_name(uint32_t hint) 7172 { 7173 switch (hint) { 7174 case HNS3_IO_FUNC_HINT_VEC: 7175 return "vec"; 7176 case HNS3_IO_FUNC_HINT_SVE: 7177 return "sve"; 7178 case HNS3_IO_FUNC_HINT_SIMPLE: 7179 return "simple"; 7180 case HNS3_IO_FUNC_HINT_COMMON: 7181 return "common"; 7182 default: 7183 return "none"; 7184 } 7185 } 7186 7187 static int 7188 hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) 7189 { 7190 uint64_t val; 7191 7192 RTE_SET_USED(key); 7193 7194 val = strtoull(value, NULL, 16); 7195 *(uint64_t *)extra_args = val; 7196 7197 return 0; 7198 } 7199 7200 static int 7201 hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) 7202 { 7203 uint32_t val; 7204 7205 RTE_SET_USED(key); 7206 7207 val = strtoul(value, NULL, 10); 7208 7209 /* 7210 * 500ms is empirical value in process of mailbox communication. If 7211 * the delay value is set to one lower thanthe empirical value, mailbox 7212 * communication may fail. 
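 * Only values above HNS3_MBX_DEF_TIME_LIMIT_MS that still fit in a
 * uint16_t are accepted by the check below. An illustrative devargs
 * string (the PCI address and key spelling are shown for illustration
 * only) might look like:
 *   -a 0000:7d:00.0,mbx_time_limit_ms=600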
7213 */ 7214 if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX) 7215 *(uint16_t *)extra_args = val; 7216 7217 return 0; 7218 } 7219 7220 void 7221 hns3_parse_devargs(struct rte_eth_dev *dev) 7222 { 7223 uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS; 7224 struct hns3_adapter *hns = dev->data->dev_private; 7225 uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7226 uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7227 struct hns3_hw *hw = &hns->hw; 7228 uint64_t dev_caps_mask = 0; 7229 struct rte_kvargs *kvlist; 7230 7231 if (dev->device->devargs == NULL) 7232 return; 7233 7234 kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL); 7235 if (!kvlist) 7236 return; 7237 7238 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT, 7239 &hns3_parse_io_hint_func, &rx_func_hint); 7240 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT, 7241 &hns3_parse_io_hint_func, &tx_func_hint); 7242 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK, 7243 &hns3_parse_dev_caps_mask, &dev_caps_mask); 7244 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS, 7245 &hns3_parse_mbx_time_limit, &mbx_time_limit_ms); 7246 7247 rte_kvargs_free(kvlist); 7248 7249 if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7250 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT, 7251 hns3_get_io_hint_func_name(rx_func_hint)); 7252 hns->rx_func_hint = rx_func_hint; 7253 if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7254 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT, 7255 hns3_get_io_hint_func_name(tx_func_hint)); 7256 hns->tx_func_hint = tx_func_hint; 7257 7258 if (dev_caps_mask != 0) 7259 hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".", 7260 HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask); 7261 hns->dev_caps_mask = dev_caps_mask; 7262 7263 if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS) 7264 hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS, 7265 mbx_time_limit_ms); 7266 hns->mbx_time_limit_ms = mbx_time_limit_ms; 7267 } 7268 7269 static const struct eth_dev_ops hns3_eth_dev_ops = { 7270 .dev_configure = hns3_dev_configure, 7271 .dev_start = hns3_dev_start, 7272 .dev_stop = hns3_dev_stop, 7273 .dev_close = hns3_dev_close, 7274 .promiscuous_enable = hns3_dev_promiscuous_enable, 7275 .promiscuous_disable = hns3_dev_promiscuous_disable, 7276 .allmulticast_enable = hns3_dev_allmulticast_enable, 7277 .allmulticast_disable = hns3_dev_allmulticast_disable, 7278 .mtu_set = hns3_dev_mtu_set, 7279 .stats_get = hns3_stats_get, 7280 .stats_reset = hns3_stats_reset, 7281 .xstats_get = hns3_dev_xstats_get, 7282 .xstats_get_names = hns3_dev_xstats_get_names, 7283 .xstats_reset = hns3_dev_xstats_reset, 7284 .xstats_get_by_id = hns3_dev_xstats_get_by_id, 7285 .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, 7286 .dev_infos_get = hns3_dev_infos_get, 7287 .fw_version_get = hns3_fw_version_get, 7288 .rx_queue_setup = hns3_rx_queue_setup, 7289 .tx_queue_setup = hns3_tx_queue_setup, 7290 .rx_queue_release = hns3_dev_rx_queue_release, 7291 .tx_queue_release = hns3_dev_tx_queue_release, 7292 .rx_queue_start = hns3_dev_rx_queue_start, 7293 .rx_queue_stop = hns3_dev_rx_queue_stop, 7294 .tx_queue_start = hns3_dev_tx_queue_start, 7295 .tx_queue_stop = hns3_dev_tx_queue_stop, 7296 .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, 7297 .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, 7298 .rxq_info_get = hns3_rxq_info_get, 7299 .txq_info_get = hns3_txq_info_get, 7300 .rx_burst_mode_get = hns3_rx_burst_mode_get, 7301 .tx_burst_mode_get = hns3_tx_burst_mode_get, 7302 
.flow_ctrl_get = hns3_flow_ctrl_get, 7303 .flow_ctrl_set = hns3_flow_ctrl_set, 7304 .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set, 7305 .mac_addr_add = hns3_add_mac_addr, 7306 .mac_addr_remove = hns3_remove_mac_addr, 7307 .mac_addr_set = hns3_set_default_mac_addr, 7308 .set_mc_addr_list = hns3_set_mc_mac_addr_list, 7309 .link_update = hns3_dev_link_update, 7310 .dev_set_link_up = hns3_dev_set_link_up, 7311 .dev_set_link_down = hns3_dev_set_link_down, 7312 .rss_hash_update = hns3_dev_rss_hash_update, 7313 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, 7314 .reta_update = hns3_dev_rss_reta_update, 7315 .reta_query = hns3_dev_rss_reta_query, 7316 .flow_ops_get = hns3_dev_flow_ops_get, 7317 .vlan_filter_set = hns3_vlan_filter_set, 7318 .vlan_tpid_set = hns3_vlan_tpid_set, 7319 .vlan_offload_set = hns3_vlan_offload_set, 7320 .vlan_pvid_set = hns3_vlan_pvid_set, 7321 .get_reg = hns3_get_regs, 7322 .get_module_info = hns3_get_module_info, 7323 .get_module_eeprom = hns3_get_module_eeprom, 7324 .get_dcb_info = hns3_get_dcb_info, 7325 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, 7326 .fec_get_capability = hns3_fec_get_capability, 7327 .fec_get = hns3_fec_get, 7328 .fec_set = hns3_fec_set, 7329 .tm_ops_get = hns3_tm_ops_get, 7330 .tx_done_cleanup = hns3_tx_done_cleanup, 7331 .timesync_enable = hns3_timesync_enable, 7332 .timesync_disable = hns3_timesync_disable, 7333 .timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp, 7334 .timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp, 7335 .timesync_adjust_time = hns3_timesync_adjust_time, 7336 .timesync_read_time = hns3_timesync_read_time, 7337 .timesync_write_time = hns3_timesync_write_time, 7338 }; 7339 7340 static const struct hns3_reset_ops hns3_reset_ops = { 7341 .reset_service = hns3_reset_service, 7342 .stop_service = hns3_stop_service, 7343 .prepare_reset = hns3_prepare_reset, 7344 .wait_hardware_ready = hns3_wait_hardware_ready, 7345 .reinit_dev = hns3_reinit_dev, 7346 .restore_conf = hns3_restore_conf, 7347 .start_service = hns3_start_service, 7348 }; 7349 7350 static void 7351 hns3_init_hw_ops(struct hns3_hw *hw) 7352 { 7353 hw->ops.add_mc_mac_addr = hns3_add_mc_mac_addr; 7354 hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr; 7355 hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr; 7356 hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr; 7357 } 7358 7359 static int 7360 hns3_dev_init(struct rte_eth_dev *eth_dev) 7361 { 7362 struct hns3_adapter *hns = eth_dev->data->dev_private; 7363 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 7364 struct rte_ether_addr *eth_addr; 7365 struct hns3_hw *hw = &hns->hw; 7366 int ret; 7367 7368 PMD_INIT_FUNC_TRACE(); 7369 7370 hns3_flow_init(eth_dev); 7371 7372 hns3_set_rxtx_function(eth_dev); 7373 eth_dev->dev_ops = &hns3_eth_dev_ops; 7374 eth_dev->rx_queue_count = hns3_rx_queue_count; 7375 ret = hns3_mp_init(eth_dev); 7376 if (ret) 7377 goto err_mp_init; 7378 7379 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 7380 hns3_tx_push_init(eth_dev); 7381 return 0; 7382 } 7383 7384 hw->adapter_state = HNS3_NIC_UNINITIALIZED; 7385 hns->is_vf = false; 7386 hw->data = eth_dev->data; 7387 hns3_parse_devargs(eth_dev); 7388 7389 /* 7390 * Set default max packet size according to the mtu 7391 * default vale in DPDK frame. 
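 * In practice the initial maximum packet size (pf.mps) set below is the
 * framework's default MTU plus the L2 overhead accounted for by
 * HNS3_ETH_OVERHEAD.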
7392 */ 7393 hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD; 7394 7395 ret = hns3_reset_init(hw); 7396 if (ret) 7397 goto err_init_reset; 7398 hw->reset.ops = &hns3_reset_ops; 7399 7400 hns3_init_hw_ops(hw); 7401 ret = hns3_init_pf(eth_dev); 7402 if (ret) { 7403 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret); 7404 goto err_init_pf; 7405 } 7406 7407 /* Allocate memory for storing MAC addresses */ 7408 eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac", 7409 sizeof(struct rte_ether_addr) * 7410 HNS3_UC_MACADDR_NUM, 0); 7411 if (eth_dev->data->mac_addrs == NULL) { 7412 PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed " 7413 "to store MAC addresses", 7414 sizeof(struct rte_ether_addr) * 7415 HNS3_UC_MACADDR_NUM); 7416 ret = -ENOMEM; 7417 goto err_rte_zmalloc; 7418 } 7419 7420 eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr; 7421 if (!rte_is_valid_assigned_ether_addr(eth_addr)) { 7422 rte_eth_random_addr(hw->mac.mac_addr); 7423 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 7424 (struct rte_ether_addr *)hw->mac.mac_addr); 7425 hns3_warn(hw, "default mac_addr from firmware is an invalid " 7426 "unicast address, using random MAC address %s", 7427 mac_str); 7428 } 7429 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, 7430 ð_dev->data->mac_addrs[0]); 7431 7432 hw->adapter_state = HNS3_NIC_INITIALIZED; 7433 7434 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 7435 SCHEDULE_PENDING) { 7436 hns3_err(hw, "Reschedule reset service after dev_init"); 7437 hns3_schedule_reset(hns); 7438 } else { 7439 /* IMP will wait ready flag before reset */ 7440 hns3_notify_reset_ready(hw, false); 7441 } 7442 7443 hns3_info(hw, "hns3 dev initialization successful!"); 7444 return 0; 7445 7446 err_rte_zmalloc: 7447 hns3_uninit_pf(eth_dev); 7448 7449 err_init_pf: 7450 rte_free(hw->reset.wait_data); 7451 7452 err_init_reset: 7453 hns3_mp_uninit(eth_dev); 7454 7455 err_mp_init: 7456 eth_dev->dev_ops = NULL; 7457 eth_dev->rx_pkt_burst = NULL; 7458 eth_dev->rx_descriptor_status = NULL; 7459 eth_dev->tx_pkt_burst = NULL; 7460 eth_dev->tx_pkt_prepare = NULL; 7461 eth_dev->tx_descriptor_status = NULL; 7462 return ret; 7463 } 7464 7465 static int 7466 hns3_dev_uninit(struct rte_eth_dev *eth_dev) 7467 { 7468 struct hns3_adapter *hns = eth_dev->data->dev_private; 7469 struct hns3_hw *hw = &hns->hw; 7470 7471 PMD_INIT_FUNC_TRACE(); 7472 7473 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 7474 hns3_mp_uninit(eth_dev); 7475 return 0; 7476 } 7477 7478 if (hw->adapter_state < HNS3_NIC_CLOSING) 7479 hns3_dev_close(eth_dev); 7480 7481 hw->adapter_state = HNS3_NIC_REMOVED; 7482 return 0; 7483 } 7484 7485 static int 7486 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 7487 struct rte_pci_device *pci_dev) 7488 { 7489 return rte_eth_dev_pci_generic_probe(pci_dev, 7490 sizeof(struct hns3_adapter), 7491 hns3_dev_init); 7492 } 7493 7494 static int 7495 eth_hns3_pci_remove(struct rte_pci_device *pci_dev) 7496 { 7497 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit); 7498 } 7499 7500 static const struct rte_pci_id pci_id_hns3_map[] = { 7501 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) }, 7502 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) }, 7503 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) }, 7504 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, 7505 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, 7506 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, 7507 { .vendor_id = 0, 
}, /* sentinel */ 7508 }; 7509 7510 static struct rte_pci_driver rte_hns3_pmd = { 7511 .id_table = pci_id_hns3_map, 7512 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 7513 .probe = eth_hns3_pci_probe, 7514 .remove = eth_hns3_pci_remove, 7515 }; 7516 7517 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd); 7518 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map); 7519 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); 7520 RTE_PMD_REGISTER_PARAM_STRING(net_hns3, 7521 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " 7522 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common " 7523 HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> " 7524 HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16> "); 7525 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE); 7526 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE); 7527