/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_kvargs.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"

#define HNS3_SERVICE_INTERVAL		1000000 /* us */
#define HNS3_SERVICE_QUICK_INTERVAL	10
#define HNS3_INVALID_PVID		0xFFFF

#define HNS3_FILTER_TYPE_VF		0
#define HNS3_FILTER_TYPE_PORT		1
#define HNS3_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HNS3_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HNS3_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HNS3_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HNS3_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HNS3_FILTER_FE_EGRESS		(HNS3_FILTER_FE_NIC_EGRESS_B \
					| HNS3_FILTER_FE_ROCE_EGRESS_B)
#define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
					| HNS3_FILTER_FE_ROCE_INGRESS_B)

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

#define HNS3_VECTOR0_IMP_RESET_INT_B	1
#define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
#define HNS3_VECTOR0_IMP_RD_POISON_B	5U
#define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U

#define HNS3_RESET_WAIT_MS	100
#define HNS3_RESET_WAIT_CNT	200

/* FEC mode order defined in HNS3 hardware */
#define HNS3_HW_FEC_MODE_NOFEC	0
#define HNS3_HW_FEC_MODE_BASER	1
#define HNS3_HW_FEC_MODE_RS	2

enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,
	HNS3_VECTOR0_EVENT_MBX,
	HNS3_VECTOR0_EVENT_ERR,
	HNS3_VECTOR0_EVENT_PTP,
	HNS3_VECTOR0_EVENT_OTHER,
};

static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
	{ ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};

static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
						  uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
				    int on);
static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
static bool hns3_update_link_status(struct hns3_hw *hw);

static int hns3_add_mc_addr(struct hns3_hw *hw,
			    struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_addr(struct hns3_hw *hw,
			       struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct hns3_hw *hw);
static int hns3_do_stop(struct hns3_adapter *hns);
static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);

void hns3_ether_format_addr(char *buf, uint16_t size,
			    const struct rte_ether_addr *ether_addr)
{
	snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
		 ether_addr->addr_bytes[0],
		 ether_addr->addr_bytes[4],
		 ether_addr->addr_bytes[5]);
}

static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3_evt_cause
hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
			  uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.imp_cnt++;
		hns3_warn(hw, "IMP reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw, "IMP reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
			     uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.global_cnt++;
		hns3_warn(hw, "Global reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw,
			  "Global reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t vector0_int_stats;
	uint32_t cmdq_src_val;
	uint32_t hw_err_src_reg;
	uint32_t val;
	enum hns3_evt_cause ret;
	bool is_delay;

	/* fetch the events from their corresponding regs */
	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);

	is_delay = clearval == NULL ? true : false;
	/*
	 * Assumption: if reset and mailbox events are reported together,
	 * only the reset event is processed and the mailbox event is
	 * deferred. Since the RX CMDQ event is not cleared this time,
	 * another interrupt will be received from the hardware just for
	 * the mailbox.
	 */
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
		ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Global reset */
	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
		ret = hns3_proc_global_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Check for vector0 1588 event source */
	if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
		val = BIT(HNS3_VECTOR0_1588_INT_B);
		ret = HNS3_VECTOR0_EVENT_PTP;
		goto out;
	}

	/* check for vector0 msix event source */
	if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
		val = vector0_int_stats | hw_err_src_reg;
		ret = HNS3_VECTOR0_EVENT_ERR;
		goto out;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
		cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		val = cmdq_src_val;
		ret = HNS3_VECTOR0_EVENT_MBX;
		goto out;
	}

	val = vector0_int_stats;
	ret = HNS3_VECTOR0_EVENT_OTHER;
out:

	if (clearval)
		*clearval = val;
	return ret;
}

static bool
hns3_is_1588_event_type(uint32_t event_type)
{
	return (event_type == HNS3_VECTOR0_EVENT_PTP);
}

static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
	if (event_type == HNS3_VECTOR0_EVENT_RST ||
	    hns3_is_1588_event_type(event_type))
		hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
	else if (event_type == HNS3_VECTOR0_EVENT_MBX)
		hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
	uint32_t vector0_int_stats;

	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during IMP reset interrupt");

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during Global reset interrupt");

	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
			       BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
			       BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
			       BIT(HNS3_VECTOR0_CORERESET_INT_B));
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
			       BIT(HNS3_VECTOR0_1588_INT_B));
}

static void
hns3_handle_mac_tnl(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	uint32_t status;
	int ret;

	/* query and clear mac tnl interrupt */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
		return;
	}

	status = rte_le_to_cpu_32(desc.data[0]);
	if (status) {
		hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
					  false);
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret)
			hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
				 ret);
	}
}
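
/*
 * Handler for the misc (vector 0) interrupt. Vector 0 is shared by the
 * reset, mailbox, RAS and PTP event sources, so the cause is decoded and
 * handled first, the source is cleared, and only then is vector 0 unmasked
 * again.
 */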
static void
hns3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_evt_cause event_cause;
	uint32_t clearval = 0;
	uint32_t vector0_int;
	uint32_t ras_int;
	uint32_t cmdq_int;

	/* Disable interrupt */
	hns3_pf_disable_irq0(hw);

	event_cause = hns3_check_event_cause(hns, &clearval);
	vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	/* vector 0 interrupt is shared with reset and mailbox source events. */
	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
		hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
		hns3_handle_mac_tnl(hw);
		hns3_handle_error(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
		hns3_warn(hw, "received reset interrupt");
		hns3_schedule_reset(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
		hns3_dev_handle_mbx_msg(hw);
	} else {
		hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
	}

	hns3_clear_event_cause(hw, event_cause, clearval);
	/* Enable interrupt if it is not caused by reset */
	hns3_pf_enable_irq0(hw);
}
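
/*
 * Program one VLAN id into the port VLAN filter table. Each command covers
 * a block of HNS3_VLAN_ID_OFFSET_STEP (160) VLAN ids: vlan_offset selects
 * the block and every byte of vlan_offset_bitmap carries 8 ids. Note that
 * vlan_cfg is active-low: 0 enables the filter entry and 1 disables it.
 */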
static int
hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3_VLAN_ID_OFFSET_STEP	160
#define HNS3_VLAN_BYTE_SIZE		8
	struct hns3_vlan_filter_pf_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	uint8_t vlan_offset_byte_val;
	struct hns3_cmd_desc desc;
	uint8_t vlan_offset_byte;
	uint8_t vlan_offset_base;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
			   HNS3_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);

	req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_base;
	req->vlan_cfg = on ? 0 : 1;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
			 vlan_id, ret);

	return ret;
}

static void
hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id) {
			if (vlan_entry->hd_tbl_status)
				hns3_set_port_vlan_filter(hns, vlan_id, 0);
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			break;
		}
	}
}

static void
hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
			bool writen_to_tbl)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id)
			return;
	}

	vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
	if (vlan_entry == NULL) {
		hns3_err(hw, "Failed to malloc hns3 vlan table");
		return;
	}

	vlan_entry->hd_tbl_status = writen_to_tbl;
	vlan_entry->vlan_id = vlan_id;

	LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
}

static int
hns3_restore_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;
	uint16_t vlan_id;
	int ret = 0;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
		return hns3_vlan_pvid_configure(hns,
						hw->port_base_vlan_cfg.pvid, 1);

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			vlan_id = vlan_entry->vlan_id;
			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
			if (ret)
				break;
		}
	}

	return ret;
}

static int
hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
	struct hns3_hw *hw = &hns->hw;
	bool writen_to_tbl = false;
	int ret = 0;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. So, to receive packets without vlan, vlan id
	 * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
	 */
	if (on == 0 && vlan_id == 0)
		return 0;

	/*
	 * When port based vlan is enabled, it is used as the vlan filter
	 * condition. In this case, adding a new vlan or removing an existing
	 * one only updates the vlan list, not the vlan filter table. The vlan
	 * ids in the vlan list are written to the vlan filter table once port
	 * based vlan is disabled.
	 */
	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
		writen_to_tbl = true;
	}

	if (ret == 0) {
		if (on)
			hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
		else
			hns3_rm_dev_vlan_table(hns, vlan_id);
	}
	return ret;
}

static int
hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
			 uint16_t tpid)
{
	struct hns3_rx_vlan_type_cfg_cmd *rx_req;
	struct hns3_tx_vlan_type_cfg_cmd *tx_req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
	     vlan_type != ETH_VLAN_TYPE_OUTER)) {
		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_VLAN) {
		hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
			 ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
	tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
			 ret);
	return ret;
}

static int
hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_rx_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
		     vcfg->strip_tag1_discard_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
		     vcfg->strip_tag2_discard_en ? 1 : 0);
	/*
	 * In current version VF is not supported when PF is driven by DPDK
	 * driver, just need to configure parameters for PF vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
	return ret;
}

static void
hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
}

static void
hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
}

static int
hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
{
	struct hns3_rx_vtag_cfg rxvlan_cfg;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		rxvlan_cfg.strip_tag1_en = false;
		rxvlan_cfg.strip_tag2_en = enable;
		rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		rxvlan_cfg.strip_tag1_en = enable;
		rxvlan_cfg.strip_tag2_en = true;
		rxvlan_cfg.strip_tag2_discard_en = true;
	}

	rxvlan_cfg.strip_tag1_discard_en = false;
	rxvlan_cfg.vlan1_vlan_prionly = false;
	rxvlan_cfg.vlan2_vlan_prionly = false;
	rxvlan_cfg.rx_vlan_offload_en = enable;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
	if (ret) {
		hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
			 enable ? "enable" : "disable", ret);
		return ret;
	}

	hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);

	return ret;
}
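
/*
 * Enable or disable a VLAN filter engine for one function. vlan_type selects
 * the VF or port filter (HNS3_FILTER_TYPE_VF/PORT) and fe_type is a bitmask
 * of the NIC/RoCE ingress or egress filter engines to act on.
 */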
static int
hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
			  uint8_t fe_type, bool filter_en, uint8_t vf_id)
{
	struct hns3_vlan_filter_ctrl_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set vlan filter fail, ret =%d", ret);

	return ret;
}

static int
hns3_vlan_filter_init(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
					HNS3_FILTER_FE_EGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret) {
		hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);

	return ret;
}

static int
hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, enable,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to %s port vlan filter, ret = %d",
			 enable ? "enable" : "disable", ret);

	return ret;
}

static int
hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_rxmode *rxmode;
	unsigned int tmp_mask;
	bool enable;
	int ret = 0;

	rte_spinlock_lock(&hw->lock);
	rxmode = &dev->data->dev_conf.rxmode;
	tmp_mask = (unsigned int)mask;
	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
		/* ignore vlan filter configuration during promiscuous mode */
		if (!dev->data->promiscuous) {
			/* Enable or disable VLAN filter */
			enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
				 true : false;

			ret = hns3_enable_vlan_filter(hns, enable);
			if (ret) {
				rte_spinlock_unlock(&hw->lock);
				hns3_err(hw, "failed to %s rx filter, ret = %d",
					 enable ? "enable" : "disable", ret);
				return ret;
			}
		}
	}

	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
			 true : false;

		ret = hns3_en_hw_strip_rxvtag(hns, enable);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			hns3_err(hw, "failed to %s rx strip, ret = %d",
				 enable ? "enable" : "disable", ret);
			return ret;
		}
	}

	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_tx_cfg_cmd *req;
	struct hns3_cmd_desc desc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = vcfg->default_tag1;
	req->def_vlan_tag2 = vcfg->default_tag2;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
		     vcfg->accept_tag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
		     vcfg->accept_untag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
		     vcfg->accept_tag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
		     vcfg->accept_untag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
		     vcfg->insert_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
		     vcfg->insert_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
		     vcfg->tag_shift_mode_en ? 1 : 0);

	/*
	 * In current version VF is not supported when PF is driven by DPDK
	 * driver, just need to configure parameters for PF vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);

	return ret;
}

static int
hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
		     uint16_t pvid)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_vtag_cfg txvlan_cfg;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
		txvlan_cfg.accept_tag1 = true;
		txvlan_cfg.insert_tag1_en = false;
		txvlan_cfg.default_tag1 = 0;
	} else {
		txvlan_cfg.accept_tag1 =
			hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
		txvlan_cfg.insert_tag1_en = true;
		txvlan_cfg.default_tag1 = pvid;
	}

	txvlan_cfg.accept_untag1 = true;
	txvlan_cfg.accept_tag2 = true;
	txvlan_cfg.accept_untag2 = true;
	txvlan_cfg.insert_tag2_en = false;
	txvlan_cfg.default_tag2 = 0;
	txvlan_cfg.tag_shift_mode_en = true;

	ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
	if (ret) {
		hns3_err(hw, "pf vlan set pvid failed, pvid =%u, ret =%d", pvid,
			 ret);
		return ret;
	}

	hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
	return ret;
}

static void
hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
			vlan_entry->hd_tbl_status = false;
		}
	}

	if (is_del_list) {
		vlan_entry = LIST_FIRST(&pf->vlan_list);
		while (vlan_entry) {
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			vlan_entry = LIST_FIRST(&pf->vlan_list);
		}
	}
}

static void
hns3_add_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (!vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
			vlan_entry->hd_tbl_status = true;
		}
	}
}

static void
hns3_remove_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	hns3_rm_all_vlan_table(hns, true);
	if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
		ret = hns3_set_port_vlan_filter(hns,
						hw->port_base_vlan_cfg.pvid, 0);
		if (ret) {
			hns3_err(hw, "Failed to remove all vlan table, ret =%d",
				 ret);
			return;
		}
	}
}
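
/*
 * Swap the hardware filter entries when the PVID state changes: on enable,
 * drop the old PVID entry and every entry written from the vlan list, then
 * add the new PVID; on disable, drop the PVID entry and write the vlan list
 * back to the filter table.
 */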
static int
hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
				uint16_t port_base_vlan_state,
				uint16_t new_pvid)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t old_pvid;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
		old_pvid = hw->port_base_vlan_cfg.pvid;
		if (old_pvid != HNS3_INVALID_PVID) {
			ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
			if (ret) {
				hns3_err(hw, "failed to remove old pvid %u, "
					 "ret = %d", old_pvid, ret);
				return ret;
			}
		}

		hns3_rm_all_vlan_table(hns, false);
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
		if (ret) {
			hns3_err(hw, "failed to add new pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}
	} else {
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
		if (ret) {
			hns3_err(hw, "failed to remove pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}

		hns3_add_all_vlan_table(hns);
	}
	return 0;
}

static int
hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
{
	struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
	struct hns3_rx_vtag_cfg rx_vlan_cfg;
	bool rx_strip_en;
	int ret;

	rx_strip_en = old_cfg->rx_vlan_offload_en;
	if (on) {
		rx_vlan_cfg.strip_tag1_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_en = true;
		rx_vlan_cfg.strip_tag2_discard_en = true;
	} else {
		rx_vlan_cfg.strip_tag1_en = false;
		rx_vlan_cfg.strip_tag2_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_discard_en = false;
	}
	rx_vlan_cfg.strip_tag1_discard_en = false;
	rx_vlan_cfg.vlan1_vlan_prionly = false;
	rx_vlan_cfg.vlan2_vlan_prionly = false;
	rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
	if (ret)
		return ret;

	hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
	return ret;
}
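
/*
 * Apply a PVID configuration in three steps: Tx tag insertion, Rx tag
 * stripping, then the filter table entries. A failing step rolls the
 * earlier ones back to the last committed port_base_vlan_cfg.
 */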
static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t port_base_vlan_state;
	int ret, err;

	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
		if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
			hns3_warn(hw, "Invalid operation! As current pvid set "
				  "is %u, disable pvid %u is invalid",
				  hw->port_base_vlan_cfg.pvid, pvid);
		return 0;
	}

	port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
				    HNS3_PORT_BASE_VLAN_DISABLE;
	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
			 ret);
		return ret;
	}

	ret = hns3_en_pvid_strip(hns, on);
	if (ret) {
		hns3_err(hw, "failed to config rx vlan strip for pvid, "
			 "ret = %d", ret);
		goto pvid_vlan_strip_fail;
	}

	if (pvid == HNS3_INVALID_PVID)
		goto out;
	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to update vlan filter entries, ret = %d",
			 ret);
		goto vlan_filter_set_fail;
	}

out:
	hw->port_base_vlan_cfg.state = port_base_vlan_state;
	hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
	return ret;

vlan_filter_set_fail:
	err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
				 HNS3_PORT_BASE_VLAN_ENABLE);
	if (err)
		hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
	err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
				   hw->port_base_vlan_cfg.pvid);
	if (err)
		hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

	return ret;
}

static int
hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool pvid_en_state_change;
	uint16_t pvid_state;
	int ret;

	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
		hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
			 RTE_ETHER_MAX_VLAN_ID);
		return -EINVAL;
	}

	/*
	 * If the PVID configuration state changes, the PVID state in
	 * struct hns3_tx_queue/hns3_rx_queue must be refreshed as well.
	 */
	pvid_state = hw->port_base_vlan_cfg.state;
	if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
	    (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
		pvid_en_state_change = false;
	else
		pvid_en_state_change = true;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_pvid_configure(hns, pvid, on);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		return ret;
	/*
	 * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE do the PVID related
	 * operations in Tx/Rx need to be processed by the driver.
	 */
	if (pvid_en_state_change &&
	    hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
		hns3_update_all_queues_pvid_proc_en(hw);

	return 0;
}

static int
hns3_default_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
	 * table, packets without vlan won't be received. So, add vlan 0 as
	 * the default vlan.
	 */
	ret = hns3_vlan_filter_configure(hns, 0, 1);
	if (ret)
		hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
	return ret;
}

static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * This function can be called in the initialization and reset process.
	 * In the reset process, it means that the hardware has been reset
	 * successfully and the driver needs to restore the configuration, so
	 * that the hardware configuration remains unchanged before and
	 * after reset.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
	}

	ret = hns3_vlan_filter_init(hns);
	if (ret) {
		hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
		return ret;
	}

	ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
				       RTE_ETHER_TYPE_VLAN);
	if (ret) {
		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
		return ret;
	}

	/*
	 * When in the reinit dev stage of the reset process, the following
	 * vlan-related configurations may differ from those at initialization;
	 * they will be restored to hardware in hns3_restore_vlan_table and
	 * hns3_restore_vlan_conf later.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
		if (ret) {
			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
			return ret;
		}

		ret = hns3_en_hw_strip_rxvtag(hns, false);
		if (ret) {
			hns3_err(hw, "rx strip configure fail in pf, ret =%d",
				 ret);
			return ret;
		}
	}

	return hns3_default_vlan_config(hns);
}

static int
hns3_restore_vlan_conf(struct hns3_adapter *hns)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	bool enable;
	int ret;

	if (!hw->data->promiscuous) {
		/* restore vlan filter states */
		offloads = hw->data->dev_conf.rxmode.offloads;
		enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
		ret = hns3_enable_vlan_filter(hns, enable);
		if (ret) {
			hns3_err(hw, "failed to restore vlan rx filter conf, "
				 "ret = %d", ret);
			return ret;
		}
	}

	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
	if (ret) {
		hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
	if (ret)
		hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);

	return ret;
}

static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_txmode *txmode;
	struct hns3_hw *hw = &hns->hw;
	int mask;
	int ret;

	txmode = &data->dev_conf.txmode;
	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
		hns3_warn(hw,
			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
			  "configuration is not supported! Ignore these two "
			  "parameters: hw_vlan_reject_tagged(%u), "
			  "hw_vlan_reject_untagged(%u)",
			  txmode->hw_vlan_reject_tagged,
			  txmode->hw_vlan_reject_untagged);

	/* Apply vlan offload setting */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
	ret = hns3_vlan_offload_set(dev, mask);
	if (ret) {
		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * If no pvid config is set in rte_eth_conf, the driver need not set
	 * any VLAN pvid related configuration to hardware.
	 */
	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
		return 0;

	/* Apply pvid setting */
	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
				 txmode->hw_vlan_insert_pvid);
	if (ret)
		hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
			 txmode->pvid, ret);

	return ret;
}

static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
		unsigned int tso_mss_max)
{
	struct hns3_cfg_tso_status_cmd *req;
	struct hns3_cmd_desc desc;
	uint16_t tso_mss;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hns3_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_min);
	req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_max);
	req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
		   uint16_t *allocated_size, bool is_alloc)
{
	struct hns3_umv_spc_alc_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_umv_spc_alc_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
	hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
	req->space_size = rte_cpu_to_le_32(space_size);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
			     is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = rte_le_to_cpu_32(desc.data[1]);

	return 0;
}
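
/*
 * The umv space is the unicast MAC table capacity granted to this function
 * by the allocate command above. used_umv_size counts the entries consumed
 * out of max_umv_size, and hns3_is_umv_space_full() gates new unicast adds.
 */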
static int
hns3_init_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t allocated_size = 0;
	int ret;

	ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
				 true);
	if (ret)
		return ret;

	if (allocated_size < pf->wanted_umv_size)
		PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
			     pf->wanted_umv_size, allocated_size);

	pf->max_umv_size = (!!allocated_size) ? allocated_size :
			   pf->wanted_umv_size;
	pf->used_umv_size = 0;
	return 0;
}

static int
hns3_uninit_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	if (pf->max_umv_size == 0)
		return 0;

	ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
	if (ret)
		return ret;

	pf->max_umv_size = 0;

	return 0;
}

static bool
hns3_is_umv_space_full(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	bool is_full;

	is_full = (pf->used_umv_size >= pf->max_umv_size);

	return is_full;
}

static void
hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (is_free) {
		if (pf->used_umv_size > 0)
			pf->used_umv_size--;
	} else
		pf->used_umv_size++;
}

static void
hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
		      const uint8_t *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
			    ((uint32_t)mac_addr[2] << 16) |
			    ((uint32_t)mac_addr[1] << 8) |
			    (uint32_t)mac_addr[0];
	uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];

	hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
		hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
	new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
}

static int
hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
			     uint8_t resp_code,
			     enum hns3_mac_vlan_tbl_opcode op)
{
	if (cmdq_resp) {
		hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
			 cmdq_resp);
		return -EIO;
	}

	if (op == HNS3_MAC_VLAN_ADD) {
		if (resp_code == 0 || resp_code == 1) {
			return 0;
		} else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for uc_overflow");
			return -ENOSPC;
		} else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for mc_overflow");
			return -ENOSPC;
		}

		hns3_err(hw, "add mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_REMOVE) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "remove mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "remove mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_LKUP) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "lookup mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	}

	hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
		 op);

	return -EINVAL;
}
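
/*
 * A multicast MAC vlan table entry spans HNS3_MC_MAC_VLAN_ADD_DESC_NUM (3)
 * command descriptors chained with HNS3_CMD_FLAG_NEXT, because the entry
 * carries a per-VF bitmap that does not fit in a single descriptor. Unicast
 * entries use a single descriptor.
 */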
static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req,
			 struct hns3_cmd_desc *desc, bool is_mc)
{
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
	} else {
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, desc, 1);
	}
	if (ret) {
		hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
			 ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc[0].retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_LKUP);
}

static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
		      struct hns3_mac_vlan_tbl_entry_cmd *req,
		      struct hns3_cmd_desc *mc_desc)
{
	uint8_t resp_code;
	uint16_t retval;
	int cfg_status;
	int ret;

	if (mc_desc == NULL) {
		struct hns3_cmd_desc desc;

		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false);
		memcpy(desc.data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, &desc, 1);
		resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc.retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	} else {
		hns3_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		mc_desc[0].retval = 0;
		ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
		resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(mc_desc[0].retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	}

	if (ret) {
		hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}

	return cfg_status;
}

static int
hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req)
{
	struct hns3_cmd_desc desc;
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc.retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_REMOVE);
}

static int
hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cmd_desc desc[3];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint16_t egress_port = 0;
	uint8_t vf_id;
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	/*
	 * In current version VF is not supported when PF is driven by DPDK
	 * driver, just need to configure parameters for PF vport.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
		       HNS3_MAC_EPORT_VFID_S, vf_id);

	req.egress_port = rte_cpu_to_le_16(egress_port);

	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

	/*
	 * Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Repeated unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false);
	if (ret == -ENOENT) {
		if (!hns3_is_umv_space_full(hw)) {
			ret = hns3_add_mac_vlan_tbl(hw, &req, NULL);
			if (!ret)
				hns3_update_umv_space(hw, false);
			return ret;
		}

		hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

		return -ENOSPC;
	}

	hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

	/* check if we just hit the duplicate */
	if (ret == 0) {
		hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
		return 0;
	}

	hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
		 mac_str);

	return ret;
}

static int
hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int ret;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses */
		if (rte_is_same_ether_addr(addr, mac_addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to add mc mac addr, same addrs"
				 "(%s) is added by the set_mc_mac_addr_list "
				 "API", mac_str);
			return -EINVAL;
		}
	}

	ret = hns3_add_mc_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
			 mac_str, ret);
	}
	return ret;
}

static int
hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	ret = hns3_remove_mc_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d",
			 mac_str, ret);
	}
	return ret;
}

static int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * In the hns3 network engine, UC and MC MAC addresses are added to
	 * the firmware with different commands, so determine which kind the
	 * input address is before choosing the command. Note that it is
	 * recommended to set MC MAC addresses with the API function named
	 * rte_eth_dev_set_mc_addr_list: adding MC addresses through
	 * rte_eth_dev_mac_addr_add may affect the number of UC MAC addresses
	 * that can be configured.
	 */
	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hns3_add_mc_addr_common(hw, mac_addr);
	else
		ret = hns3_add_uc_addr_common(hw, mac_addr);

	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
		return ret;
	}

	if (idx == 0)
		hw->mac.default_addr_setted = true;
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
	ret = hns3_remove_mac_vlan_tbl(hw, &req);
	if (ret == -ENOENT) /* mac addr does not exist in the mac vlan table */
		return 0;
	else if (ret == 0)
		hns3_update_umv_space(hw, true);

	return ret;
}

static void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hns3_remove_mc_addr_common(hw, mac_addr);
	else
		ret = hns3_remove_uc_addr_common(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}
}

static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mac_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *oaddr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	bool default_addr_setted;
	bool rm_succes = false;
	int ret, ret_val;

	/*
	 * The rte layer of the DPDK framework guarantees that the input
	 * parameter mac_addr is a valid address.
	 */
	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
	default_addr_setted = hw->mac.default_addr_setted;
	if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr))
		return 0;

	rte_spinlock_lock(&hw->lock);
	if (default_addr_setted) {
		ret = hns3_remove_uc_addr_common(hw, oaddr);
		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       oaddr);
			hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
				  mac_str, ret);
			rm_succes = false;
		} else
			rm_succes = true;
	}

	ret = hns3_add_uc_addr_common(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
		goto err_add_uc_addr;
	}

	ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
	if (ret) {
		hns3_err(hw, "Failed to configure mac pause address: %d", ret);
		goto err_pause_addr_cfg;
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	hw->mac.default_addr_setted = true;
	rte_spinlock_unlock(&hw->lock);

	return 0;

err_pause_addr_cfg:
	ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_warn(hw,
			  "Failed to roll back to delete set mac addr(%s): %d",
			  mac_str, ret_val);
	}

err_add_uc_addr:
	if (rm_succes) {
		ret_val = hns3_add_uc_addr_common(hw, oaddr);
		if (ret_val) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       oaddr);
			hns3_warn(hw,
				  "Failed to restore old uc mac addr(%s): %d",
				  mac_str, ret_val);
			hw->mac.default_addr_setted = false;
		}
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	int err = 0;
	int ret;
	int i;

	for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) {
		addr = &hw->data->mac_addrs[i];
		if (rte_is_zero_ether_addr(addr))
			continue;
		if (rte_is_multicast_ether_addr(addr))
			ret = del ? hns3_remove_mc_addr(hw, addr) :
			      hns3_add_mc_addr(hw, addr);
		else
			ret = del ? hns3_remove_uc_addr_common(hw, addr) :
			      hns3_add_uc_addr_common(hw, addr);

		if (ret) {
			err = ret;
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to %s mac addr(%s) index:%d "
				 "ret = %d.", del ? "remove" : "restore",
				 mac_str, i, ret);
		}
	}
	return err;
}
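
/*
 * Set or clear one vfid bit in the VF bitmap of a multicast MAC vlan table
 * entry. The bitmap spans two command descriptors: the first 192 vfids live
 * in desc[1] and the remainder in desc[2].
 */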
"remove" : "restore", 1851 mac_str, i, ret); 1852 } 1853 } 1854 return err; 1855 } 1856 1857 static void 1858 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr) 1859 { 1860 #define HNS3_VF_NUM_IN_FIRST_DESC 192 1861 uint8_t word_num; 1862 uint8_t bit_num; 1863 1864 if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) { 1865 word_num = vfid / 32; 1866 bit_num = vfid % 32; 1867 if (clr) 1868 desc[1].data[word_num] &= 1869 rte_cpu_to_le_32(~(1UL << bit_num)); 1870 else 1871 desc[1].data[word_num] |= 1872 rte_cpu_to_le_32(1UL << bit_num); 1873 } else { 1874 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32; 1875 bit_num = vfid % 32; 1876 if (clr) 1877 desc[2].data[word_num] &= 1878 rte_cpu_to_le_32(~(1UL << bit_num)); 1879 else 1880 desc[2].data[word_num] |= 1881 rte_cpu_to_le_32(1UL << bit_num); 1882 } 1883 } 1884 1885 static int 1886 hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1887 { 1888 struct hns3_mac_vlan_tbl_entry_cmd req; 1889 struct hns3_cmd_desc desc[3]; 1890 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1891 uint8_t vf_id; 1892 int ret; 1893 1894 /* Check if mac addr is valid */ 1895 if (!rte_is_multicast_ether_addr(mac_addr)) { 1896 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1897 mac_addr); 1898 hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid", 1899 mac_str); 1900 return -EINVAL; 1901 } 1902 1903 memset(&req, 0, sizeof(req)); 1904 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1905 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1906 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1907 if (ret) { 1908 /* This mac addr do not exist, add new entry for it */ 1909 memset(desc[0].data, 0, sizeof(desc[0].data)); 1910 memset(desc[1].data, 0, sizeof(desc[0].data)); 1911 memset(desc[2].data, 0, sizeof(desc[0].data)); 1912 } 1913 1914 /* 1915 * In current version VF is not supported when PF is driven by DPDK 1916 * driver, just need to configure parameters for PF vport. 1917 */ 1918 vf_id = HNS3_PF_FUNC_ID; 1919 hns3_update_desc_vfid(desc, vf_id, false); 1920 ret = hns3_add_mac_vlan_tbl(hw, &req, desc); 1921 if (ret) { 1922 if (ret == -ENOSPC) 1923 hns3_err(hw, "mc mac vlan table is full"); 1924 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1925 mac_addr); 1926 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret); 1927 } 1928 1929 return ret; 1930 } 1931 1932 static int 1933 hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1934 { 1935 struct hns3_mac_vlan_tbl_entry_cmd req; 1936 struct hns3_cmd_desc desc[3]; 1937 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1938 uint8_t vf_id; 1939 int ret; 1940 1941 /* Check if mac addr is valid */ 1942 if (!rte_is_multicast_ether_addr(mac_addr)) { 1943 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1944 mac_addr); 1945 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid", 1946 mac_str); 1947 return -EINVAL; 1948 } 1949 1950 memset(&req, 0, sizeof(req)); 1951 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1952 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1953 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1954 if (ret == 0) { 1955 /* 1956 * This mac addr exist, remove this handle's VFID for it. 1957 * In current version VF is not supported when PF is driven by 1958 * DPDK driver, just need to configure parameters for PF vport. 
1959 */ 1960 vf_id = HNS3_PF_FUNC_ID; 1961 hns3_update_desc_vfid(desc, vf_id, true); 1962 1963 /* All the vfid is zero, so need to delete this entry */ 1964 ret = hns3_remove_mac_vlan_tbl(hw, &req); 1965 } else if (ret == -ENOENT) { 1966 /* This mac addr doesn't exist. */ 1967 return 0; 1968 } 1969 1970 if (ret) { 1971 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1972 mac_addr); 1973 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret); 1974 } 1975 1976 return ret; 1977 } 1978 1979 static int 1980 hns3_set_mc_addr_chk_param(struct hns3_hw *hw, 1981 struct rte_ether_addr *mc_addr_set, 1982 uint32_t nb_mc_addr) 1983 { 1984 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1985 struct rte_ether_addr *addr; 1986 uint32_t i; 1987 uint32_t j; 1988 1989 if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { 1990 hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " 1991 "invalid. valid range: 0~%d", 1992 nb_mc_addr, HNS3_MC_MACADDR_NUM); 1993 return -EINVAL; 1994 } 1995 1996 /* Check if input mac addresses are valid */ 1997 for (i = 0; i < nb_mc_addr; i++) { 1998 addr = &mc_addr_set[i]; 1999 if (!rte_is_multicast_ether_addr(addr)) { 2000 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2001 addr); 2002 hns3_err(hw, 2003 "failed to set mc mac addr, addr(%s) invalid.", 2004 mac_str); 2005 return -EINVAL; 2006 } 2007 2008 /* Check if there are duplicate addresses */ 2009 for (j = i + 1; j < nb_mc_addr; j++) { 2010 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 2011 hns3_ether_format_addr(mac_str, 2012 RTE_ETHER_ADDR_FMT_SIZE, 2013 addr); 2014 hns3_err(hw, "failed to set mc mac addr, " 2015 "addrs invalid. two same addrs(%s).", 2016 mac_str); 2017 return -EINVAL; 2018 } 2019 } 2020 2021 /* 2022 * Check if there are duplicate addresses between mac_addrs 2023 * and mc_addr_set 2024 */ 2025 for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) { 2026 if (rte_is_same_ether_addr(addr, 2027 &hw->data->mac_addrs[j])) { 2028 hns3_ether_format_addr(mac_str, 2029 RTE_ETHER_ADDR_FMT_SIZE, 2030 addr); 2031 hns3_err(hw, "failed to set mc mac addr, " 2032 "addrs invalid. 
addrs(%s) has already " 2033 "configured in mac_addr add API", 2034 mac_str); 2035 return -EINVAL; 2036 } 2037 } 2038 } 2039 2040 return 0; 2041 } 2042 2043 static void 2044 hns3_set_mc_addr_calc_addr(struct hns3_hw *hw, 2045 struct rte_ether_addr *mc_addr_set, 2046 int mc_addr_num, 2047 struct rte_ether_addr *reserved_addr_list, 2048 int *reserved_addr_num, 2049 struct rte_ether_addr *add_addr_list, 2050 int *add_addr_num, 2051 struct rte_ether_addr *rm_addr_list, 2052 int *rm_addr_num) 2053 { 2054 struct rte_ether_addr *addr; 2055 int current_addr_num; 2056 int reserved_num = 0; 2057 int add_num = 0; 2058 int rm_num = 0; 2059 int num; 2060 int i; 2061 int j; 2062 bool same_addr; 2063 2064 /* Calculate the mc mac address list that should be removed */ 2065 current_addr_num = hw->mc_addrs_num; 2066 for (i = 0; i < current_addr_num; i++) { 2067 addr = &hw->mc_addrs[i]; 2068 same_addr = false; 2069 for (j = 0; j < mc_addr_num; j++) { 2070 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 2071 same_addr = true; 2072 break; 2073 } 2074 } 2075 2076 if (!same_addr) { 2077 rte_ether_addr_copy(addr, &rm_addr_list[rm_num]); 2078 rm_num++; 2079 } else { 2080 rte_ether_addr_copy(addr, 2081 &reserved_addr_list[reserved_num]); 2082 reserved_num++; 2083 } 2084 } 2085 2086 /* Calculate the mc mac address list that should be added */ 2087 for (i = 0; i < mc_addr_num; i++) { 2088 addr = &mc_addr_set[i]; 2089 same_addr = false; 2090 for (j = 0; j < current_addr_num; j++) { 2091 if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) { 2092 same_addr = true; 2093 break; 2094 } 2095 } 2096 2097 if (!same_addr) { 2098 rte_ether_addr_copy(addr, &add_addr_list[add_num]); 2099 add_num++; 2100 } 2101 } 2102 2103 /* Reorder the mc mac address list maintained by driver */ 2104 for (i = 0; i < reserved_num; i++) 2105 rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]); 2106 2107 for (i = 0; i < rm_num; i++) { 2108 num = reserved_num + i; 2109 rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]); 2110 } 2111 2112 *reserved_addr_num = reserved_num; 2113 *add_addr_num = add_num; 2114 *rm_addr_num = rm_num; 2115 } 2116 2117 static int 2118 hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, 2119 struct rte_ether_addr *mc_addr_set, 2120 uint32_t nb_mc_addr) 2121 { 2122 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2123 struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM]; 2124 struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM]; 2125 struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM]; 2126 struct rte_ether_addr *addr; 2127 int reserved_addr_num; 2128 int add_addr_num; 2129 int rm_addr_num; 2130 int mc_addr_num; 2131 int num; 2132 int ret; 2133 int i; 2134 2135 /* Check if input parameters are valid */ 2136 ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); 2137 if (ret) 2138 return ret; 2139 2140 rte_spinlock_lock(&hw->lock); 2141 2142 /* 2143 * Calculate the mc mac address lists those should be removed and be 2144 * added, Reorder the mc mac address list maintained by driver. 
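	 *
	 * A worked example (addresses assumed): if hw->mc_addrs currently
	 * holds {A, B, C} and mc_addr_set is {B, C, D}, the helper yields
	 * rm_addr_list = {A}, add_addr_list = {D} and reserved_addr_list =
	 * {B, C}; after the two loops below hw->mc_addrs holds {B, C, D}.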
2145 */ 2146 mc_addr_num = (int)nb_mc_addr; 2147 hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num, 2148 reserved_addr_list, &reserved_addr_num, 2149 add_addr_list, &add_addr_num, 2150 rm_addr_list, &rm_addr_num); 2151 2152 /* Remove mc mac addresses */ 2153 for (i = 0; i < rm_addr_num; i++) { 2154 num = rm_addr_num - i - 1; 2155 addr = &rm_addr_list[num]; 2156 ret = hns3_remove_mc_addr(hw, addr); 2157 if (ret) { 2158 rte_spinlock_unlock(&hw->lock); 2159 return ret; 2160 } 2161 hw->mc_addrs_num--; 2162 } 2163 2164 /* Add mc mac addresses */ 2165 for (i = 0; i < add_addr_num; i++) { 2166 addr = &add_addr_list[i]; 2167 ret = hns3_add_mc_addr(hw, addr); 2168 if (ret) { 2169 rte_spinlock_unlock(&hw->lock); 2170 return ret; 2171 } 2172 2173 num = reserved_addr_num + i; 2174 rte_ether_addr_copy(addr, &hw->mc_addrs[num]); 2175 hw->mc_addrs_num++; 2176 } 2177 rte_spinlock_unlock(&hw->lock); 2178 2179 return 0; 2180 } 2181 2182 static int 2183 hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) 2184 { 2185 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 2186 struct hns3_hw *hw = &hns->hw; 2187 struct rte_ether_addr *addr; 2188 int err = 0; 2189 int ret; 2190 int i; 2191 2192 for (i = 0; i < hw->mc_addrs_num; i++) { 2193 addr = &hw->mc_addrs[i]; 2194 if (!rte_is_multicast_ether_addr(addr)) 2195 continue; 2196 if (del) 2197 ret = hns3_remove_mc_addr(hw, addr); 2198 else 2199 ret = hns3_add_mc_addr(hw, addr); 2200 if (ret) { 2201 err = ret; 2202 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2203 addr); 2204 hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d", 2205 del ? "Remove" : "Restore", mac_str, ret); 2206 } 2207 } 2208 return err; 2209 } 2210 2211 static int 2212 hns3_check_mq_mode(struct rte_eth_dev *dev) 2213 { 2214 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 2215 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 2216 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2217 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2218 struct rte_eth_dcb_rx_conf *dcb_rx_conf; 2219 struct rte_eth_dcb_tx_conf *dcb_tx_conf; 2220 uint8_t num_tc; 2221 int max_tc = 0; 2222 int i; 2223 2224 if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) || 2225 (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB || 2226 tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) { 2227 hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.", 2228 rx_mq_mode, tx_mq_mode); 2229 return -EOPNOTSUPP; 2230 } 2231 2232 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 2233 dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; 2234 if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) { 2235 if (dcb_rx_conf->nb_tcs > pf->tc_max) { 2236 hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.", 2237 dcb_rx_conf->nb_tcs, pf->tc_max); 2238 return -EINVAL; 2239 } 2240 2241 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS || 2242 dcb_rx_conf->nb_tcs == HNS3_8_TCS)) { 2243 hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, " 2244 "nb_tcs(%d) != %d or %d in rx direction.", 2245 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS); 2246 return -EINVAL; 2247 } 2248 2249 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) { 2250 hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)", 2251 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs); 2252 return -EINVAL; 2253 } 2254 2255 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) { 2256 if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) { 2257 hns3_err(hw, "dcb_tc[%d] = %u in rx direction, " 2258 "is not equal to one in tx direction.", 2259 i, 
dcb_rx_conf->dcb_tc[i]);
				return -EINVAL;
			}
			if (dcb_rx_conf->dcb_tc[i] > max_tc)
				max_tc = dcb_rx_conf->dcb_tc[i];
		}

		num_tc = max_tc + 1;
		if (num_tc > dcb_rx_conf->nb_tcs) {
			hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
				 num_tc, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}
	}

	return 0;
}

static int
hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en,
			   enum hns3_ring_type queue_type, uint16_t queue_id)
{
	struct hns3_cmd_desc desc;
	struct hns3_ctrl_vector_chain_cmd *req =
		(struct hns3_ctrl_vector_chain_cmd *)desc.data;
	enum hns3_opcode_type op;
	uint16_t tqp_type_and_id = 0;
	uint16_t type;
	uint16_t gl;
	int ret;

	op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
	hns3_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M,
					    HNS3_TQP_INT_ID_L_S);
	req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M,
					      HNS3_TQP_INT_ID_H_S);

	if (queue_type == HNS3_RING_TYPE_RX)
		gl = HNS3_RING_GL_RX;
	else
		gl = HNS3_RING_GL_TX;

	type = queue_type;

	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
		       type);
	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
		       gl);
	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
	req->int_cause_num = 1;
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.",
			 en ? "Map" : "Unmap", queue_id, vector_id, ret);
		return ret;
	}

	return 0;
}

static int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
	uint16_t vec;
	int ret;
	int i;

	/*
	 * In hns3 network engine, vector 0 is always the misc interrupt of
	 * this function, and vectors 1~N can be used for the queues of the
	 * function. Tx and Rx queues with the same number share an interrupt
	 * vector. During initialization, all hardware mapping relationships
	 * between queues and interrupt vectors need to be cleared, so that
	 * errors caused by residual configurations, such as an unexpected Tx
	 * interrupt, can be avoided.
	 */
	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1; /* the last interrupt is reserved */
	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/*
		 * Set gap limiter/rate limiter/quantity limiter algorithm
		 * configuration for interrupt coalescing of the queue's
		 * interrupt.
		 */
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
		/*
		 * QL (quantity limiter) is not used currently, just set 0 to
		 * close it.
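		 *
		 * Worked example (figures assumed for illustration): with
		 * hw->num_msi = 17 and mapping_mode ==
		 * HNS3_INTR_MAPPING_VEC_RSV_ONE, vec = 17 - 1 - 1 = 15, so
		 * hw->intr_tqps_num = RTE_MIN(15, hw->tqps_num) queue pairs
		 * are configured and unbound by this loop.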
2354 */ 2355 hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); 2356 2357 ret = hns3_bind_ring_with_vector(hw, vec, false, 2358 HNS3_RING_TYPE_TX, i); 2359 if (ret) { 2360 PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with " 2361 "vector: %u, ret=%d", i, vec, ret); 2362 return ret; 2363 } 2364 2365 ret = hns3_bind_ring_with_vector(hw, vec, false, 2366 HNS3_RING_TYPE_RX, i); 2367 if (ret) { 2368 PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with " 2369 "vector: %u, ret=%d", i, vec, ret); 2370 return ret; 2371 } 2372 } 2373 2374 return 0; 2375 } 2376 2377 static int 2378 hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf) 2379 { 2380 struct hns3_adapter *hns = dev->data->dev_private; 2381 struct hns3_hw *hw = &hns->hw; 2382 uint32_t max_rx_pkt_len; 2383 uint16_t mtu; 2384 int ret; 2385 2386 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) 2387 return 0; 2388 2389 /* 2390 * If jumbo frames are enabled, MTU needs to be refreshed 2391 * according to the maximum RX packet length. 2392 */ 2393 max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; 2394 if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || 2395 max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { 2396 hns3_err(hw, "maximum Rx packet length must be greater than %u " 2397 "and no more than %u when jumbo frame enabled.", 2398 (uint16_t)HNS3_DEFAULT_FRAME_LEN, 2399 (uint16_t)HNS3_MAX_FRAME_LEN); 2400 return -EINVAL; 2401 } 2402 2403 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); 2404 ret = hns3_dev_mtu_set(dev, mtu); 2405 if (ret) 2406 return ret; 2407 dev->data->mtu = mtu; 2408 2409 return 0; 2410 } 2411 2412 static int 2413 hns3_setup_dcb(struct rte_eth_dev *dev) 2414 { 2415 struct hns3_adapter *hns = dev->data->dev_private; 2416 struct hns3_hw *hw = &hns->hw; 2417 int ret; 2418 2419 if (!hns3_dev_dcb_supported(hw)) { 2420 hns3_err(hw, "this port does not support dcb configurations."); 2421 return -EOPNOTSUPP; 2422 } 2423 2424 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) { 2425 hns3_err(hw, "MAC pause enabled, cannot config dcb info."); 2426 return -EOPNOTSUPP; 2427 } 2428 2429 ret = hns3_dcb_configure(hns); 2430 if (ret) 2431 hns3_err(hw, "failed to config dcb: %d", ret); 2432 2433 return ret; 2434 } 2435 2436 static int 2437 hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds) 2438 { 2439 int ret; 2440 2441 /* 2442 * Some hardware doesn't support auto-negotiation, but users may not 2443 * configure link_speeds (default 0), which means auto-negotiation. 2444 * In this case, a warning message need to be printed, instead of 2445 * an error. 
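	 *
	 * For example (configurations assumed): link_speeds == 0
	 * (ETH_LINK_SPEED_AUTONEG) on a port without autoneg support only
	 * triggers the warning below, while a fixed mask such as
	 * ETH_LINK_SPEED_25G is validated by hns3_check_port_speed().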
 */
	if (link_speeds == ETH_LINK_SPEED_AUTONEG &&
	    hw->mac.support_autoneg == 0) {
		hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
		return 0;
	}

	if (link_speeds != ETH_LINK_SPEED_AUTONEG) {
		ret = hns3_check_port_speed(hw, link_speeds);
		if (ret)
			return ret;
	}

	return 0;
}

static int
hns3_check_dev_conf(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	int ret;

	ret = hns3_check_mq_mode(dev);
	if (ret)
		return ret;

	return hns3_check_link_speed(hw, conf->link_speeds);
}

static int
hns3_dev_configure(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;
	struct rte_eth_rss_conf rss_conf;
	bool gro_en;
	int ret;

	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);

	/*
	 * Some versions of the hardware network engine do not support
	 * individually enabling/disabling/resetting the Tx or Rx queue. These
	 * devices must enable/disable/reset Tx and Rx queues at the same time.
	 * When the number of Tx queues allocated by upper applications is not
	 * equal to the number of Rx queues, the driver needs to set up fake Tx
	 * or Rx queues to adjust the number of Tx/Rx queues; otherwise, the
	 * network engine cannot work as usual. These fake queues are
	 * imperceptible to, and cannot be used by, upper applications.
	 */
	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
	if (ret) {
		hns3_err(hw, "failed to set Rx/Tx fake queues, ret = %d.", ret);
		hw->cfg_max_queues = 0;
		return ret;
	}

	hw->adapter_state = HNS3_NIC_CONFIGURING;
	ret = hns3_check_dev_conf(dev);
	if (ret)
		goto cfg_err;

	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
		ret = hns3_setup_dcb(dev);
		if (ret)
			goto cfg_err;
	}

	/* When RSS is not configured, packets are redirected to queue 0 */
	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
		rss_conf = conf->rx_adv_conf.rss_conf;
		hw->rss_dis_flag = false;
		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
		if (ret)
			goto cfg_err;
	}

	ret = hns3_refresh_mtu(dev, conf);
	if (ret)
		goto cfg_err;

	ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
	if (ret)
		goto cfg_err;

	ret = hns3_dev_configure_vlan(dev);
	if (ret)
		goto cfg_err;

	/* config hardware GRO */
	gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
true : false;
	ret = hns3_config_gro(hw, gro_en);
	if (ret)
		goto cfg_err;

	hns3_init_rx_ptype_tble(dev);
	hw->adapter_state = HNS3_NIC_CONFIGURED;

	return 0;

cfg_err:
	hw->cfg_max_queues = 0;
	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
	hw->adapter_state = HNS3_NIC_INITIALIZED;

	return ret;
}

static int
hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
{
	struct hns3_config_max_frm_size_cmd *req;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hns3_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = rte_cpu_to_le_16(new_mps);
	req->min_frm_size = RTE_ETHER_MIN_LEN;

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint16_t original_mps = hns->pf.mps;
	int err;
	int ret;

	ret = hns3_set_mac_mtu(hw, mps);
	if (ret) {
		hns3_err(hw, "failed to set mtu, ret = %d", ret);
		return ret;
	}

	hns->pf.mps = mps;
	ret = hns3_buffer_alloc(hw);
	if (ret) {
		hns3_err(hw, "failed to allocate buffer, ret = %d", ret);
		goto rollback;
	}

	return 0;

rollback:
	err = hns3_set_mac_mtu(hw, original_mps);
	if (err) {
		hns3_err(hw, "failed to roll back MTU, err = %d", err);
		return ret;
	}
	hns->pf.mps = original_mps;

	return ret;
}

static int
hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
	struct hns3_hw *hw = &hns->hw;
	bool is_jumbo_frame;
	int ret;

	if (dev->data->dev_started) {
		hns3_err(hw, "Failed to set mtu, port %u must be stopped "
			 "before configuration", dev->data->port_id);
		return -EBUSY;
	}

	rte_spinlock_lock(&hw->lock);
	is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false;
	frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);

	/*
	 * The maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can
	 * safely be assigned to a "uint16_t" variable.
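	 *
	 * Worked example (values assumed): mtu = 1500 gives frame_size =
	 * 1500 + HNS3_ETH_OVERHEAD, which stays far below HNS3_MAX_FRAME_LEN,
	 * so the cast below cannot truncate.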
 */
	ret = hns3_config_mtu(hw, (uint16_t)frame_size);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
			 dev->data->port_id, mtu, ret);
		return ret;
	}

	if (is_jumbo_frame)
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static uint32_t
hns3_get_copper_port_speed_capa(uint32_t supported_speed)
{
	uint32_t speed_capa = 0;

	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
		speed_capa |= ETH_LINK_SPEED_10M_HD;
	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
		speed_capa |= ETH_LINK_SPEED_10M;
	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
		speed_capa |= ETH_LINK_SPEED_100M_HD;
	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
		speed_capa |= ETH_LINK_SPEED_100M;
	if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
		speed_capa |= ETH_LINK_SPEED_1G;

	return speed_capa;
}

static uint32_t
hns3_get_fiber_port_speed_capa(uint32_t supported_speed)
{
	uint32_t speed_capa = 0;

	if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
		speed_capa |= ETH_LINK_SPEED_1G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
		speed_capa |= ETH_LINK_SPEED_10G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
		speed_capa |= ETH_LINK_SPEED_25G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
		speed_capa |= ETH_LINK_SPEED_40G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
		speed_capa |= ETH_LINK_SPEED_50G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
		speed_capa |= ETH_LINK_SPEED_100G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
		speed_capa |= ETH_LINK_SPEED_200G;

	return speed_capa;
}

static uint32_t
hns3_get_speed_capa(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;
	uint32_t speed_capa;

	if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
		speed_capa =
			hns3_get_copper_port_speed_capa(mac->supported_speed);
	else
		speed_capa =
			hns3_get_fiber_port_speed_capa(mac->supported_speed);

	if (mac->support_autoneg == 0)
		speed_capa |= ETH_LINK_SPEED_FIXED;

	return speed_capa;
}

int
hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint16_t queue_num = hw->tqps_num;

	/*
	 * In interrupt mode, 'max_rx_queues' is set based on the number of
	 * MSI-X interrupt resources of the hardware.
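	 *
	 * For example (figures assumed): a device with hw->tqps_num = 64 but
	 * only 16 TQP interrupt vectors reports max_rx_queues = 16 when
	 * intr_conf.rxq == 1, and 64 otherwise.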
2723 */ 2724 if (hw->data->dev_conf.intr_conf.rxq == 1) 2725 queue_num = hw->intr_tqps_num; 2726 2727 info->max_rx_queues = queue_num; 2728 info->max_tx_queues = hw->tqps_num; 2729 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ 2730 info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; 2731 info->max_mac_addrs = HNS3_UC_MACADDR_NUM; 2732 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; 2733 info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; 2734 info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 2735 DEV_RX_OFFLOAD_TCP_CKSUM | 2736 DEV_RX_OFFLOAD_UDP_CKSUM | 2737 DEV_RX_OFFLOAD_SCTP_CKSUM | 2738 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 2739 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | 2740 DEV_RX_OFFLOAD_KEEP_CRC | 2741 DEV_RX_OFFLOAD_SCATTER | 2742 DEV_RX_OFFLOAD_VLAN_STRIP | 2743 DEV_RX_OFFLOAD_VLAN_FILTER | 2744 DEV_RX_OFFLOAD_JUMBO_FRAME | 2745 DEV_RX_OFFLOAD_RSS_HASH | 2746 DEV_RX_OFFLOAD_TCP_LRO); 2747 info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 2748 DEV_TX_OFFLOAD_IPV4_CKSUM | 2749 DEV_TX_OFFLOAD_TCP_CKSUM | 2750 DEV_TX_OFFLOAD_UDP_CKSUM | 2751 DEV_TX_OFFLOAD_SCTP_CKSUM | 2752 DEV_TX_OFFLOAD_MULTI_SEGS | 2753 DEV_TX_OFFLOAD_TCP_TSO | 2754 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 2755 DEV_TX_OFFLOAD_GRE_TNL_TSO | 2756 DEV_TX_OFFLOAD_GENEVE_TNL_TSO | 2757 DEV_TX_OFFLOAD_MBUF_FAST_FREE | 2758 hns3_txvlan_cap_get(hw)); 2759 2760 if (hns3_dev_outer_udp_cksum_supported(hw)) 2761 info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; 2762 2763 if (hns3_dev_indep_txrx_supported(hw)) 2764 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 2765 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 2766 2767 if (hns3_dev_ptp_supported(hw)) 2768 info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; 2769 2770 info->rx_desc_lim = (struct rte_eth_desc_lim) { 2771 .nb_max = HNS3_MAX_RING_DESC, 2772 .nb_min = HNS3_MIN_RING_DESC, 2773 .nb_align = HNS3_ALIGN_RING_DESC, 2774 }; 2775 2776 info->tx_desc_lim = (struct rte_eth_desc_lim) { 2777 .nb_max = HNS3_MAX_RING_DESC, 2778 .nb_min = HNS3_MIN_RING_DESC, 2779 .nb_align = HNS3_ALIGN_RING_DESC, 2780 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, 2781 .nb_mtu_seg_max = hw->max_non_tso_bd_num, 2782 }; 2783 2784 info->speed_capa = hns3_get_speed_capa(hw); 2785 info->default_rxconf = (struct rte_eth_rxconf) { 2786 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, 2787 /* 2788 * If there are no available Rx buffer descriptors, incoming 2789 * packets are always dropped by hardware based on hns3 network 2790 * engine. 
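		 *
		 * The default rx_drop_en = 1 below therefore only mirrors
		 * what the hardware does anyway; applications cannot opt
		 * into backpressure instead of drops on this engine.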
2791 */ 2792 .rx_drop_en = 1, 2793 .offloads = 0, 2794 }; 2795 info->default_txconf = (struct rte_eth_txconf) { 2796 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, 2797 .offloads = 0, 2798 }; 2799 2800 info->reta_size = hw->rss_ind_tbl_size; 2801 info->hash_key_size = HNS3_RSS_KEY_SIZE; 2802 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; 2803 2804 info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2805 info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2806 info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2807 info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2808 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2809 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2810 2811 return 0; 2812 } 2813 2814 static int 2815 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, 2816 size_t fw_size) 2817 { 2818 struct hns3_adapter *hns = eth_dev->data->dev_private; 2819 struct hns3_hw *hw = &hns->hw; 2820 uint32_t version = hw->fw_version; 2821 int ret; 2822 2823 ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", 2824 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, 2825 HNS3_FW_VERSION_BYTE3_S), 2826 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, 2827 HNS3_FW_VERSION_BYTE2_S), 2828 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, 2829 HNS3_FW_VERSION_BYTE1_S), 2830 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, 2831 HNS3_FW_VERSION_BYTE0_S)); 2832 if (ret < 0) 2833 return -EINVAL; 2834 2835 ret += 1; /* add the size of '\0' */ 2836 if (fw_size < (size_t)ret) 2837 return ret; 2838 else 2839 return 0; 2840 } 2841 2842 static int 2843 hns3_update_port_link_info(struct rte_eth_dev *eth_dev) 2844 { 2845 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2846 int ret; 2847 2848 (void)hns3_update_link_status(hw); 2849 2850 ret = hns3_update_link_info(eth_dev); 2851 if (ret) 2852 hw->mac.link_status = ETH_LINK_DOWN; 2853 2854 return ret; 2855 } 2856 2857 static void 2858 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev, 2859 struct rte_eth_link *new_link) 2860 { 2861 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2862 struct hns3_mac *mac = &hw->mac; 2863 2864 switch (mac->link_speed) { 2865 case ETH_SPEED_NUM_10M: 2866 case ETH_SPEED_NUM_100M: 2867 case ETH_SPEED_NUM_1G: 2868 case ETH_SPEED_NUM_10G: 2869 case ETH_SPEED_NUM_25G: 2870 case ETH_SPEED_NUM_40G: 2871 case ETH_SPEED_NUM_50G: 2872 case ETH_SPEED_NUM_100G: 2873 case ETH_SPEED_NUM_200G: 2874 if (mac->link_status) 2875 new_link->link_speed = mac->link_speed; 2876 break; 2877 default: 2878 if (mac->link_status) 2879 new_link->link_speed = ETH_SPEED_NUM_UNKNOWN; 2880 break; 2881 } 2882 2883 if (!mac->link_status) 2884 new_link->link_speed = ETH_SPEED_NUM_NONE; 2885 2886 new_link->link_duplex = mac->link_duplex; 2887 new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; 2888 new_link->link_autoneg = mac->link_autoneg; 2889 } 2890 2891 static int 2892 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) 2893 { 2894 #define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */ 2895 #define HNS3_MAX_LINK_CHECK_TIMES 20 /* 2s (100 * 20ms) in total */ 2896 2897 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2898 uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES; 2899 struct hns3_mac *mac = &hw->mac; 2900 struct rte_eth_link new_link; 2901 int ret; 2902 2903 /* When port is stopped, report link down. 
 */
	if (eth_dev->data->dev_started == 0) {
		new_link.link_autoneg = mac->link_autoneg;
		new_link.link_duplex = mac->link_duplex;
		new_link.link_speed = ETH_SPEED_NUM_NONE;
		new_link.link_status = ETH_LINK_DOWN;
		goto out;
	}

	do {
		ret = hns3_update_port_link_info(eth_dev);
		if (ret) {
			hns3_err(hw, "failed to get port link info, ret = %d.",
				 ret);
			break;
		}

		if (!wait_to_complete || mac->link_status == ETH_LINK_UP)
			break;

		rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
	} while (retry_cnt--);

	memset(&new_link, 0, sizeof(new_link));
	hns3_setup_linkstatus(eth_dev, &new_link);

out:
	return rte_eth_linkstatus_set(eth_dev, &new_link);
}

static int
hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (!(status->pf_state & HNS3_PF_STATE_DONE))
		return -EINVAL;

	pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;

	return 0;
}

static int
hns3_query_function_status(struct hns3_hw *hw)
{
#define HNS3_QUERY_MAX_CNT	10
#define HNS3_QUERY_SLEEP_MS	1
	struct hns3_func_status_cmd *req;
	struct hns3_cmd_desc desc;
	int timeout = 0;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hns3_func_status_cmd *)desc.data;

	do {
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			PMD_INIT_LOG(ERR, "query function status failed %d",
				     ret);
			return ret;
		}

		/* Check if PF reset is done */
		if (req->pf_state)
			break;

		rte_delay_ms(HNS3_QUERY_SLEEP_MS);
	} while (timeout++ < HNS3_QUERY_MAX_CNT);

	return hns3_parse_func_status(hw, req);
}

static int
hns3_get_pf_max_tqp_num(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) {
		/*
		 * The total_tqps_num obtained from firmware is the maximum
		 * tqp number of this port, which should be shared by the PF
		 * and its VFs. In most cases there is no need for the PF to
		 * own that many tqps. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
		 * coming from the config file, is the maximum queue number
		 * assigned by the user to the PF of this port. So users can
		 * modify the maximum queue number of the PF according to
		 * their own application scenarios, which is more flexible to
		 * use. In addition, much memory can be saved because queue
		 * statistics room is allocated according to the actual number
		 * of queues required. The maximum queue number of the PF for
		 * a network engine with revision_id greater than 0x30 is
		 * assigned by the config file.
		 */
		if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) {
			hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) "
				 "must be greater than 0.",
				 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF);
			return -EINVAL;
		}

		hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
				       hw->total_tqps_num);
	} else {
		/*
		 * Due to the limitation on the number of PF interrupts
		 * available, the maximum queue number assigned to a PF on
		 * the network engine with revision_id 0x21 is 64.
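		 *
		 * For example (figures assumed): a port reporting
		 * total_tqps_num = 128 on such an engine still ends up with
		 * hw->tqps_num = RTE_MIN(128, HNS3_MAX_TQP_NUM_HIP08_PF) = 64
		 * below.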
3013 */ 3014 hw->tqps_num = RTE_MIN(hw->total_tqps_num, 3015 HNS3_MAX_TQP_NUM_HIP08_PF); 3016 } 3017 3018 return 0; 3019 } 3020 3021 static int 3022 hns3_query_pf_resource(struct hns3_hw *hw) 3023 { 3024 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3025 struct hns3_pf *pf = &hns->pf; 3026 struct hns3_pf_res_cmd *req; 3027 struct hns3_cmd_desc desc; 3028 int ret; 3029 3030 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); 3031 ret = hns3_cmd_send(hw, &desc, 1); 3032 if (ret) { 3033 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); 3034 return ret; 3035 } 3036 3037 req = (struct hns3_pf_res_cmd *)desc.data; 3038 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) + 3039 rte_le_to_cpu_16(req->ext_tqp_num); 3040 ret = hns3_get_pf_max_tqp_num(hw); 3041 if (ret) 3042 return ret; 3043 3044 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; 3045 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); 3046 3047 if (req->tx_buf_size) 3048 pf->tx_buf_size = 3049 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; 3050 else 3051 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; 3052 3053 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); 3054 3055 if (req->dv_buf_size) 3056 pf->dv_buf_size = 3057 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; 3058 else 3059 pf->dv_buf_size = HNS3_DEFAULT_DV; 3060 3061 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); 3062 3063 hw->num_msi = 3064 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), 3065 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); 3066 3067 return 0; 3068 } 3069 3070 static void 3071 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) 3072 { 3073 struct hns3_cfg_param_cmd *req; 3074 uint64_t mac_addr_tmp_high; 3075 uint8_t ext_rss_size_max; 3076 uint64_t mac_addr_tmp; 3077 uint32_t i; 3078 3079 req = (struct hns3_cfg_param_cmd *)desc[0].data; 3080 3081 /* get the configuration */ 3082 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3083 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); 3084 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3085 HNS3_CFG_TQP_DESC_N_M, 3086 HNS3_CFG_TQP_DESC_N_S); 3087 3088 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3089 HNS3_CFG_PHY_ADDR_M, 3090 HNS3_CFG_PHY_ADDR_S); 3091 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3092 HNS3_CFG_MEDIA_TP_M, 3093 HNS3_CFG_MEDIA_TP_S); 3094 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3095 HNS3_CFG_RX_BUF_LEN_M, 3096 HNS3_CFG_RX_BUF_LEN_S); 3097 /* get mac address */ 3098 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); 3099 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3100 HNS3_CFG_MAC_ADDR_H_M, 3101 HNS3_CFG_MAC_ADDR_H_S); 3102 3103 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 3104 3105 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3106 HNS3_CFG_DEFAULT_SPEED_M, 3107 HNS3_CFG_DEFAULT_SPEED_S); 3108 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3109 HNS3_CFG_RSS_SIZE_M, 3110 HNS3_CFG_RSS_SIZE_S); 3111 3112 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 3113 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 3114 3115 req = (struct hns3_cfg_param_cmd *)desc[1].data; 3116 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); 3117 3118 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3119 HNS3_CFG_SPEED_ABILITY_M, 3120 HNS3_CFG_SPEED_ABILITY_S); 3121 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3122 
HNS3_CFG_UMV_TBL_SPACE_M,
					HNS3_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;

	ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]),
					  HNS3_CFG_EXT_RSS_SIZE_M,
					  HNS3_CFG_EXT_RSS_SIZE_S);
	/*
	 * The ext_rss_size_max field obtained from firmware is an exponent of
	 * 2 rather than the value itself, which is more flexible for future
	 * changes and expansions. If this field is not zero, the hns3 PF PMD
	 * driver uses 1 << ext_rss_size_max as rss_size_max under one TC.
	 * Devices whose revision id is greater than or equal to
	 * PCI_REVISION_ID_HIP09_A obtain the maximum number of queues
	 * supported under a TC through this field.
	 */
	if (ext_rss_size_max)
		cfg->rss_size_max = 1U << ext_rss_size_max;
}

/* hns3_get_board_cfg: query the static parameters from the NCL_config file in
 * flash
 * @hw: pointer to struct hns3_hw
 * @hcfg: the config structure to be filled in
 */
static int
hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
{
	struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
	struct hns3_cfg_param_cmd *req;
	uint32_t offset;
	uint32_t i;
	int ret;

	for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
		offset = 0;
		req = (struct hns3_cfg_param_cmd *)desc[i].data;
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
					  true);
		hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
			       i * HNS3_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 when sent to hardware */
		hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
			       HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
		req->offset = rte_cpu_to_le_32(offset);
	}

	ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
	if (ret) {
		PMD_INIT_LOG(ERR, "get config failed %d.", ret);
		return ret;
	}

	hns3_parse_cfg(hcfg, desc);

	return 0;
}

static int
hns3_parse_speed(int speed_cmd, uint32_t *speed)
{
	switch (speed_cmd) {
	case HNS3_CFG_SPEED_10M:
		*speed = ETH_SPEED_NUM_10M;
		break;
	case HNS3_CFG_SPEED_100M:
		*speed = ETH_SPEED_NUM_100M;
		break;
	case HNS3_CFG_SPEED_1G:
		*speed = ETH_SPEED_NUM_1G;
		break;
	case HNS3_CFG_SPEED_10G:
		*speed = ETH_SPEED_NUM_10G;
		break;
	case HNS3_CFG_SPEED_25G:
		*speed = ETH_SPEED_NUM_25G;
		break;
	case HNS3_CFG_SPEED_40G:
		*speed = ETH_SPEED_NUM_40G;
		break;
	case HNS3_CFG_SPEED_50G:
		*speed = ETH_SPEED_NUM_50G;
		break;
	case HNS3_CFG_SPEED_100G:
		*speed = ETH_SPEED_NUM_100G;
		break;
	case HNS3_CFG_SPEED_200G:
		*speed = ETH_SPEED_NUM_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void
hns3_set_default_dev_specifications(struct hns3_hw *hw)
{
	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
	hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
}

static void
hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
{
	struct hns3_dev_specs_0_cmd *req0;

	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;

	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
	hw->rss_key_size
= rte_le_to_cpu_16(req0->rss_key_size); 3237 hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); 3238 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); 3239 } 3240 3241 static int 3242 hns3_check_dev_specifications(struct hns3_hw *hw) 3243 { 3244 if (hw->rss_ind_tbl_size == 0 || 3245 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { 3246 hns3_err(hw, "the size of hash lookup table configured (%u)" 3247 " exceeds the maximum(%u)", hw->rss_ind_tbl_size, 3248 HNS3_RSS_IND_TBL_SIZE_MAX); 3249 return -EINVAL; 3250 } 3251 3252 return 0; 3253 } 3254 3255 static int 3256 hns3_query_dev_specifications(struct hns3_hw *hw) 3257 { 3258 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; 3259 int ret; 3260 int i; 3261 3262 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3263 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, 3264 true); 3265 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3266 } 3267 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); 3268 3269 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); 3270 if (ret) 3271 return ret; 3272 3273 hns3_parse_dev_specifications(hw, desc); 3274 3275 return hns3_check_dev_specifications(hw); 3276 } 3277 3278 static int 3279 hns3_get_capability(struct hns3_hw *hw) 3280 { 3281 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3282 struct rte_pci_device *pci_dev; 3283 struct hns3_pf *pf = &hns->pf; 3284 struct rte_eth_dev *eth_dev; 3285 uint16_t device_id; 3286 uint8_t revision; 3287 int ret; 3288 3289 eth_dev = &rte_eth_devices[hw->data->port_id]; 3290 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3291 device_id = pci_dev->id.device_id; 3292 3293 if (device_id == HNS3_DEV_ID_25GE_RDMA || 3294 device_id == HNS3_DEV_ID_50GE_RDMA || 3295 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || 3296 device_id == HNS3_DEV_ID_200G_RDMA) 3297 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); 3298 3299 /* Get PCI revision id */ 3300 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, 3301 HNS3_PCI_REVISION_ID); 3302 if (ret != HNS3_PCI_REVISION_ID_LEN) { 3303 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", 3304 ret); 3305 return -EIO; 3306 } 3307 hw->revision = revision; 3308 3309 if (revision < PCI_REVISION_ID_HIP09_A) { 3310 hns3_set_default_dev_specifications(hw); 3311 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; 3312 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; 3313 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; 3314 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE; 3315 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1; 3316 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; 3317 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE; 3318 hw->rss_info.ipv6_sctp_offload_supported = false; 3319 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; 3320 pf->support_multi_tc_pause = false; 3321 return 0; 3322 } 3323 3324 ret = hns3_query_dev_specifications(hw); 3325 if (ret) { 3326 PMD_INIT_LOG(ERR, 3327 "failed to query dev specifications, ret = %d", 3328 ret); 3329 return ret; 3330 } 3331 3332 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 3333 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 3334 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 3335 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; 3336 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; 3337 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; 3338 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; 3339 hw->rss_info.ipv6_sctp_offload_supported = true; 3340 hw->udp_cksum_mode = 
HNS3_SPECIAL_PORT_HW_CKSUM_MODE; 3341 pf->support_multi_tc_pause = true; 3342 3343 return 0; 3344 } 3345 3346 static int 3347 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) 3348 { 3349 int ret; 3350 3351 switch (media_type) { 3352 case HNS3_MEDIA_TYPE_COPPER: 3353 if (!hns3_dev_copper_supported(hw)) { 3354 PMD_INIT_LOG(ERR, 3355 "Media type is copper, not supported."); 3356 ret = -EOPNOTSUPP; 3357 } else { 3358 ret = 0; 3359 } 3360 break; 3361 case HNS3_MEDIA_TYPE_FIBER: 3362 ret = 0; 3363 break; 3364 case HNS3_MEDIA_TYPE_BACKPLANE: 3365 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported."); 3366 ret = -EOPNOTSUPP; 3367 break; 3368 default: 3369 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); 3370 ret = -EINVAL; 3371 break; 3372 } 3373 3374 return ret; 3375 } 3376 3377 static int 3378 hns3_get_board_configuration(struct hns3_hw *hw) 3379 { 3380 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3381 struct hns3_pf *pf = &hns->pf; 3382 struct hns3_cfg cfg; 3383 int ret; 3384 3385 ret = hns3_get_board_cfg(hw, &cfg); 3386 if (ret) { 3387 PMD_INIT_LOG(ERR, "get board config failed %d", ret); 3388 return ret; 3389 } 3390 3391 ret = hns3_check_media_type(hw, cfg.media_type); 3392 if (ret) 3393 return ret; 3394 3395 hw->mac.media_type = cfg.media_type; 3396 hw->rss_size_max = cfg.rss_size_max; 3397 hw->rss_dis_flag = false; 3398 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); 3399 hw->mac.phy_addr = cfg.phy_addr; 3400 hw->mac.default_addr_setted = false; 3401 hw->num_tx_desc = cfg.tqp_desc_num; 3402 hw->num_rx_desc = cfg.tqp_desc_num; 3403 hw->dcb_info.num_pg = 1; 3404 hw->dcb_info.hw_pfc_map = 0; 3405 3406 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); 3407 if (ret) { 3408 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d", 3409 cfg.default_speed, ret); 3410 return ret; 3411 } 3412 3413 pf->tc_max = cfg.tc_num; 3414 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { 3415 PMD_INIT_LOG(WARNING, 3416 "Get TC num(%u) from flash, set TC num to 1", 3417 pf->tc_max); 3418 pf->tc_max = 1; 3419 } 3420 3421 /* Dev does not support DCB */ 3422 if (!hns3_dev_dcb_supported(hw)) { 3423 pf->tc_max = 1; 3424 pf->pfc_max = 0; 3425 } else 3426 pf->pfc_max = pf->tc_max; 3427 3428 hw->dcb_info.num_tc = 1; 3429 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, 3430 hw->tqps_num / hw->dcb_info.num_tc); 3431 hns3_set_bit(hw->hw_tc_map, 0, 1); 3432 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; 3433 3434 pf->wanted_umv_size = cfg.umv_space; 3435 3436 return ret; 3437 } 3438 3439 static int 3440 hns3_get_configuration(struct hns3_hw *hw) 3441 { 3442 int ret; 3443 3444 ret = hns3_query_function_status(hw); 3445 if (ret) { 3446 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); 3447 return ret; 3448 } 3449 3450 /* Get device capability */ 3451 ret = hns3_get_capability(hw); 3452 if (ret) { 3453 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 3454 return ret; 3455 } 3456 3457 /* Get pf resource */ 3458 ret = hns3_query_pf_resource(hw); 3459 if (ret) { 3460 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); 3461 return ret; 3462 } 3463 3464 ret = hns3_get_board_configuration(hw); 3465 if (ret) { 3466 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); 3467 return ret; 3468 } 3469 3470 ret = hns3_query_dev_fec_info(hw); 3471 if (ret) 3472 PMD_INIT_LOG(ERR, 3473 "failed to query FEC information, ret = %d", ret); 3474 3475 return ret; 3476 } 3477 3478 static int 3479 hns3_map_tqps_to_func(struct hns3_hw *hw, 
uint16_t func_id, uint16_t tqp_pid, 3480 uint16_t tqp_vid, bool is_pf) 3481 { 3482 struct hns3_tqp_map_cmd *req; 3483 struct hns3_cmd_desc desc; 3484 int ret; 3485 3486 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); 3487 3488 req = (struct hns3_tqp_map_cmd *)desc.data; 3489 req->tqp_id = rte_cpu_to_le_16(tqp_pid); 3490 req->tqp_vf = func_id; 3491 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; 3492 if (!is_pf) 3493 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); 3494 req->tqp_vid = rte_cpu_to_le_16(tqp_vid); 3495 3496 ret = hns3_cmd_send(hw, &desc, 1); 3497 if (ret) 3498 PMD_INIT_LOG(ERR, "TQP map failed %d", ret); 3499 3500 return ret; 3501 } 3502 3503 static int 3504 hns3_map_tqp(struct hns3_hw *hw) 3505 { 3506 int ret; 3507 int i; 3508 3509 /* 3510 * In current version, VF is not supported when PF is driven by DPDK 3511 * driver, so we assign total tqps_num tqps allocated to this port 3512 * to PF. 3513 */ 3514 for (i = 0; i < hw->total_tqps_num; i++) { 3515 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true); 3516 if (ret) 3517 return ret; 3518 } 3519 3520 return 0; 3521 } 3522 3523 static int 3524 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 3525 { 3526 struct hns3_config_mac_speed_dup_cmd *req; 3527 struct hns3_cmd_desc desc; 3528 int ret; 3529 3530 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; 3531 3532 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); 3533 3534 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0); 3535 3536 switch (speed) { 3537 case ETH_SPEED_NUM_10M: 3538 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3539 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); 3540 break; 3541 case ETH_SPEED_NUM_100M: 3542 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3543 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); 3544 break; 3545 case ETH_SPEED_NUM_1G: 3546 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3547 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); 3548 break; 3549 case ETH_SPEED_NUM_10G: 3550 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3551 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); 3552 break; 3553 case ETH_SPEED_NUM_25G: 3554 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3555 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); 3556 break; 3557 case ETH_SPEED_NUM_40G: 3558 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3559 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); 3560 break; 3561 case ETH_SPEED_NUM_50G: 3562 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3563 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); 3564 break; 3565 case ETH_SPEED_NUM_100G: 3566 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3567 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); 3568 break; 3569 case ETH_SPEED_NUM_200G: 3570 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3571 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); 3572 break; 3573 default: 3574 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); 3575 return -EINVAL; 3576 } 3577 3578 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); 3579 3580 ret = hns3_cmd_send(hw, &desc, 1); 3581 if (ret) 3582 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); 3583 3584 return ret; 3585 } 3586 3587 static int 3588 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3589 { 3590 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3591 struct hns3_pf *pf = &hns->pf; 3592 struct hns3_priv_buf *priv; 3593 uint32_t i, total_size; 3594 3595 total_size = pf->pkt_buf_size; 3596 3597 /* alloc tx buffer for all enabled tc */ 3598 for (i = 0; i < HNS3_MAX_TC_NUM; 
i++) { 3599 priv = &buf_alloc->priv_buf[i]; 3600 3601 if (hw->hw_tc_map & BIT(i)) { 3602 if (total_size < pf->tx_buf_size) 3603 return -ENOMEM; 3604 3605 priv->tx_buf_size = pf->tx_buf_size; 3606 } else 3607 priv->tx_buf_size = 0; 3608 3609 total_size -= priv->tx_buf_size; 3610 } 3611 3612 return 0; 3613 } 3614 3615 static int 3616 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3617 { 3618 /* TX buffer size is unit by 128 byte */ 3619 #define HNS3_BUF_SIZE_UNIT_SHIFT 7 3620 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) 3621 struct hns3_tx_buff_alloc_cmd *req; 3622 struct hns3_cmd_desc desc; 3623 uint32_t buf_size; 3624 uint32_t i; 3625 int ret; 3626 3627 req = (struct hns3_tx_buff_alloc_cmd *)desc.data; 3628 3629 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); 3630 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3631 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 3632 3633 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; 3634 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | 3635 HNS3_BUF_SIZE_UPDATE_EN_MSK); 3636 } 3637 3638 ret = hns3_cmd_send(hw, &desc, 1); 3639 if (ret) 3640 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); 3641 3642 return ret; 3643 } 3644 3645 static int 3646 hns3_get_tc_num(struct hns3_hw *hw) 3647 { 3648 int cnt = 0; 3649 uint8_t i; 3650 3651 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3652 if (hw->hw_tc_map & BIT(i)) 3653 cnt++; 3654 return cnt; 3655 } 3656 3657 static uint32_t 3658 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3659 { 3660 struct hns3_priv_buf *priv; 3661 uint32_t rx_priv = 0; 3662 int i; 3663 3664 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3665 priv = &buf_alloc->priv_buf[i]; 3666 if (priv->enable) 3667 rx_priv += priv->buf_size; 3668 } 3669 return rx_priv; 3670 } 3671 3672 static uint32_t 3673 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3674 { 3675 uint32_t total_tx_size = 0; 3676 uint32_t i; 3677 3678 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3679 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 3680 3681 return total_tx_size; 3682 } 3683 3684 /* Get the number of pfc enabled TCs, which have private buffer */ 3685 static int 3686 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3687 { 3688 struct hns3_priv_buf *priv; 3689 int cnt = 0; 3690 uint8_t i; 3691 3692 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3693 priv = &buf_alloc->priv_buf[i]; 3694 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3695 cnt++; 3696 } 3697 3698 return cnt; 3699 } 3700 3701 /* Get the number of pfc disabled TCs, which have private buffer */ 3702 static int 3703 hns3_get_no_pfc_priv_num(struct hns3_hw *hw, 3704 struct hns3_pkt_buf_alloc *buf_alloc) 3705 { 3706 struct hns3_priv_buf *priv; 3707 int cnt = 0; 3708 uint8_t i; 3709 3710 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3711 priv = &buf_alloc->priv_buf[i]; 3712 if (hw->hw_tc_map & BIT(i) && 3713 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3714 cnt++; 3715 } 3716 3717 return cnt; 3718 } 3719 3720 static bool 3721 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, 3722 uint32_t rx_all) 3723 { 3724 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 3725 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3726 struct hns3_pf *pf = &hns->pf; 3727 uint32_t shared_buf, aligned_mps; 3728 uint32_t rx_priv; 3729 uint8_t tc_num; 3730 uint8_t i; 3731 3732 tc_num = hns3_get_tc_num(hw); 3733 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3734 3735 if 
(hns3_dev_dcb_supported(hw)) 3736 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + 3737 pf->dv_buf_size; 3738 else 3739 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF 3740 + pf->dv_buf_size; 3741 3742 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 3743 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), 3744 HNS3_BUF_SIZE_UNIT); 3745 3746 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); 3747 if (rx_all < rx_priv + shared_std) 3748 return false; 3749 3750 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); 3751 buf_alloc->s_buf.buf_size = shared_buf; 3752 if (hns3_dev_dcb_supported(hw)) { 3753 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; 3754 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 3755 - roundup(aligned_mps / HNS3_BUF_DIV_BY, 3756 HNS3_BUF_SIZE_UNIT); 3757 } else { 3758 buf_alloc->s_buf.self.high = 3759 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3760 buf_alloc->s_buf.self.low = aligned_mps; 3761 } 3762 3763 if (hns3_dev_dcb_supported(hw)) { 3764 hi_thrd = shared_buf - pf->dv_buf_size; 3765 3766 if (tc_num <= NEED_RESERVE_TC_NUM) 3767 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT / 3768 BUF_MAX_PERCENT; 3769 3770 if (tc_num) 3771 hi_thrd = hi_thrd / tc_num; 3772 3773 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); 3774 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); 3775 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; 3776 } else { 3777 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3778 lo_thrd = aligned_mps; 3779 } 3780 3781 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3782 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 3783 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 3784 } 3785 3786 return true; 3787 } 3788 3789 static bool 3790 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, 3791 struct hns3_pkt_buf_alloc *buf_alloc) 3792 { 3793 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3794 struct hns3_pf *pf = &hns->pf; 3795 struct hns3_priv_buf *priv; 3796 uint32_t aligned_mps; 3797 uint32_t rx_all; 3798 uint8_t i; 3799 3800 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3801 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3802 3803 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3804 priv = &buf_alloc->priv_buf[i]; 3805 3806 priv->enable = 0; 3807 priv->wl.low = 0; 3808 priv->wl.high = 0; 3809 priv->buf_size = 0; 3810 3811 if (!(hw->hw_tc_map & BIT(i))) 3812 continue; 3813 3814 priv->enable = 1; 3815 if (hw->dcb_info.hw_pfc_map & BIT(i)) { 3816 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT; 3817 priv->wl.high = roundup(priv->wl.low + aligned_mps, 3818 HNS3_BUF_SIZE_UNIT); 3819 } else { 3820 priv->wl.low = 0; 3821 priv->wl.high = max ? 
(aligned_mps * HNS3_BUF_MUL_BY) : 3822 aligned_mps; 3823 } 3824 3825 priv->buf_size = priv->wl.high + pf->dv_buf_size; 3826 } 3827 3828 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3829 } 3830 3831 static bool 3832 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw, 3833 struct hns3_pkt_buf_alloc *buf_alloc) 3834 { 3835 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3836 struct hns3_pf *pf = &hns->pf; 3837 struct hns3_priv_buf *priv; 3838 int no_pfc_priv_num; 3839 uint32_t rx_all; 3840 uint8_t mask; 3841 int i; 3842 3843 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3844 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc); 3845 3846 /* let the last to be cleared first */ 3847 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3848 priv = &buf_alloc->priv_buf[i]; 3849 mask = BIT((uint8_t)i); 3850 if (hw->hw_tc_map & mask && 3851 !(hw->dcb_info.hw_pfc_map & mask)) { 3852 /* Clear the no pfc TC private buffer */ 3853 priv->wl.low = 0; 3854 priv->wl.high = 0; 3855 priv->buf_size = 0; 3856 priv->enable = 0; 3857 no_pfc_priv_num--; 3858 } 3859 3860 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3861 no_pfc_priv_num == 0) 3862 break; 3863 } 3864 3865 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3866 } 3867 3868 static bool 3869 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, 3870 struct hns3_pkt_buf_alloc *buf_alloc) 3871 { 3872 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3873 struct hns3_pf *pf = &hns->pf; 3874 struct hns3_priv_buf *priv; 3875 uint32_t rx_all; 3876 int pfc_priv_num; 3877 uint8_t mask; 3878 int i; 3879 3880 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3881 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc); 3882 3883 /* let the last to be cleared first */ 3884 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3885 priv = &buf_alloc->priv_buf[i]; 3886 mask = BIT((uint8_t)i); 3887 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) { 3888 /* Reduce the number of pfc TC with private buffer */ 3889 priv->wl.low = 0; 3890 priv->enable = 0; 3891 priv->wl.high = 0; 3892 priv->buf_size = 0; 3893 pfc_priv_num--; 3894 } 3895 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3896 pfc_priv_num == 0) 3897 break; 3898 } 3899 3900 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3901 } 3902 3903 static bool 3904 hns3_only_alloc_priv_buff(struct hns3_hw *hw, 3905 struct hns3_pkt_buf_alloc *buf_alloc) 3906 { 3907 #define COMPENSATE_BUFFER 0x3C00 3908 #define COMPENSATE_HALF_MPS_NUM 5 3909 #define PRIV_WL_GAP 0x1800 3910 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3911 struct hns3_pf *pf = &hns->pf; 3912 uint32_t tc_num = hns3_get_tc_num(hw); 3913 uint32_t half_mps = pf->mps >> 1; 3914 struct hns3_priv_buf *priv; 3915 uint32_t min_rx_priv; 3916 uint32_t rx_priv; 3917 uint8_t i; 3918 3919 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3920 if (tc_num) 3921 rx_priv = rx_priv / tc_num; 3922 3923 if (tc_num <= NEED_RESERVE_TC_NUM) 3924 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 3925 3926 /* 3927 * Minimum value of private buffer in rx direction (min_rx_priv) is 3928 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private 3929 * buffer if rx_priv is greater than min_rx_priv. 
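	 *
	 * The macros above encode exactly this formula (all values from the
	 * code, shown for illustration): COMPENSATE_BUFFER is 0x3C00 = 15360
	 * bytes = 15KB, and COMPENSATE_HALF_MPS_NUM * half_mps =
	 * 5 * (pf->mps >> 1) = 2.5 * MPS, with pf->dv_buf_size as "DV".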
3930 */ 3931 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER + 3932 COMPENSATE_HALF_MPS_NUM * half_mps; 3933 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT); 3934 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT); 3935 if (rx_priv < min_rx_priv) 3936 return false; 3937 3938 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3939 priv = &buf_alloc->priv_buf[i]; 3940 priv->enable = 0; 3941 priv->wl.low = 0; 3942 priv->wl.high = 0; 3943 priv->buf_size = 0; 3944 3945 if (!(hw->hw_tc_map & BIT(i))) 3946 continue; 3947 3948 priv->enable = 1; 3949 priv->buf_size = rx_priv; 3950 priv->wl.high = rx_priv - pf->dv_buf_size; 3951 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 3952 } 3953 3954 buf_alloc->s_buf.buf_size = 0; 3955 3956 return true; 3957 } 3958 3959 /* 3960 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs 3961 * @hw: pointer to struct hns3_hw 3962 * @buf_alloc: pointer to buffer calculation data 3963 * @return: 0: calculation successful, negative: failure 3964 */ 3965 static int 3966 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3967 { 3968 /* When DCB is not supported, no rx private buffer is allocated. */ 3969 if (!hns3_dev_dcb_supported(hw)) { 3970 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3971 struct hns3_pf *pf = &hns->pf; 3972 uint32_t rx_all = pf->pkt_buf_size; 3973 3974 rx_all -= hns3_get_tx_buff_alloced(buf_alloc); 3975 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all)) 3976 return -ENOMEM; 3977 3978 return 0; 3979 } 3980 3981 /* 3982 * Try to allocate a private packet buffer for all TCs without a 3983 * shared buffer. 3984 */ 3985 if (hns3_only_alloc_priv_buff(hw, buf_alloc)) 3986 return 0; 3987 3988 /* 3989 * Try to allocate a private packet buffer for all TCs with a 3990 * shared buffer. 3991 */ 3992 if (hns3_rx_buf_calc_all(hw, true, buf_alloc)) 3993 return 0; 3994 3995 /* 3996 * The number of enabled ports, TCs and no_drop TCs differs between 3997 * application scenarios. To obtain better performance, the software 3998 * can shrink the allocation and configure the waterline by decreasing 3999 * the private buffer size step by step, in the following order: the 4000 * waterline of valid TCs, then PFC-disabled TCs, then PFC-enabled 4001 * TCs.
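* Concretely: the two attempts above use maximal settings, and the calls
* below first retry hns3_rx_buf_calc_all() with minimal waterlines, then
* drop the private buffers of PFC-disabled TCs, then those of PFC-enabled
* TCs; -ENOMEM is returned only if none of these fits.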
4002 */ 4003 if (hns3_rx_buf_calc_all(hw, false, buf_alloc)) 4004 return 0; 4005 4006 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc)) 4007 return 0; 4008 4009 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc)) 4010 return 0; 4011 4012 return -ENOMEM; 4013 } 4014 4015 static int 4016 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4017 { 4018 struct hns3_rx_priv_buff_cmd *req; 4019 struct hns3_cmd_desc desc; 4020 uint32_t buf_size; 4021 int ret; 4022 int i; 4023 4024 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false); 4025 req = (struct hns3_rx_priv_buff_cmd *)desc.data; 4026 4027 /* Alloc private buffer TCs */ 4028 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 4029 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i]; 4030 4031 req->buf_num[i] = 4032 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S); 4033 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B); 4034 } 4035 4036 buf_size = buf_alloc->s_buf.buf_size; 4037 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) | 4038 (1 << HNS3_TC0_PRI_BUF_EN_B)); 4039 4040 ret = hns3_cmd_send(hw, &desc, 1); 4041 if (ret) 4042 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret); 4043 4044 return ret; 4045 } 4046 4047 static int 4048 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4049 { 4050 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2 4051 struct hns3_rx_priv_wl_buf *req; 4052 struct hns3_priv_buf *priv; 4053 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM]; 4054 int i, j; 4055 int ret; 4056 4057 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) { 4058 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC, 4059 false); 4060 req = (struct hns3_rx_priv_wl_buf *)desc[i].data; 4061 4062 /* Only the first descriptor sets the NEXT bit to 1 */ 4063 if (i == 0) 4064 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4065 else 4066 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4067 4068 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 4069 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j; 4070 4071 priv = &buf_alloc->priv_buf[idx]; 4072 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >> 4073 HNS3_BUF_UNIT_S); 4074 req->tc_wl[j].high |= 4075 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4076 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >> 4077 HNS3_BUF_UNIT_S); 4078 req->tc_wl[j].low |= 4079 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4080 } 4081 } 4082 4083 /* Send 2 descriptors at one time */ 4084 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM); 4085 if (ret) 4086 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d", 4087 ret); 4088 return ret; 4089 } 4090 4091 static int 4092 hns3_common_thrd_config(struct hns3_hw *hw, 4093 struct hns3_pkt_buf_alloc *buf_alloc) 4094 { 4095 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2 4096 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf; 4097 struct hns3_rx_com_thrd *req; 4098 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM]; 4099 struct hns3_tc_thrd *tc; 4100 int tc_idx; 4101 int i, j; 4102 int ret; 4103 4104 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) { 4105 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC, 4106 false); 4107 req = (struct hns3_rx_com_thrd *)&desc[i].data; 4108 4109 /* Only the first descriptor sets the NEXT bit to 1 */ 4110 if (i == 0) 4111 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4112 else 4113 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4114 4115 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 4116 tc_idx
= i * HNS3_TC_NUM_ONE_DESC + j; 4117 tc = &s_buf->tc_thrd[tc_idx]; 4118 4119 req->com_thrd[j].high = 4120 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); 4121 req->com_thrd[j].high |= 4122 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4123 req->com_thrd[j].low = 4124 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); 4125 req->com_thrd[j].low |= 4126 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4127 } 4128 } 4129 4130 /* Send 2 descriptors at one time */ 4131 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); 4132 if (ret) 4133 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); 4134 4135 return ret; 4136 } 4137 4138 static int 4139 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4140 { 4141 struct hns3_shared_buf *buf = &buf_alloc->s_buf; 4142 struct hns3_rx_com_wl *req; 4143 struct hns3_cmd_desc desc; 4144 int ret; 4145 4146 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); 4147 4148 req = (struct hns3_rx_com_wl *)desc.data; 4149 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); 4150 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4151 4152 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); 4153 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4154 4155 ret = hns3_cmd_send(hw, &desc, 1); 4156 if (ret) 4157 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); 4158 4159 return ret; 4160 } 4161 4162 int 4163 hns3_buffer_alloc(struct hns3_hw *hw) 4164 { 4165 struct hns3_pkt_buf_alloc pkt_buf; 4166 int ret; 4167 4168 memset(&pkt_buf, 0, sizeof(pkt_buf)); 4169 ret = hns3_tx_buffer_calc(hw, &pkt_buf); 4170 if (ret) { 4171 PMD_INIT_LOG(ERR, 4172 "could not calc tx buffer size for all TCs %d", 4173 ret); 4174 return ret; 4175 } 4176 4177 ret = hns3_tx_buffer_alloc(hw, &pkt_buf); 4178 if (ret) { 4179 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); 4180 return ret; 4181 } 4182 4183 ret = hns3_rx_buffer_calc(hw, &pkt_buf); 4184 if (ret) { 4185 PMD_INIT_LOG(ERR, 4186 "could not calc rx priv buffer size for all TCs %d", 4187 ret); 4188 return ret; 4189 } 4190 4191 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); 4192 if (ret) { 4193 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); 4194 return ret; 4195 } 4196 4197 if (hns3_dev_dcb_supported(hw)) { 4198 ret = hns3_rx_priv_wl_config(hw, &pkt_buf); 4199 if (ret) { 4200 PMD_INIT_LOG(ERR, 4201 "could not configure rx private waterline %d", 4202 ret); 4203 return ret; 4204 } 4205 4206 ret = hns3_common_thrd_config(hw, &pkt_buf); 4207 if (ret) { 4208 PMD_INIT_LOG(ERR, 4209 "could not configure common threshold %d", 4210 ret); 4211 return ret; 4212 } 4213 } 4214 4215 ret = hns3_common_wl_config(hw, &pkt_buf); 4216 if (ret) 4217 PMD_INIT_LOG(ERR, "could not configure common waterline %d", 4218 ret); 4219 4220 return ret; 4221 } 4222 4223 static int 4224 hns3_mac_init(struct hns3_hw *hw) 4225 { 4226 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4227 struct hns3_mac *mac = &hw->mac; 4228 struct hns3_pf *pf = &hns->pf; 4229 int ret; 4230 4231 pf->support_sfp_query = true; 4232 mac->link_duplex = ETH_LINK_FULL_DUPLEX; 4233 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); 4234 if (ret) { 4235 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); 4236 return ret; 4237 } 4238 4239 mac->link_status = ETH_LINK_DOWN; 4240 4241 return hns3_config_mtu(hw, pf->mps); 4242 } 4243 4244 static int 4245 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) 4246 { 
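/*
 * cmdq_resp is the descriptor's retval field; resp_code is the per-command
 * status byte that the caller extracts from the first data word of the
 * descriptor (see hns3_add_mgr_tbl() below).
 */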
4247 #define HNS3_ETHERTYPE_SUCCESS_ADD 0 4248 #define HNS3_ETHERTYPE_ALREADY_ADD 1 4249 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 4250 #define HNS3_ETHERTYPE_KEY_CONFLICT 3 4251 int return_status; 4252 4253 if (cmdq_resp) { 4254 PMD_INIT_LOG(ERR, 4255 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.", 4256 cmdq_resp); 4257 return -EIO; 4258 } 4259 4260 switch (resp_code) { 4261 case HNS3_ETHERTYPE_SUCCESS_ADD: 4262 case HNS3_ETHERTYPE_ALREADY_ADD: 4263 return_status = 0; 4264 break; 4265 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: 4266 PMD_INIT_LOG(ERR, 4267 "add mac ethertype failed for manager table overflow."); 4268 return_status = -EIO; 4269 break; 4270 case HNS3_ETHERTYPE_KEY_CONFLICT: 4271 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); 4272 return_status = -EIO; 4273 break; 4274 default: 4275 PMD_INIT_LOG(ERR, 4276 "add mac ethertype failed for undefined error code, code=%u.", 4277 resp_code); 4278 return_status = -EIO; 4279 break; 4280 } 4281 4282 return return_status; 4283 } 4284 4285 static int 4286 hns3_add_mgr_tbl(struct hns3_hw *hw, 4287 const struct hns3_mac_mgr_tbl_entry_cmd *req) 4288 { 4289 struct hns3_cmd_desc desc; 4290 uint8_t resp_code; 4291 uint16_t retval; 4292 int ret; 4293 4294 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); 4295 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); 4296 4297 ret = hns3_cmd_send(hw, &desc, 1); 4298 if (ret) { 4299 PMD_INIT_LOG(ERR, 4300 "add mac ethertype failed for cmd_send, ret = %d.", 4301 ret); 4302 return ret; 4303 } 4304 4305 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 4306 retval = rte_le_to_cpu_16(desc.retval); 4307 4308 return hns3_get_mac_ethertype_cmd_status(retval, resp_code); 4309 } 4310 4311 static void 4312 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, 4313 int *table_item_num) 4314 { 4315 struct hns3_mac_mgr_tbl_entry_cmd *tbl; 4316 4317 /* 4318 * In the current version, we add one item to the management table, 4319 * as below: 0x0180C200000E -- the LLDP MC address 4320 */ 4321 tbl = mgr_table; 4322 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; 4323 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); 4324 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); 4325 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); 4326 tbl->i_port_bitmap = 0x1; 4327 *table_item_num = 1; 4328 } 4329 4330 static int 4331 hns3_init_mgr_tbl(struct hns3_hw *hw) 4332 { 4333 #define HNS_MAC_MGR_TBL_MAX_SIZE 16 4334 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; 4335 int table_item_num; 4336 int ret; 4337 int i; 4338 4339 memset(mgr_table, 0, sizeof(mgr_table)); 4340 hns3_prepare_mgr_tbl(mgr_table, &table_item_num); 4341 for (i = 0; i < table_item_num; i++) { 4342 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); 4343 if (ret) { 4344 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret = %d", 4345 ret); 4346 return ret; 4347 } 4348 } 4349 4350 return 0; 4351 } 4352 4353 static void 4354 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, 4355 bool en_mc, bool en_bc, int vport_id) 4356 { 4357 if (!param) 4358 return; 4359 4360 memset(param, 0, sizeof(struct hns3_promisc_param)); 4361 if (en_uc) 4362 param->enable = HNS3_PROMISC_EN_UC; 4363 if (en_mc) 4364 param->enable |= HNS3_PROMISC_EN_MC; 4365 if (en_bc) 4366 param->enable |= HNS3_PROMISC_EN_BC; 4367 param->vf_id = vport_id; 4368 } 4369 4370 static int 4371 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) 4372 { 4373 struct hns3_promisc_cfg_cmd
*req; 4374 struct hns3_cmd_desc desc; 4375 int ret; 4376 4377 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false); 4378 4379 req = (struct hns3_promisc_cfg_cmd *)desc.data; 4380 req->vf_id = param->vf_id; 4381 req->flag = (param->enable << HNS3_PROMISC_EN_B) | 4382 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B; 4383 4384 ret = hns3_cmd_send(hw, &desc, 1); 4385 if (ret) 4386 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret); 4387 4388 return ret; 4389 } 4390 4391 static int 4392 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) 4393 { 4394 struct hns3_promisc_param param; 4395 bool en_bc_pmc = true; 4396 uint8_t vf_id; 4397 4398 /* 4399 * In the current version, VFs are not supported when the PF is driven 4400 * by the DPDK driver, so we only need to configure parameters for the 4401 * PF vport. 4402 */ 4402 vf_id = HNS3_PF_FUNC_ID; 4403 4404 hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); 4405 return hns3_cmd_set_promisc_mode(hw, &param); 4406 } 4407 4408 static int 4409 hns3_promisc_init(struct hns3_hw *hw) 4410 { 4411 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4412 struct hns3_pf *pf = &hns->pf; 4413 struct hns3_promisc_param param; 4414 uint16_t func_id; 4415 int ret; 4416 4417 ret = hns3_set_promisc_mode(hw, false, false); 4418 if (ret) { 4419 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret); 4420 return ret; 4421 } 4422 4423 /* 4424 * In the current version, VFs are not supported when the PF is driven 4425 * by the DPDK driver. After the PF has been taken over by DPDK, the 4426 * original VFs become invalid, so residual entries are possible. The 4427 * driver should clear the VFs' promisc mode during init to avoid 4428 * unnecessary bandwidth usage. 4429 */ 4430 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) { 4431 hns3_promisc_param_init(&param, false, false, false, func_id); 4432 ret = hns3_cmd_set_promisc_mode(hw, &param); 4433 if (ret) { 4434 PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode," 4435 " ret = %d", func_id, ret); 4436 return ret; 4437 } 4438 } 4439 4440 return 0; 4441 } 4442 4443 static void 4444 hns3_promisc_uninit(struct hns3_hw *hw) 4445 { 4446 struct hns3_promisc_param param; 4447 uint16_t func_id; 4448 int ret; 4449 4450 func_id = HNS3_PF_FUNC_ID; 4451 4452 /* 4453 * In the current version, VFs are not supported when the PF is driven 4454 * by the DPDK driver; the VFs' promisc mode status has been cleared 4455 * during init and will not change afterwards. So just clear the PF's 4456 * promisc mode status during uninit. 4457 */ 4458 hns3_promisc_param_init(&param, false, false, false, func_id); 4459 ret = hns3_cmd_set_promisc_mode(hw, &param); 4460 if (ret) 4461 PMD_INIT_LOG(ERR, "failed to clear promisc status during" 4462 " uninit, ret = %d", ret); 4463 } 4464 4465 static int 4466 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) 4467 { 4468 bool allmulti = dev->data->all_multicast ? true : false; 4469 struct hns3_adapter *hns = dev->data->dev_private; 4470 struct hns3_hw *hw = &hns->hw; 4471 uint64_t offloads; 4472 int err; 4473 int ret; 4474 4475 rte_spinlock_lock(&hw->lock); 4476 ret = hns3_set_promisc_mode(hw, true, true); 4477 if (ret) { 4478 rte_spinlock_unlock(&hw->lock); 4479 hns3_err(hw, "failed to enable promiscuous mode, ret = %d", 4480 ret); 4481 return ret; 4482 } 4483 4484 /* 4485 * When promiscuous mode is enabled, disable the vlan filter to let 4486 * all packets pass in the receiving direction.
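* (With DEV_RX_OFFLOAD_VLAN_FILTER enabled, for example, frames whose VLAN
* ID is not in the filter table would otherwise still be dropped by the
* hardware even though promiscuous mode is on.)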
4487 */ 4488 offloads = dev->data->dev_conf.rxmode.offloads; 4489 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4490 ret = hns3_enable_vlan_filter(hns, false); 4491 if (ret) { 4492 hns3_err(hw, "failed to enable promiscuous mode due to " 4493 "failure to disable vlan filter, ret = %d", 4494 ret); 4495 err = hns3_set_promisc_mode(hw, false, allmulti); 4496 if (err) 4497 hns3_err(hw, "failed to restore promiscuous " 4498 "status after disable vlan filter " 4499 "failed during enabling promiscuous " 4500 "mode, ret = %d", ret); 4501 } 4502 } 4503 4504 rte_spinlock_unlock(&hw->lock); 4505 4506 return ret; 4507 } 4508 4509 static int 4510 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) 4511 { 4512 bool allmulti = dev->data->all_multicast ? true : false; 4513 struct hns3_adapter *hns = dev->data->dev_private; 4514 struct hns3_hw *hw = &hns->hw; 4515 uint64_t offloads; 4516 int err; 4517 int ret; 4518 4519 /* If now in all_multicast mode, must remain in all_multicast mode. */ 4520 rte_spinlock_lock(&hw->lock); 4521 ret = hns3_set_promisc_mode(hw, false, allmulti); 4522 if (ret) { 4523 rte_spinlock_unlock(&hw->lock); 4524 hns3_err(hw, "failed to disable promiscuous mode, ret = %d", 4525 ret); 4526 return ret; 4527 } 4528 /* when promiscuous mode was disabled, restore the vlan filter status */ 4529 offloads = dev->data->dev_conf.rxmode.offloads; 4530 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4531 ret = hns3_enable_vlan_filter(hns, true); 4532 if (ret) { 4533 hns3_err(hw, "failed to disable promiscuous mode due to" 4534 " failure to restore vlan filter, ret = %d", 4535 ret); 4536 err = hns3_set_promisc_mode(hw, true, true); 4537 if (err) 4538 hns3_err(hw, "failed to restore promiscuous " 4539 "status after enabling vlan filter " 4540 "failed during disabling promiscuous " 4541 "mode, ret = %d", ret); 4542 } 4543 } 4544 rte_spinlock_unlock(&hw->lock); 4545 4546 return ret; 4547 } 4548 4549 static int 4550 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) 4551 { 4552 struct hns3_adapter *hns = dev->data->dev_private; 4553 struct hns3_hw *hw = &hns->hw; 4554 int ret; 4555 4556 if (dev->data->promiscuous) 4557 return 0; 4558 4559 rte_spinlock_lock(&hw->lock); 4560 ret = hns3_set_promisc_mode(hw, false, true); 4561 rte_spinlock_unlock(&hw->lock); 4562 if (ret) 4563 hns3_err(hw, "failed to enable allmulticast mode, ret = %d", 4564 ret); 4565 4566 return ret; 4567 } 4568 4569 static int 4570 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) 4571 { 4572 struct hns3_adapter *hns = dev->data->dev_private; 4573 struct hns3_hw *hw = &hns->hw; 4574 int ret; 4575 4576 /* If now in promiscuous mode, must remain in all_multicast mode. */ 4577 if (dev->data->promiscuous) 4578 return 0; 4579 4580 rte_spinlock_lock(&hw->lock); 4581 ret = hns3_set_promisc_mode(hw, false, false); 4582 rte_spinlock_unlock(&hw->lock); 4583 if (ret) 4584 hns3_err(hw, "failed to disable allmulticast mode, ret = %d", 4585 ret); 4586 4587 return ret; 4588 } 4589 4590 static int 4591 hns3_dev_promisc_restore(struct hns3_adapter *hns) 4592 { 4593 struct hns3_hw *hw = &hns->hw; 4594 bool allmulti = hw->data->all_multicast ? 
true : false; 4595 int ret; 4596 4597 if (hw->data->promiscuous) { 4598 ret = hns3_set_promisc_mode(hw, true, true); 4599 if (ret) 4600 hns3_err(hw, "failed to restore promiscuous mode, " 4601 "ret = %d", ret); 4602 return ret; 4603 } 4604 4605 ret = hns3_set_promisc_mode(hw, false, allmulti); 4606 if (ret) 4607 hns3_err(hw, "failed to restore allmulticast mode, ret = %d", 4608 ret); 4609 return ret; 4610 } 4611 4612 static int 4613 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info) 4614 { 4615 struct hns3_sfp_info_cmd *resp; 4616 struct hns3_cmd_desc desc; 4617 int ret; 4618 4619 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true); 4620 resp = (struct hns3_sfp_info_cmd *)desc.data; 4621 resp->query_type = HNS3_ACTIVE_QUERY; 4622 4623 ret = hns3_cmd_send(hw, &desc, 1); 4624 if (ret == -EOPNOTSUPP) { 4625 hns3_warn(hw, "firmware does not support getting SFP info," 4626 " ret = %d.", ret); 4627 return ret; 4628 } else if (ret) { 4629 hns3_err(hw, "get sfp info failed, ret = %d.", ret); 4630 return ret; 4631 } 4632 4633 /* 4634 * In some cases, the MAC speed obtained from the firmware may be 0; 4635 * such a value must not be stored in mac_info->link_speed. 4636 */ 4637 if (!rte_le_to_cpu_32(resp->sfp_speed)) 4638 return 0; 4639 4640 mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed); 4641 /* 4642 * If resp->supported_speed is 0, the firmware is an old version that 4643 * does not report these parameters; do not update them. 4644 */ 4645 if (resp->supported_speed) { 4646 mac_info->query_type = HNS3_ACTIVE_QUERY; 4647 mac_info->supported_speed = 4648 rte_le_to_cpu_32(resp->supported_speed); 4649 mac_info->support_autoneg = resp->autoneg_ability; 4650 mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED 4651 : ETH_LINK_AUTONEG; 4652 } else { 4653 mac_info->query_type = HNS3_DEFAULT_QUERY; 4654 } 4655 4656 return 0; 4657 } 4658 4659 static uint8_t 4660 hns3_check_speed_dup(uint8_t duplex, uint32_t speed) 4661 { 4662 if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M)) 4663 duplex = ETH_LINK_FULL_DUPLEX; 4664 4665 return duplex; 4666 } 4667 4668 static int 4669 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 4670 { 4671 struct hns3_mac *mac = &hw->mac; 4672 int ret; 4673 4674 duplex = hns3_check_speed_dup(duplex, speed); 4675 if (mac->link_speed == speed && mac->link_duplex == duplex) 4676 return 0; 4677 4678 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex); 4679 if (ret) 4680 return ret; 4681 4682 ret = hns3_port_shaper_update(hw, speed); 4683 if (ret) 4684 return ret; 4685 4686 mac->link_speed = speed; 4687 mac->link_duplex = duplex; 4688 4689 return 0; 4690 } 4691 4692 static int 4693 hns3_update_fiber_link_info(struct hns3_hw *hw) 4694 { 4695 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 4696 struct hns3_mac *mac = &hw->mac; 4697 struct hns3_mac mac_info; 4698 int ret; 4699 4700 /* If the firmware does not support getting the SFP/qSFP speed, return directly */ 4701 if (!pf->support_sfp_query) 4702 return 0; 4703 4704 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4705 ret = hns3_get_sfp_info(hw, &mac_info); 4706 if (ret == -EOPNOTSUPP) { 4707 pf->support_sfp_query = false; 4708 return ret; 4709 } else if (ret) 4710 return ret; 4711 4712 /* Do nothing if no SFP */ 4713 if (mac_info.link_speed == ETH_SPEED_NUM_NONE) 4714 return 0; 4715 4716 /* 4717 * If query_type is HNS3_ACTIVE_QUERY, there is no need 4718 * to reconfigure the MAC speed.
Otherwise, it indicates 4719 * that the current firmware only supports obtaining the speed of 4720 * the SFP, and the MAC speed needs to be reconfigured. 4721 */ 4722 mac->query_type = mac_info.query_type; 4723 if (mac->query_type == HNS3_ACTIVE_QUERY) { 4724 if (mac_info.link_speed != mac->link_speed) { 4725 ret = hns3_port_shaper_update(hw, mac_info.link_speed); 4726 if (ret) 4727 return ret; 4728 } 4729 4730 mac->link_speed = mac_info.link_speed; 4731 mac->supported_speed = mac_info.supported_speed; 4732 mac->support_autoneg = mac_info.support_autoneg; 4733 mac->link_autoneg = mac_info.link_autoneg; 4734 4735 return 0; 4736 } 4737 4738 /* Config full duplex for SFP */ 4739 return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed, 4740 ETH_LINK_FULL_DUPLEX); 4741 } 4742 4743 static void 4744 hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac) 4745 { 4746 #define HNS3_PHY_SUPPORTED_SPEED_MASK 0x2f 4747 4748 struct hns3_phy_params_bd0_cmd *req; 4749 uint32_t supported; 4750 4751 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 4752 mac->link_speed = rte_le_to_cpu_32(req->speed); 4753 mac->link_duplex = hns3_get_bit(req->duplex, 4754 HNS3_PHY_DUPLEX_CFG_B); 4755 mac->link_autoneg = hns3_get_bit(req->autoneg, 4756 HNS3_PHY_AUTONEG_CFG_B); 4757 mac->advertising = rte_le_to_cpu_32(req->advertising); 4758 mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising); 4759 supported = rte_le_to_cpu_32(req->supported); 4760 mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK; 4761 mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT); 4762 } 4763 4764 static int 4765 hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac) 4766 { 4767 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 4768 uint16_t i; 4769 int ret; 4770 4771 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 4772 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 4773 true); 4774 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4775 } 4776 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true); 4777 4778 ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 4779 if (ret) { 4780 hns3_err(hw, "get phy parameters failed, ret = %d.", ret); 4781 return ret; 4782 } 4783 4784 hns3_parse_copper_phy_params(desc, mac); 4785 4786 return 0; 4787 } 4788 4789 static int 4790 hns3_update_copper_link_info(struct hns3_hw *hw) 4791 { 4792 struct hns3_mac *mac = &hw->mac; 4793 struct hns3_mac mac_info; 4794 int ret; 4795 4796 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4797 ret = hns3_get_copper_phy_params(hw, &mac_info); 4798 if (ret) 4799 return ret; 4800 4801 if (mac_info.link_speed != mac->link_speed) { 4802 ret = hns3_port_shaper_update(hw, mac_info.link_speed); 4803 if (ret) 4804 return ret; 4805 } 4806 4807 mac->link_speed = mac_info.link_speed; 4808 mac->link_duplex = mac_info.link_duplex; 4809 mac->link_autoneg = mac_info.link_autoneg; 4810 mac->supported_speed = mac_info.supported_speed; 4811 mac->advertising = mac_info.advertising; 4812 mac->lp_advertising = mac_info.lp_advertising; 4813 mac->support_autoneg = mac_info.support_autoneg; 4814 4815 return 0; 4816 } 4817 4818 static int 4819 hns3_update_link_info(struct rte_eth_dev *eth_dev) 4820 { 4821 struct hns3_adapter *hns = eth_dev->data->dev_private; 4822 struct hns3_hw *hw = &hns->hw; 4823 int ret = 0; 4824 4825 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) 4826 ret = hns3_update_copper_link_info(hw); 4827 else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) 4828 ret =
hns3_update_fiber_link_info(hw); 4829 4830 return ret; 4831 } 4832 4833 static int 4834 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) 4835 { 4836 struct hns3_config_mac_mode_cmd *req; 4837 struct hns3_cmd_desc desc; 4838 uint32_t loop_en = 0; 4839 uint8_t val = 0; 4840 int ret; 4841 4842 req = (struct hns3_config_mac_mode_cmd *)desc.data; 4843 4844 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); 4845 if (enable) 4846 val = 1; 4847 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); 4848 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); 4849 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); 4850 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); 4851 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); 4852 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); 4853 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); 4854 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); 4855 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); 4856 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); 4857 4858 /* 4859 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC 4860 * when receiving frames. Otherwise, CRC will be stripped. 4861 */ 4862 if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) 4863 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); 4864 else 4865 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); 4866 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); 4867 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); 4868 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); 4869 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); 4870 4871 ret = hns3_cmd_send(hw, &desc, 1); 4872 if (ret) 4873 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret); 4874 4875 return ret; 4876 } 4877 4878 static int 4879 hns3_get_mac_link_status(struct hns3_hw *hw) 4880 { 4881 struct hns3_link_status_cmd *req; 4882 struct hns3_cmd_desc desc; 4883 int link_status; 4884 int ret; 4885 4886 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); 4887 ret = hns3_cmd_send(hw, &desc, 1); 4888 if (ret) { 4889 hns3_err(hw, "get link status cmd failed %d", ret); 4890 return ETH_LINK_DOWN; 4891 } 4892 4893 req = (struct hns3_link_status_cmd *)desc.data; 4894 link_status = req->status & HNS3_LINK_STATUS_UP_M; 4895 4896 return !!link_status; 4897 } 4898 4899 static bool 4900 hns3_update_link_status(struct hns3_hw *hw) 4901 { 4902 int state; 4903 4904 state = hns3_get_mac_link_status(hw); 4905 if (state != hw->mac.link_status) { 4906 hw->mac.link_status = state; 4907 hns3_warn(hw, "Link status change to %s!", state ? 
"up" : "down"); 4908 return true; 4909 } 4910 4911 return false; 4912 } 4913 4914 void 4915 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query) 4916 { 4917 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 4918 struct rte_eth_link new_link; 4919 int ret; 4920 4921 if (query) 4922 hns3_update_port_link_info(dev); 4923 4924 memset(&new_link, 0, sizeof(new_link)); 4925 hns3_setup_linkstatus(dev, &new_link); 4926 4927 ret = rte_eth_linkstatus_set(dev, &new_link); 4928 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0) 4929 hns3_start_report_lse(dev); 4930 } 4931 4932 static void 4933 hns3_service_handler(void *param) 4934 { 4935 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 4936 struct hns3_adapter *hns = eth_dev->data->dev_private; 4937 struct hns3_hw *hw = &hns->hw; 4938 4939 if (!hns3_is_reset_pending(hns)) 4940 hns3_update_linkstatus_and_event(hw, true); 4941 else 4942 hns3_warn(hw, "Cancel the query when reset is pending"); 4943 4944 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); 4945 } 4946 4947 static int 4948 hns3_init_hardware(struct hns3_adapter *hns) 4949 { 4950 struct hns3_hw *hw = &hns->hw; 4951 int ret; 4952 4953 ret = hns3_map_tqp(hw); 4954 if (ret) { 4955 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); 4956 return ret; 4957 } 4958 4959 ret = hns3_init_umv_space(hw); 4960 if (ret) { 4961 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); 4962 return ret; 4963 } 4964 4965 ret = hns3_mac_init(hw); 4966 if (ret) { 4967 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); 4968 goto err_mac_init; 4969 } 4970 4971 ret = hns3_init_mgr_tbl(hw); 4972 if (ret) { 4973 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); 4974 goto err_mac_init; 4975 } 4976 4977 ret = hns3_promisc_init(hw); 4978 if (ret) { 4979 PMD_INIT_LOG(ERR, "Failed to init promisc: %d", 4980 ret); 4981 goto err_mac_init; 4982 } 4983 4984 ret = hns3_init_vlan_config(hns); 4985 if (ret) { 4986 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); 4987 goto err_mac_init; 4988 } 4989 4990 ret = hns3_dcb_init(hw); 4991 if (ret) { 4992 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); 4993 goto err_mac_init; 4994 } 4995 4996 ret = hns3_init_fd_config(hns); 4997 if (ret) { 4998 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); 4999 goto err_mac_init; 5000 } 5001 5002 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); 5003 if (ret) { 5004 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); 5005 goto err_mac_init; 5006 } 5007 5008 ret = hns3_config_gro(hw, false); 5009 if (ret) { 5010 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 5011 goto err_mac_init; 5012 } 5013 5014 /* 5015 * In the initialization clearing the all hardware mapping relationship 5016 * configurations between queues and interrupt vectors is needed, so 5017 * some error caused by the residual configurations, such as the 5018 * unexpected interrupt, can be avoid. 
5019 */ 5020 ret = hns3_init_ring_with_vector(hw); 5021 if (ret) { 5022 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); 5023 goto err_mac_init; 5024 } 5025 5026 return 0; 5027 5028 err_mac_init: 5029 hns3_uninit_umv_space(hw); 5030 return ret; 5031 } 5032 5033 static int 5034 hns3_clear_hw(struct hns3_hw *hw) 5035 { 5036 struct hns3_cmd_desc desc; 5037 int ret; 5038 5039 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false); 5040 5041 ret = hns3_cmd_send(hw, &desc, 1); 5042 if (ret && ret != -EOPNOTSUPP) 5043 return ret; 5044 5045 return 0; 5046 } 5047 5048 static void 5049 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable) 5050 { 5051 uint32_t val; 5052 5053 /* 5054 * The new firmware supports reporting more hardware error types in 5055 * MSI-X mode. These errors are defined as RAS errors in hardware 5056 * and belong to a different type from the MSI-X errors processed 5057 * by the network driver. 5058 * 5059 * The network driver should enable this error reporting during 5060 * initialization. 5060 */ 5061 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 5062 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0); 5063 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); 5064 } 5065 5066 static uint32_t 5067 hns3_set_fiber_default_support_speed(struct hns3_hw *hw) 5068 { 5069 struct hns3_mac *mac = &hw->mac; 5070 5071 switch (mac->link_speed) { 5072 case ETH_SPEED_NUM_1G: 5073 return HNS3_FIBER_LINK_SPEED_1G_BIT; 5074 case ETH_SPEED_NUM_10G: 5075 return HNS3_FIBER_LINK_SPEED_10G_BIT; 5076 case ETH_SPEED_NUM_25G: 5077 return HNS3_FIBER_LINK_SPEED_25G_BIT; 5078 case ETH_SPEED_NUM_40G: 5079 return HNS3_FIBER_LINK_SPEED_40G_BIT; 5080 case ETH_SPEED_NUM_50G: 5081 return HNS3_FIBER_LINK_SPEED_50G_BIT; 5082 case ETH_SPEED_NUM_100G: 5083 return HNS3_FIBER_LINK_SPEED_100G_BIT; 5084 case ETH_SPEED_NUM_200G: 5085 return HNS3_FIBER_LINK_SPEED_200G_BIT; 5086 default: 5087 hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed); 5088 return 0; 5089 } 5090 } 5091 5092 /* 5093 * The validity of supported_speed for fiber and copper media types is 5094 * guaranteed by the following policy: 5095 * Copper: 5096 * Although the initialization of the PHY in the firmware may not be 5097 * completed, the firmware guarantees that supported_speed is 5098 * a valid value. 5099 * Fiber: 5100 * If the firmware version supports the active query of the 5101 * HNS3_OPC_GET_SFP_INFO opcode, supported_speed can be obtained 5102 * through it. If not, use the SFP's speed as the value of 5103 * supported_speed. 5104 */ 5105 static int 5106 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev) 5107 { 5108 struct hns3_adapter *hns = eth_dev->data->dev_private; 5109 struct hns3_hw *hw = &hns->hw; 5110 struct hns3_mac *mac = &hw->mac; 5111 int ret; 5112 5113 ret = hns3_update_link_info(eth_dev); 5114 if (ret) 5115 return ret; 5116 5117 if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) { 5118 /* 5119 * Some firmware does not support reporting supported_speed 5120 * and only reports the effective speed of the SFP. In this 5121 * case, it is necessary to use the SFP's speed as the supported_speed.
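* (For example, if the active link is a 25G SFP28 module and the firmware
* cannot report supported_speed, hns3_set_fiber_default_support_speed()
* below yields HNS3_FIBER_LINK_SPEED_25G_BIT.)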
5122 */ 5123 if (mac->supported_speed == 0) 5124 mac->supported_speed = 5125 hns3_set_fiber_default_support_speed(hw); 5126 } 5127 5128 return 0; 5129 } 5130 5131 static void 5132 hns3_get_fc_autoneg_capability(struct hns3_adapter *hns) 5133 { 5134 struct hns3_mac *mac = &hns->hw.mac; 5135 5136 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) { 5137 hns->pf.support_fc_autoneg = true; 5138 return; 5139 } 5140 5141 /* 5142 * Flow control auto-negotiation requires the cooperation of the driver 5143 * and firmware. Currently, the optical port does not support flow 5144 * control auto-negotiation. 5145 */ 5146 hns->pf.support_fc_autoneg = false; 5147 } 5148 5149 static int 5150 hns3_init_pf(struct rte_eth_dev *eth_dev) 5151 { 5152 struct rte_device *dev = eth_dev->device; 5153 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5154 struct hns3_adapter *hns = eth_dev->data->dev_private; 5155 struct hns3_hw *hw = &hns->hw; 5156 int ret; 5157 5158 PMD_INIT_FUNC_TRACE(); 5159 5160 /* Get the hardware I/O base address from the PCIe BAR2 IO space */ 5161 hw->io_base = pci_dev->mem_resource[2].addr; 5162 5163 /* Initialize the firmware command queue */ 5164 ret = hns3_cmd_init_queue(hw); 5165 if (ret) { 5166 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); 5167 goto err_cmd_init_queue; 5168 } 5169 5170 hns3_clear_all_event_cause(hw); 5171 5172 /* Initialize the firmware command */ 5173 ret = hns3_cmd_init(hw); 5174 if (ret) { 5175 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); 5176 goto err_cmd_init; 5177 } 5178 5179 hns3_tx_push_init(eth_dev); 5180 5181 /* 5182 * To ensure that the hardware environment is clean at initialization 5183 * time, the driver actively clears it, including the PF's and the 5184 * corresponding VFs' vlan, mac and flow table configurations, etc. 5185 */ 5186 */ 5187 ret = hns3_clear_hw(hw); 5188 if (ret) { 5189 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret); 5190 goto err_cmd_init; 5191 } 5192 5193 /* Clear the hardware statistics of the imissed registers.
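* Clearing them here keeps drops that occurred before this PMD instance
* started from being accounted to the application.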
*/ 5194 ret = hns3_update_imissed_stats(hw, true); 5195 if (ret) { 5196 hns3_err(hw, "clear imissed stats failed, ret = %d", ret); 5197 goto err_cmd_init; 5198 } 5199 5200 hns3_config_all_msix_error(hw, true); 5201 5202 ret = rte_intr_callback_register(&pci_dev->intr_handle, 5203 hns3_interrupt_handler, 5204 eth_dev); 5205 if (ret) { 5206 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); 5207 goto err_intr_callback_register; 5208 } 5209 5210 ret = hns3_ptp_init(hw); 5211 if (ret) 5212 goto err_get_config; 5213 5214 /* Enable interrupt */ 5215 rte_intr_enable(&pci_dev->intr_handle); 5216 hns3_pf_enable_irq0(hw); 5217 5218 /* Get configuration */ 5219 ret = hns3_get_configuration(hw); 5220 if (ret) { 5221 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); 5222 goto err_get_config; 5223 } 5224 5225 ret = hns3_tqp_stats_init(hw); 5226 if (ret) 5227 goto err_get_config; 5228 5229 ret = hns3_init_hardware(hns); 5230 if (ret) { 5231 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret); 5232 goto err_init_hw; 5233 } 5234 5235 /* Initialize flow director filter list & hash */ 5236 ret = hns3_fdir_filter_init(hns); 5237 if (ret) { 5238 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret); 5239 goto err_fdir; 5240 } 5241 5242 hns3_rss_set_default_args(hw); 5243 5244 ret = hns3_enable_hw_error_intr(hns, true); 5245 if (ret) { 5246 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", 5247 ret); 5248 goto err_enable_intr; 5249 } 5250 5251 ret = hns3_get_port_supported_speed(eth_dev); 5252 if (ret) { 5253 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported " 5254 "by device, ret = %d.", ret); 5255 goto err_supported_speed; 5256 } 5257 5258 hns3_get_fc_autoneg_capability(hns); 5259 5260 hns3_tm_conf_init(eth_dev); 5261 5262 return 0; 5263 5264 err_supported_speed: 5265 (void)hns3_enable_hw_error_intr(hns, false); 5266 err_enable_intr: 5267 hns3_fdir_filter_uninit(hns); 5268 err_fdir: 5269 hns3_uninit_umv_space(hw); 5270 err_init_hw: 5271 hns3_tqp_stats_uninit(hw); 5272 err_get_config: 5273 hns3_pf_disable_irq0(hw); 5274 rte_intr_disable(&pci_dev->intr_handle); 5275 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5276 eth_dev); 5277 err_intr_callback_register: 5278 err_cmd_init: 5279 hns3_cmd_uninit(hw); 5280 hns3_cmd_destroy_queue(hw); 5281 err_cmd_init_queue: 5282 hw->io_base = NULL; 5283 5284 return ret; 5285 } 5286 5287 static void 5288 hns3_uninit_pf(struct rte_eth_dev *eth_dev) 5289 { 5290 struct hns3_adapter *hns = eth_dev->data->dev_private; 5291 struct rte_device *dev = eth_dev->device; 5292 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5293 struct hns3_hw *hw = &hns->hw; 5294 5295 PMD_INIT_FUNC_TRACE(); 5296 5297 hns3_tm_conf_uninit(eth_dev); 5298 hns3_enable_hw_error_intr(hns, false); 5299 hns3_rss_uninit(hns); 5300 (void)hns3_config_gro(hw, false); 5301 hns3_promisc_uninit(hw); 5302 hns3_fdir_filter_uninit(hns); 5303 hns3_uninit_umv_space(hw); 5304 hns3_tqp_stats_uninit(hw); 5305 hns3_config_mac_tnl_int(hw, false); 5306 hns3_pf_disable_irq0(hw); 5307 rte_intr_disable(&pci_dev->intr_handle); 5308 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5309 eth_dev); 5310 hns3_config_all_msix_error(hw, false); 5311 hns3_cmd_uninit(hw); 5312 hns3_cmd_destroy_queue(hw); 5313 hw->io_base = NULL; 5314 } 5315 5316 static uint32_t 5317 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds) 5318 { 5319 uint32_t speed_bit; 5320 5321 switch (link_speeds & ~ETH_LINK_SPEED_FIXED) { 5322 case ETH_LINK_SPEED_10M: 5323 
speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT; 5324 break; 5325 case ETH_LINK_SPEED_10M_HD: 5326 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT; 5327 break; 5328 case ETH_LINK_SPEED_100M: 5329 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT; 5330 break; 5331 case ETH_LINK_SPEED_100M_HD: 5332 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT; 5333 break; 5334 case ETH_LINK_SPEED_1G: 5335 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT; 5336 break; 5337 default: 5338 speed_bit = 0; 5339 break; 5340 } 5341 5342 return speed_bit; 5343 } 5344 5345 static uint32_t 5346 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds) 5347 { 5348 uint32_t speed_bit; 5349 5350 switch (link_speeds & ~ETH_LINK_SPEED_FIXED) { 5351 case ETH_LINK_SPEED_1G: 5352 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT; 5353 break; 5354 case ETH_LINK_SPEED_10G: 5355 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT; 5356 break; 5357 case ETH_LINK_SPEED_25G: 5358 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT; 5359 break; 5360 case ETH_LINK_SPEED_40G: 5361 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT; 5362 break; 5363 case ETH_LINK_SPEED_50G: 5364 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT; 5365 break; 5366 case ETH_LINK_SPEED_100G: 5367 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT; 5368 break; 5369 case ETH_LINK_SPEED_200G: 5370 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT; 5371 break; 5372 default: 5373 speed_bit = 0; 5374 break; 5375 } 5376 5377 return speed_bit; 5378 } 5379 5380 static int 5381 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) 5382 { 5383 struct hns3_mac *mac = &hw->mac; 5384 uint32_t supported_speed = mac->supported_speed; 5385 uint32_t speed_bit = 0; 5386 5387 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 5388 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds); 5389 else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) 5390 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds); 5391 5392 if (!(speed_bit & supported_speed)) { 5393 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.", 5394 link_speeds); 5395 return -EINVAL; 5396 } 5397 5398 return 0; 5399 } 5400 5401 static inline uint32_t 5402 hns3_get_link_speed(uint32_t link_speeds) 5403 { 5404 uint32_t speed = ETH_SPEED_NUM_NONE; 5405 5406 if (link_speeds & ETH_LINK_SPEED_10M || 5407 link_speeds & ETH_LINK_SPEED_10M_HD) 5408 speed = ETH_SPEED_NUM_10M; 5409 if (link_speeds & ETH_LINK_SPEED_100M || 5410 link_speeds & ETH_LINK_SPEED_100M_HD) 5411 speed = ETH_SPEED_NUM_100M; 5412 if (link_speeds & ETH_LINK_SPEED_1G) 5413 speed = ETH_SPEED_NUM_1G; 5414 if (link_speeds & ETH_LINK_SPEED_10G) 5415 speed = ETH_SPEED_NUM_10G; 5416 if (link_speeds & ETH_LINK_SPEED_25G) 5417 speed = ETH_SPEED_NUM_25G; 5418 if (link_speeds & ETH_LINK_SPEED_40G) 5419 speed = ETH_SPEED_NUM_40G; 5420 if (link_speeds & ETH_LINK_SPEED_50G) 5421 speed = ETH_SPEED_NUM_50G; 5422 if (link_speeds & ETH_LINK_SPEED_100G) 5423 speed = ETH_SPEED_NUM_100G; 5424 if (link_speeds & ETH_LINK_SPEED_200G) 5425 speed = ETH_SPEED_NUM_200G; 5426 5427 return speed; 5428 } 5429 5430 static uint8_t 5431 hns3_get_link_duplex(uint32_t link_speeds) 5432 { 5433 if ((link_speeds & ETH_LINK_SPEED_10M_HD) || 5434 (link_speeds & ETH_LINK_SPEED_100M_HD)) 5435 return ETH_LINK_HALF_DUPLEX; 5436 else 5437 return ETH_LINK_FULL_DUPLEX; 5438 } 5439 5440 static int 5441 hns3_set_copper_port_link_speed(struct hns3_hw *hw, 5442 struct hns3_set_link_speed_cfg *cfg) 5443 { 5444 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 5445 struct hns3_phy_params_bd0_cmd *req; 5446 uint16_t i; 5447 5448 for (i = 
0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 5449 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 5450 false); 5451 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 5452 } 5453 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false); 5454 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 5455 req->autoneg = cfg->autoneg; 5456 5457 /* 5458 * When auto-negotiation is enabled, the full speed capability is 5459 * advertised for negotiation. 5460 */ 5461 if (cfg->autoneg) { 5462 req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT | 5463 HNS3_PHY_LINK_SPEED_10M_HD_BIT | 5464 HNS3_PHY_LINK_SPEED_100M_BIT | 5465 HNS3_PHY_LINK_SPEED_100M_HD_BIT | 5466 HNS3_PHY_LINK_SPEED_1000M_BIT; 5467 } else { 5468 req->speed = cfg->speed; 5469 req->duplex = cfg->duplex; 5470 } 5471 5472 return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 5473 } 5474 5475 static int 5476 hns3_set_autoneg(struct hns3_hw *hw, bool enable) 5477 { 5478 struct hns3_config_auto_neg_cmd *req; 5479 struct hns3_cmd_desc desc; 5480 uint32_t flag = 0; 5481 int ret; 5482 5483 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false); 5484 5485 req = (struct hns3_config_auto_neg_cmd *)desc.data; 5486 if (enable) 5487 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1); 5488 req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag); 5489 5490 ret = hns3_cmd_send(hw, &desc, 1); 5491 if (ret) 5492 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret); 5493 5494 return ret; 5495 } 5496 5497 static int 5498 hns3_set_fiber_port_link_speed(struct hns3_hw *hw, 5499 struct hns3_set_link_speed_cfg *cfg) 5500 { 5501 int ret; 5502 5503 if (hw->mac.support_autoneg) { 5504 ret = hns3_set_autoneg(hw, cfg->autoneg); 5505 if (ret) { 5506 hns3_err(hw, "failed to configure auto-negotiation."); 5507 return ret; 5508 } 5509 5510 /* 5511 * To enable auto-negotiation we only need to turn the 5512 * auto-negotiation switch on; the firmware then advertises 5513 * all speed capabilities. 5514 */ 5515 if (cfg->autoneg) 5516 return 0; 5517 } 5518 5519 /* 5520 * Some hardware doesn't support auto-negotiation, but a user may not 5521 * have configured link_speeds (default 0), which means auto-negotiation. 5522 * In this case, the function should return success. 5523 */ 5524 if (cfg->autoneg) 5525 return 0; 5526 5527 return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex); 5528 } 5529 5530 static int 5531 hns3_set_port_link_speed(struct hns3_hw *hw, 5532 struct hns3_set_link_speed_cfg *cfg) 5533 { 5534 int ret; 5535 5536 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) { 5537 #if defined(RTE_HNS3_ONLY_1630_FPGA) 5538 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 5539 if (pf->is_tmp_phy) 5540 return 0; 5541 #endif 5542 5543 ret = hns3_set_copper_port_link_speed(hw, cfg); 5544 if (ret) { 5545 hns3_err(hw, "failed to set copper port link speed, " 5546 "ret = %d.", ret); 5547 return ret; 5548 } 5549 } else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) { 5550 ret = hns3_set_fiber_port_link_speed(hw, cfg); 5551 if (ret) { 5552 hns3_err(hw, "failed to set fiber port link speed, " 5553 "ret = %d.", ret); 5554 return ret; 5555 } 5556 } 5557 5558 return 0; 5559 } 5560 5561 static int 5562 hns3_apply_link_speed(struct hns3_hw *hw) 5563 { 5564 struct rte_eth_conf *conf = &hw->data->dev_conf; 5565 struct hns3_set_link_speed_cfg cfg; 5566 5567 memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg)); 5568 cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
5569 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 5570 if (cfg.autoneg != ETH_LINK_AUTONEG) { 5571 cfg.speed = hns3_get_link_speed(conf->link_speeds); 5572 cfg.duplex = hns3_get_link_duplex(conf->link_speeds); 5573 } 5574 5575 return hns3_set_port_link_speed(hw, &cfg); 5576 } 5577 5578 static int 5579 hns3_do_start(struct hns3_adapter *hns, bool reset_queue) 5580 { 5581 struct hns3_hw *hw = &hns->hw; 5582 int ret; 5583 5584 ret = hns3_update_queue_map_configure(hns); 5585 if (ret) { 5586 hns3_err(hw, "failed to update queue mapping configuration, ret = %d", 5587 ret); 5588 return ret; 5589 } 5590 5591 /* Note: hns3_tm_conf_update must be called after configuring DCB. */ 5592 ret = hns3_tm_conf_update(hw); 5593 if (ret) { 5594 PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret); 5595 return ret; 5596 } 5597 5598 hns3_enable_rxd_adv_layout(hw); 5599 5600 ret = hns3_init_queues(hns, reset_queue); 5601 if (ret) { 5602 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret); 5603 return ret; 5604 } 5605 5606 ret = hns3_cfg_mac_mode(hw, true); 5607 if (ret) { 5608 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret); 5609 goto err_config_mac_mode; 5610 } 5611 5612 ret = hns3_apply_link_speed(hw); 5613 if (ret) 5614 goto err_set_link_speed; 5615 5616 return 0; 5617 5618 err_set_link_speed: 5619 (void)hns3_cfg_mac_mode(hw, false); 5620 5621 err_config_mac_mode: 5622 hns3_dev_release_mbufs(hns); 5623 /* 5624 * This is the error path; hns3_reset_all_tqps logs its own error 5625 * message if something goes wrong, so there is no need to check its 5626 * return value. Keep ret as the error code that caused the 5627 * exception. 5628 */ 5629 (void)hns3_reset_all_tqps(hns); 5630 return ret; 5631 } 5632 5633 static int 5634 hns3_map_rx_interrupt(struct rte_eth_dev *dev) 5635 { 5636 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5637 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5638 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5639 uint16_t base = RTE_INTR_VEC_ZERO_OFFSET; 5640 uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5641 uint32_t intr_vector; 5642 uint16_t q_id; 5643 int ret; 5644 5645 /* 5646 * hns3 needs a separate interrupt to be used as the event interrupt, 5647 * which cannot be shared with the task queue pairs, so the kernel 5648 * driver needs to support multiple interrupt vectors. 5649 */ 5650 if (dev->data->dev_conf.intr_conf.rxq == 0 || 5651 !rte_intr_cap_multiple(intr_handle)) 5652 return 0; 5653 5654 rte_intr_disable(intr_handle); 5655 intr_vector = hw->used_rx_queues; 5656 /* create an event fd for each interrupt vector when MSI-X is used */ 5657 if (rte_intr_efd_enable(intr_handle, intr_vector)) 5658 return -EINVAL; 5659 5660 if (intr_handle->intr_vec == NULL) { 5661 intr_handle->intr_vec = 5662 rte_zmalloc("intr_vec", 5663 hw->used_rx_queues * sizeof(int), 0); 5664 if (intr_handle->intr_vec == NULL) { 5665 hns3_err(hw, "failed to allocate %u rx_queues intr_vec", 5666 hw->used_rx_queues); 5667 ret = -ENOMEM; 5668 goto alloc_intr_vec_error; 5669 } 5670 } 5671 5672 if (rte_intr_allow_others(intr_handle)) { 5673 vec = RTE_INTR_VEC_RXTX_OFFSET; 5674 base = RTE_INTR_VEC_RXTX_OFFSET; 5675 } 5676 5677 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5678 ret = hns3_bind_ring_with_vector(hw, vec, true, 5679 HNS3_RING_TYPE_RX, q_id); 5680 if (ret) 5681 goto bind_vector_error; 5682 intr_handle->intr_vec[q_id] = vec; 5683 /* 5684 * If there are not enough efds (e.g.
not enough interrupts), the 5685 * remaining queues will be bound to the last interrupt. 5686 */ 5687 if (vec < base + intr_handle->nb_efd - 1) 5688 vec++; 5689 } 5690 rte_intr_enable(intr_handle); 5691 return 0; 5692 5693 bind_vector_error: 5694 rte_free(intr_handle->intr_vec); 5695 intr_handle->intr_vec = NULL; 5696 alloc_intr_vec_error: 5697 rte_intr_efd_disable(intr_handle); 5698 return ret; 5699 } 5700 5701 static int 5702 hns3_restore_rx_interrupt(struct hns3_hw *hw) 5703 { 5704 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 5705 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5706 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5707 uint16_t q_id; 5708 int ret; 5709 5710 if (dev->data->dev_conf.intr_conf.rxq == 0) 5711 return 0; 5712 5713 if (rte_intr_dp_is_en(intr_handle)) { 5714 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5715 ret = hns3_bind_ring_with_vector(hw, 5716 intr_handle->intr_vec[q_id], true, 5717 HNS3_RING_TYPE_RX, q_id); 5718 if (ret) 5719 return ret; 5720 } 5721 } 5722 5723 return 0; 5724 } 5725 5726 static void 5727 hns3_restore_filter(struct rte_eth_dev *dev) 5728 { 5729 hns3_restore_rss_filter(dev); 5730 } 5731 5732 static int 5733 hns3_dev_start(struct rte_eth_dev *dev) 5734 { 5735 struct hns3_adapter *hns = dev->data->dev_private; 5736 struct hns3_hw *hw = &hns->hw; 5737 int ret; 5738 5739 PMD_INIT_FUNC_TRACE(); 5740 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) 5741 return -EBUSY; 5742 5743 rte_spinlock_lock(&hw->lock); 5744 hw->adapter_state = HNS3_NIC_STARTING; 5745 5746 ret = hns3_do_start(hns, true); 5747 if (ret) { 5748 hw->adapter_state = HNS3_NIC_CONFIGURED; 5749 rte_spinlock_unlock(&hw->lock); 5750 return ret; 5751 } 5752 ret = hns3_map_rx_interrupt(dev); 5753 if (ret) 5754 goto map_rx_inter_err; 5755 5756 /* 5757 * There are three registers used to control the status of a TQP 5758 * (which contains a pair of Tx queue and Rx queue) in the new version 5759 * of the network engine. One is used to control the enabling of the Tx 5760 * queue, another controls the enabling of the Rx queue, and the last 5761 * is the master switch controlling the enabling of the whole TQP. The 5762 * Tx register and the TQP register must both be enabled to enable a Tx 5763 * queue; the same applies to the Rx queue. For the older network 5764 * engine, this function only refreshes the enabled flag, which is used 5765 * to update the queue status in the DPDK framework. 5766 */ 5767 ret = hns3_start_all_txqs(dev); 5768 if (ret) 5769 goto map_rx_inter_err; 5770 5771 ret = hns3_start_all_rxqs(dev); 5772 if (ret) 5773 goto start_all_rxqs_fail; 5774 5775 hw->adapter_state = HNS3_NIC_STARTED; 5776 rte_spinlock_unlock(&hw->lock); 5777 5778 hns3_rx_scattered_calc(dev); 5779 hns3_set_rxtx_function(dev); 5780 hns3_mp_req_start_rxtx(dev); 5781 5782 hns3_restore_filter(dev); 5783 5784 /* Enable interrupt of all rx queues before enabling queues */ 5785 hns3_dev_all_rx_queue_intr_enable(hw, true); 5786 5787 /* 5788 * After the initialization has finished, enable the tqps to 5789 * receive/transmit packets and refresh the status of all queues.
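* Both steps are performed by hns3_start_tqps() below.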
5790 */ 5791 hns3_start_tqps(hw); 5792 5793 hns3_tm_dev_start_proc(hw); 5794 5795 if (dev->data->dev_conf.intr_conf.lsc != 0) 5796 hns3_dev_link_update(dev, 0); 5797 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); 5798 5799 hns3_info(hw, "hns3 dev started successfully!"); 5800 5801 return 0; 5802 5803 start_all_rxqs_fail: 5804 hns3_stop_all_txqs(dev); 5805 map_rx_inter_err: 5806 (void)hns3_do_stop(hns); 5807 hw->adapter_state = HNS3_NIC_CONFIGURED; 5808 rte_spinlock_unlock(&hw->lock); 5809 5810 return ret; 5811 } 5812 5813 static int 5814 hns3_do_stop(struct hns3_adapter *hns) 5815 { 5816 struct hns3_hw *hw = &hns->hw; 5817 int ret; 5818 5819 /* 5820 * The "hns3_do_stop" function will also be called by .stop_service to 5821 * prepare for a reset. At the time of a global or IMP reset, no 5822 * command can be sent to stop the Tx/Rx queues, and the mbufs in the 5823 * Tx/Rx queues may still be accessed during the reset process. So the 5824 * mbufs cannot be released during reset; they must be released after 5825 * the reset has completed. 5826 */ 5827 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) 5828 hns3_dev_release_mbufs(hns); 5829 5830 ret = hns3_cfg_mac_mode(hw, false); 5831 if (ret) 5832 return ret; 5833 hw->mac.link_status = ETH_LINK_DOWN; 5834 5835 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { 5836 hns3_configure_all_mac_addr(hns, true); 5837 ret = hns3_reset_all_tqps(hns); 5838 if (ret) { 5839 hns3_err(hw, "failed to reset all queues, ret = %d.", 5840 ret); 5841 return ret; 5842 } 5843 } 5844 hw->mac.default_addr_setted = false; 5845 return 0; 5846 } 5847 5848 static void 5849 hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) 5850 { 5851 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5852 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5853 struct hns3_adapter *hns = dev->data->dev_private; 5854 struct hns3_hw *hw = &hns->hw; 5855 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; 5856 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5857 uint16_t q_id; 5858 5859 if (dev->data->dev_conf.intr_conf.rxq == 0) 5860 return; 5861 5862 /* unmap the ring with vector */ 5863 if (rte_intr_allow_others(intr_handle)) { 5864 vec = RTE_INTR_VEC_RXTX_OFFSET; 5865 base = RTE_INTR_VEC_RXTX_OFFSET; 5866 } 5867 if (rte_intr_dp_is_en(intr_handle)) { 5868 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5869 (void)hns3_bind_ring_with_vector(hw, vec, false, 5870 HNS3_RING_TYPE_RX, 5871 q_id); 5872 if (vec < base + intr_handle->nb_efd - 1) 5873 vec++; 5874 } 5875 } 5876 /* Clean datapath event and queue/vec mapping */ 5877 rte_intr_efd_disable(intr_handle); 5878 if (intr_handle->intr_vec) { 5879 rte_free(intr_handle->intr_vec); 5880 intr_handle->intr_vec = NULL; 5881 } 5882 } 5883 5884 static int 5885 hns3_dev_stop(struct rte_eth_dev *dev) 5886 { 5887 struct hns3_adapter *hns = dev->data->dev_private; 5888 struct hns3_hw *hw = &hns->hw; 5889 5890 PMD_INIT_FUNC_TRACE(); 5891 dev->data->dev_started = 0; 5892 5893 hw->adapter_state = HNS3_NIC_STOPPING; 5894 hns3_set_rxtx_function(dev); 5895 rte_wmb(); 5896 /* Disable datapath on secondary process. */ 5897 hns3_mp_req_stop_rxtx(dev); 5898 /* Prevent crashes when queues are still in use.
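The rte_delay_ms(hw->cfg_max_queues) call below waits roughly one
millisecond per configured queue so that in-flight datapath threads can
observe the rx/tx function update made above before the queues are released.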
 */
	rte_delay_ms(hw->cfg_max_queues);

	rte_spinlock_lock(&hw->lock);
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		hns3_tm_dev_stop_proc(hw);
		hns3_config_mac_tnl_int(hw, false);
		hns3_stop_tqps(hw);
		hns3_do_stop(hns);
		hns3_unmap_rx_interrupt(dev);
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	}
	hns3_rx_scattered_reset(dev);
	rte_eal_alarm_cancel(hns3_service_handler, dev);
	hns3_stop_report_lse(dev);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_dev_close(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		rte_free(eth_dev->process_private);
		eth_dev->process_private = NULL;
		return 0;
	}

	if (hw->adapter_state == HNS3_NIC_STARTED)
		ret = hns3_dev_stop(eth_dev);

	hw->adapter_state = HNS3_NIC_CLOSING;
	hns3_reset_abort(hns);
	hw->adapter_state = HNS3_NIC_CLOSED;

	hns3_configure_all_mc_mac_addr(hns, true);
	hns3_remove_all_vlan_table(hns);
	hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
	hns3_uninit_pf(eth_dev);
	hns3_free_all_queues(eth_dev);
	rte_free(hw->reset.wait_data);
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;
	hns3_mp_uninit_primary();
	hns3_warn(hw, "Close port %u finished", hw->data->port_id);

	return ret;
}

static void
hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause,
				   bool *tx_pause)
{
	struct hns3_mac *mac = &hw->mac;
	uint32_t advertising = mac->advertising;
	uint32_t lp_advertising = mac->lp_advertising;
	*rx_pause = false;
	*tx_pause = false;

	if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) {
		*rx_pause = true;
		*tx_pause = true;
	} else if (advertising & lp_advertising &
		   HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) {
		if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
			*rx_pause = true;
		else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
			*tx_pause = true;
	}
}

static enum hns3_fc_mode
hns3_get_autoneg_fc_mode(struct hns3_hw *hw)
{
	enum hns3_fc_mode current_mode;
	bool rx_pause = false;
	bool tx_pause = false;

	switch (hw->mac.media_type) {
	case HNS3_MEDIA_TYPE_COPPER:
		hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause);
		break;

	/*
	 * Flow control auto-negotiation is not supported for fiber and
	 * backplane media type.
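	 * For those ports the driver simply reports the flow control mode
	 * that the user last requested.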
5989 */ 5990 case HNS3_MEDIA_TYPE_FIBER: 5991 case HNS3_MEDIA_TYPE_BACKPLANE: 5992 hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled."); 5993 current_mode = hw->requested_fc_mode; 5994 goto out; 5995 default: 5996 hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).", 5997 hw->mac.media_type); 5998 current_mode = HNS3_FC_NONE; 5999 goto out; 6000 } 6001 6002 if (rx_pause && tx_pause) 6003 current_mode = HNS3_FC_FULL; 6004 else if (rx_pause) 6005 current_mode = HNS3_FC_RX_PAUSE; 6006 else if (tx_pause) 6007 current_mode = HNS3_FC_TX_PAUSE; 6008 else 6009 current_mode = HNS3_FC_NONE; 6010 6011 out: 6012 return current_mode; 6013 } 6014 6015 static enum hns3_fc_mode 6016 hns3_get_current_fc_mode(struct rte_eth_dev *dev) 6017 { 6018 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6019 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6020 struct hns3_mac *mac = &hw->mac; 6021 6022 /* 6023 * When the flow control mode is obtained, the device may not complete 6024 * auto-negotiation. It is necessary to wait for link establishment. 6025 */ 6026 (void)hns3_dev_link_update(dev, 1); 6027 6028 /* 6029 * If the link auto-negotiation of the nic is disabled, or the flow 6030 * control auto-negotiation is not supported, the forced flow control 6031 * mode is used. 6032 */ 6033 if (mac->link_autoneg == 0 || !pf->support_fc_autoneg) 6034 return hw->requested_fc_mode; 6035 6036 return hns3_get_autoneg_fc_mode(hw); 6037 } 6038 6039 static int 6040 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 6041 { 6042 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6043 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6044 enum hns3_fc_mode current_mode; 6045 6046 current_mode = hns3_get_current_fc_mode(dev); 6047 switch (current_mode) { 6048 case HNS3_FC_FULL: 6049 fc_conf->mode = RTE_FC_FULL; 6050 break; 6051 case HNS3_FC_TX_PAUSE: 6052 fc_conf->mode = RTE_FC_TX_PAUSE; 6053 break; 6054 case HNS3_FC_RX_PAUSE: 6055 fc_conf->mode = RTE_FC_RX_PAUSE; 6056 break; 6057 case HNS3_FC_NONE: 6058 default: 6059 fc_conf->mode = RTE_FC_NONE; 6060 break; 6061 } 6062 6063 fc_conf->pause_time = pf->pause_time; 6064 fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0; 6065 6066 return 0; 6067 } 6068 6069 static int 6070 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) 6071 { 6072 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 6073 6074 if (!pf->support_fc_autoneg) { 6075 if (autoneg != 0) { 6076 hns3_err(hw, "unsupported fc auto-negotiation setting."); 6077 return -EOPNOTSUPP; 6078 } 6079 6080 /* 6081 * Flow control auto-negotiation of the NIC is not supported, 6082 * but other auto-negotiation features may be supported. 6083 */ 6084 if (autoneg != hw->mac.link_autoneg) { 6085 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!"); 6086 return -EOPNOTSUPP; 6087 } 6088 6089 return 0; 6090 } 6091 6092 /* 6093 * If flow control auto-negotiation of the NIC is supported, all 6094 * auto-negotiation features are supported. 
6095 */ 6096 if (autoneg != hw->mac.link_autoneg) { 6097 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!"); 6098 return -EOPNOTSUPP; 6099 } 6100 6101 return 0; 6102 } 6103 6104 static int 6105 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 6106 { 6107 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6108 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6109 int ret; 6110 6111 if (fc_conf->high_water || fc_conf->low_water || 6112 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) { 6113 hns3_err(hw, "Unsupported flow control settings specified, " 6114 "high_water(%u), low_water(%u), send_xon(%u) and " 6115 "mac_ctrl_frame_fwd(%u) must be set to '0'", 6116 fc_conf->high_water, fc_conf->low_water, 6117 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd); 6118 return -EINVAL; 6119 } 6120 6121 ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg); 6122 if (ret) 6123 return ret; 6124 6125 if (!fc_conf->pause_time) { 6126 hns3_err(hw, "Invalid pause time %u setting.", 6127 fc_conf->pause_time); 6128 return -EINVAL; 6129 } 6130 6131 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 6132 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) { 6133 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. " 6134 "current_fc_status = %d", hw->current_fc_status); 6135 return -EOPNOTSUPP; 6136 } 6137 6138 if (hw->num_tc > 1 && !pf->support_multi_tc_pause) { 6139 hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported."); 6140 return -EOPNOTSUPP; 6141 } 6142 6143 rte_spinlock_lock(&hw->lock); 6144 ret = hns3_fc_enable(dev, fc_conf); 6145 rte_spinlock_unlock(&hw->lock); 6146 6147 return ret; 6148 } 6149 6150 static int 6151 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, 6152 struct rte_eth_pfc_conf *pfc_conf) 6153 { 6154 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6155 int ret; 6156 6157 if (!hns3_dev_dcb_supported(hw)) { 6158 hns3_err(hw, "This port does not support dcb configurations."); 6159 return -EOPNOTSUPP; 6160 } 6161 6162 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water || 6163 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) { 6164 hns3_err(hw, "Unsupported flow control settings specified, " 6165 "high_water(%u), low_water(%u), send_xon(%u) and " 6166 "mac_ctrl_frame_fwd(%u) must be set to '0'", 6167 pfc_conf->fc.high_water, pfc_conf->fc.low_water, 6168 pfc_conf->fc.send_xon, 6169 pfc_conf->fc.mac_ctrl_frame_fwd); 6170 return -EINVAL; 6171 } 6172 if (pfc_conf->fc.autoneg) { 6173 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 6174 return -EINVAL; 6175 } 6176 if (pfc_conf->fc.pause_time == 0) { 6177 hns3_err(hw, "Invalid pause time %u setting.", 6178 pfc_conf->fc.pause_time); 6179 return -EINVAL; 6180 } 6181 6182 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 6183 hw->current_fc_status == HNS3_FC_STATUS_PFC)) { 6184 hns3_err(hw, "MAC pause is enabled. Cannot set PFC." 
6185 "current_fc_status = %d", hw->current_fc_status); 6186 return -EOPNOTSUPP; 6187 } 6188 6189 rte_spinlock_lock(&hw->lock); 6190 ret = hns3_dcb_pfc_enable(dev, pfc_conf); 6191 rte_spinlock_unlock(&hw->lock); 6192 6193 return ret; 6194 } 6195 6196 static int 6197 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) 6198 { 6199 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6200 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6201 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; 6202 int i; 6203 6204 rte_spinlock_lock(&hw->lock); 6205 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) 6206 dcb_info->nb_tcs = pf->local_max_tc; 6207 else 6208 dcb_info->nb_tcs = 1; 6209 6210 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) 6211 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; 6212 for (i = 0; i < dcb_info->nb_tcs; i++) 6213 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; 6214 6215 for (i = 0; i < hw->num_tc; i++) { 6216 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; 6217 dcb_info->tc_queue.tc_txq[0][i].base = 6218 hw->tc_queue[i].tqp_offset; 6219 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; 6220 dcb_info->tc_queue.tc_txq[0][i].nb_queue = 6221 hw->tc_queue[i].tqp_count; 6222 } 6223 rte_spinlock_unlock(&hw->lock); 6224 6225 return 0; 6226 } 6227 6228 static int 6229 hns3_reinit_dev(struct hns3_adapter *hns) 6230 { 6231 struct hns3_hw *hw = &hns->hw; 6232 int ret; 6233 6234 ret = hns3_cmd_init(hw); 6235 if (ret) { 6236 hns3_err(hw, "Failed to init cmd: %d", ret); 6237 return ret; 6238 } 6239 6240 ret = hns3_reset_all_tqps(hns); 6241 if (ret) { 6242 hns3_err(hw, "Failed to reset all queues: %d", ret); 6243 return ret; 6244 } 6245 6246 ret = hns3_init_hardware(hns); 6247 if (ret) { 6248 hns3_err(hw, "Failed to init hardware: %d", ret); 6249 return ret; 6250 } 6251 6252 ret = hns3_enable_hw_error_intr(hns, true); 6253 if (ret) { 6254 hns3_err(hw, "fail to enable hw error interrupts: %d", 6255 ret); 6256 return ret; 6257 } 6258 hns3_info(hw, "Reset done, driver initialization finished."); 6259 6260 return 0; 6261 } 6262 6263 static bool 6264 is_pf_reset_done(struct hns3_hw *hw) 6265 { 6266 uint32_t val, reg, reg_bit; 6267 6268 switch (hw->reset.level) { 6269 case HNS3_IMP_RESET: 6270 reg = HNS3_GLOBAL_RESET_REG; 6271 reg_bit = HNS3_IMP_RESET_BIT; 6272 break; 6273 case HNS3_GLOBAL_RESET: 6274 reg = HNS3_GLOBAL_RESET_REG; 6275 reg_bit = HNS3_GLOBAL_RESET_BIT; 6276 break; 6277 case HNS3_FUNC_RESET: 6278 reg = HNS3_FUN_RST_ING; 6279 reg_bit = HNS3_FUN_RST_ING_B; 6280 break; 6281 case HNS3_FLR_RESET: 6282 default: 6283 hns3_err(hw, "Wait for unsupported reset level: %d", 6284 hw->reset.level); 6285 return true; 6286 } 6287 val = hns3_read_dev(hw, reg); 6288 if (hns3_get_bit(val, reg_bit)) 6289 return false; 6290 else 6291 return true; 6292 } 6293 6294 bool 6295 hns3_is_reset_pending(struct hns3_adapter *hns) 6296 { 6297 struct hns3_hw *hw = &hns->hw; 6298 enum hns3_reset_level reset; 6299 6300 hns3_check_event_cause(hns, NULL); 6301 reset = hns3_get_reset_level(hns, &hw->reset.pending); 6302 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6303 hw->reset.level < reset) { 6304 hns3_warn(hw, "High level reset %d is pending", reset); 6305 return true; 6306 } 6307 reset = hns3_get_reset_level(hns, &hw->reset.request); 6308 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6309 hw->reset.level < reset) { 6310 hns3_warn(hw, "High level reset %d is 
request", reset); 6311 return true; 6312 } 6313 return false; 6314 } 6315 6316 static int 6317 hns3_wait_hardware_ready(struct hns3_adapter *hns) 6318 { 6319 struct hns3_hw *hw = &hns->hw; 6320 struct hns3_wait_data *wait_data = hw->reset.wait_data; 6321 struct timeval tv; 6322 6323 if (wait_data->result == HNS3_WAIT_SUCCESS) 6324 return 0; 6325 else if (wait_data->result == HNS3_WAIT_TIMEOUT) { 6326 hns3_clock_gettime(&tv); 6327 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", 6328 tv.tv_sec, tv.tv_usec); 6329 return -ETIME; 6330 } else if (wait_data->result == HNS3_WAIT_REQUEST) 6331 return -EAGAIN; 6332 6333 wait_data->hns = hns; 6334 wait_data->check_completion = is_pf_reset_done; 6335 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT * 6336 HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms(); 6337 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC; 6338 wait_data->count = HNS3_RESET_WAIT_CNT; 6339 wait_data->result = HNS3_WAIT_REQUEST; 6340 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); 6341 return -EAGAIN; 6342 } 6343 6344 static int 6345 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) 6346 { 6347 struct hns3_cmd_desc desc; 6348 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data; 6349 6350 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); 6351 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1); 6352 req->fun_reset_vfid = func_id; 6353 6354 return hns3_cmd_send(hw, &desc, 1); 6355 } 6356 6357 static int 6358 hns3_imp_reset_cmd(struct hns3_hw *hw) 6359 { 6360 struct hns3_cmd_desc desc; 6361 6362 hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); 6363 desc.data[0] = 0xeedd; 6364 6365 return hns3_cmd_send(hw, &desc, 1); 6366 } 6367 6368 static void 6369 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) 6370 { 6371 struct hns3_hw *hw = &hns->hw; 6372 struct timeval tv; 6373 uint32_t val; 6374 6375 hns3_clock_gettime(&tv); 6376 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) || 6377 hns3_read_dev(hw, HNS3_FUN_RST_ING)) { 6378 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld", 6379 tv.tv_sec, tv.tv_usec); 6380 return; 6381 } 6382 6383 switch (reset_level) { 6384 case HNS3_IMP_RESET: 6385 hns3_imp_reset_cmd(hw); 6386 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", 6387 tv.tv_sec, tv.tv_usec); 6388 break; 6389 case HNS3_GLOBAL_RESET: 6390 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG); 6391 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1); 6392 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val); 6393 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld", 6394 tv.tv_sec, tv.tv_usec); 6395 break; 6396 case HNS3_FUNC_RESET: 6397 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld", 6398 tv.tv_sec, tv.tv_usec); 6399 /* schedule again to check later */ 6400 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending); 6401 hns3_schedule_reset(hns); 6402 break; 6403 default: 6404 hns3_warn(hw, "Unsupported reset level: %d", reset_level); 6405 return; 6406 } 6407 hns3_atomic_clear_bit(reset_level, &hw->reset.request); 6408 } 6409 6410 static enum hns3_reset_level 6411 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) 6412 { 6413 struct hns3_hw *hw = &hns->hw; 6414 enum hns3_reset_level reset_level = HNS3_NONE_RESET; 6415 6416 /* Return the highest priority reset level amongst all */ 6417 if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels)) 6418 reset_level = HNS3_IMP_RESET; 6419 else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels)) 6420 reset_level = 
HNS3_GLOBAL_RESET; 6421 else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels)) 6422 reset_level = HNS3_FUNC_RESET; 6423 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels)) 6424 reset_level = HNS3_FLR_RESET; 6425 6426 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level) 6427 return HNS3_NONE_RESET; 6428 6429 return reset_level; 6430 } 6431 6432 static void 6433 hns3_record_imp_error(struct hns3_adapter *hns) 6434 { 6435 struct hns3_hw *hw = &hns->hw; 6436 uint32_t reg_val; 6437 6438 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 6439 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) { 6440 hns3_warn(hw, "Detected IMP RD poison!"); 6441 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0); 6442 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 6443 } 6444 6445 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) { 6446 hns3_warn(hw, "Detected IMP CMDQ error!"); 6447 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0); 6448 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 6449 } 6450 } 6451 6452 static int 6453 hns3_prepare_reset(struct hns3_adapter *hns) 6454 { 6455 struct hns3_hw *hw = &hns->hw; 6456 uint32_t reg_val; 6457 int ret; 6458 6459 switch (hw->reset.level) { 6460 case HNS3_FUNC_RESET: 6461 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID); 6462 if (ret) 6463 return ret; 6464 6465 /* 6466 * After performaning pf reset, it is not necessary to do the 6467 * mailbox handling or send any command to firmware, because 6468 * any mailbox handling or command to firmware is only valid 6469 * after hns3_cmd_init is called. 6470 */ 6471 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 6472 hw->reset.stats.request_cnt++; 6473 break; 6474 case HNS3_IMP_RESET: 6475 hns3_record_imp_error(hns); 6476 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 6477 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val | 6478 BIT(HNS3_VECTOR0_IMP_RESET_INT_B)); 6479 break; 6480 default: 6481 break; 6482 } 6483 return 0; 6484 } 6485 6486 static int 6487 hns3_set_rst_done(struct hns3_hw *hw) 6488 { 6489 struct hns3_pf_rst_done_cmd *req; 6490 struct hns3_cmd_desc desc; 6491 6492 req = (struct hns3_pf_rst_done_cmd *)desc.data; 6493 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false); 6494 req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT; 6495 return hns3_cmd_send(hw, &desc, 1); 6496 } 6497 6498 static int 6499 hns3_stop_service(struct hns3_adapter *hns) 6500 { 6501 struct hns3_hw *hw = &hns->hw; 6502 struct rte_eth_dev *eth_dev; 6503 6504 eth_dev = &rte_eth_devices[hw->data->port_id]; 6505 hw->mac.link_status = ETH_LINK_DOWN; 6506 if (hw->adapter_state == HNS3_NIC_STARTED) { 6507 rte_eal_alarm_cancel(hns3_service_handler, eth_dev); 6508 hns3_update_linkstatus_and_event(hw, false); 6509 } 6510 6511 hns3_set_rxtx_function(eth_dev); 6512 rte_wmb(); 6513 /* Disable datapath on secondary process. */ 6514 hns3_mp_req_stop_rxtx(eth_dev); 6515 rte_delay_ms(hw->cfg_max_queues); 6516 6517 rte_spinlock_lock(&hw->lock); 6518 if (hns->hw.adapter_state == HNS3_NIC_STARTED || 6519 hw->adapter_state == HNS3_NIC_STOPPING) { 6520 hns3_enable_all_queues(hw, false); 6521 hns3_do_stop(hns); 6522 hw->reset.mbuf_deferred_free = true; 6523 } else 6524 hw->reset.mbuf_deferred_free = false; 6525 6526 /* 6527 * It is cumbersome for hardware to pick-and-choose entries for deletion 6528 * from table space. 
 Hence, for a function reset, software intervention is
	 * required to delete the entries.
	 */
	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
		hns3_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_start_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	if (hw->reset.level == HNS3_IMP_RESET ||
	    hw->reset.level == HNS3_GLOBAL_RESET)
		hns3_set_rst_done(hw);
	eth_dev = &rte_eth_devices[hw->data->port_id];
	hns3_set_rxtx_function(eth_dev);
	hns3_mp_req_start_rxtx(eth_dev);
	if (hw->adapter_state == HNS3_NIC_STARTED) {
		/*
		 * The parent function of this API already holds hns3_hw.lock.
		 * hns3_service_handler may report a link status event (LSE);
		 * in a bonding application that report calls back into driver
		 * ops which may acquire hns3_hw.lock again, leading to a
		 * deadlock. So the call to hns3_service_handler is deferred
		 * to avoid the deadlock.
		 */
		rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
				  hns3_service_handler, eth_dev);

		/* Enable interrupt of all rx queues before enabling queues */
		hns3_dev_all_rx_queue_intr_enable(hw, true);
		/*
		 * The enable state of each rxq and txq will be recovered
		 * after reset, so they need to be restored before enabling
		 * all tqps.
		 */
		hns3_restore_tqp_enable_state(hw);
		/*
		 * After finishing the initialization, enable queues to
		 * receive and transmit packets.
		 */
		hns3_enable_all_queues(hw, true);
	}

	return 0;
}

static int
hns3_restore_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_configure_all_mac_addr(hns, false);
	if (ret)
		return ret;

	ret = hns3_configure_all_mc_mac_addr(hns, false);
	if (ret)
		goto err_mc_mac;

	ret = hns3_dev_promisc_restore(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_table(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_conf(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_all_fdir_filter(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_ptp(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_rx_interrupt(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_gro_conf(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_fec(hw);
	if (ret)
		goto err_promisc;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
		ret = hns3_do_start(hns, false);
		if (ret)
			goto err_promisc;
		hns3_info(hw, "hns3 dev restart successful!");
	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

err_promisc:
	hns3_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
	hns3_configure_all_mac_addr(hns, true);
	return ret;
}

static void
hns3_reset_service(void *param)
{
	struct hns3_adapter *hns = (struct hns3_adapter *)param;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level;
	struct timeval tv_delta;
	struct timeval tv_start;
	struct timeval tv;
	uint64_t msec;
	int ret;

	/*
	 * If the interrupt was not triggered within the delay time, it may
	 * have been lost. It is necessary to handle the interrupt here to
	 * recover from the error.
	 */
	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
			    SCHEDULE_DEFERRED) {
		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
				 __ATOMIC_RELAXED);
		hns3_err(hw, "Handling interrupts in delayed tasks");
		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
		if (reset_level == HNS3_NONE_RESET) {
			hns3_err(hw, "No reset level is set, try IMP reset");
			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
		}
	}
	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);

	/*
	 * Check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is one, we need to
	 * wait for the hardware to complete the reset:
	 * a. If we are able to figure out in reasonable time that the
	 *    hardware has been fully reset, then we can proceed with the
	 *    driver/client reset.
	 * b. Else, we can come back later to check the status, so
	 *    re-schedule now.
	 */
	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
	if (reset_level != HNS3_NONE_RESET) {
		hns3_clock_gettime(&tv_start);
		ret = hns3_reset_process(hns, reset_level);
		hns3_clock_gettime(&tv);
		timersub(&tv, &tv_start, &tv_delta);
		msec = hns3_clock_calctime_ms(&tv_delta);
		if (msec > HNS3_RESET_PROCESS_MS)
			hns3_err(hw, "%d handle long time delta %" PRIu64
				 " ms time=%ld.%.6ld",
				 hw->reset.level, msec,
				 tv.tv_sec, tv.tv_usec);
		if (ret == -EAGAIN)
			return;
	}

	/* Check if we got any *new* reset requests to be honored */
	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
	if (reset_level != HNS3_NONE_RESET)
		hns3_msix_process(hns, reset_level);
}

static unsigned int
hns3_get_speed_capa_num(uint16_t device_id)
{
	unsigned int num;

	switch (device_id) {
	case HNS3_DEV_ID_25GE:
	case HNS3_DEV_ID_25GE_RDMA:
		num = 2;
		break;
	case HNS3_DEV_ID_100G_RDMA_MACSEC:
	case HNS3_DEV_ID_200G_RDMA:
		num = 1;
		break;
	default:
		num = 0;
		break;
	}

	return num;
}

static int
hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
			uint16_t device_id)
{
	switch (device_id) {
	case HNS3_DEV_ID_25GE:
	/* fallthrough */
	case HNS3_DEV_ID_25GE_RDMA:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;

		/* In HNS3 device, the 25G NIC is compatible with 10G rate */
		speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
		speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
		break;
	case HNS3_DEV_ID_100G_RDMA_MACSEC:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
		break;
	case HNS3_DEV_ID_200G_RDMA:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}

static int
hns3_fec_get_capability(struct rte_eth_dev *dev,
			struct rte_eth_fec_capa *speed_fec_capa,
			unsigned int num)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t device_id = pci_dev->id.device_id;
	unsigned int capa_num;
	int ret;

	capa_num = hns3_get_speed_capa_num(device_id);
	if (capa_num == 0) {
		hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
			 device_id);
		return -ENOTSUP;
	}

	if (speed_fec_capa == NULL || num < capa_num)
		return capa_num;

	ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
	if (ret)
		return -ENOTSUP;

	return capa_num;
}

static int
get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * Reading CMD(HNS3_OPC_CONFIG_FEC_MODE) is not supported on devices
	 * whose link speed is below 10 Gbps.
	 */
	if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
		*state = 0;
		return 0;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
	req = (struct hns3_config_fec_cmd *)desc.data;
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "get current fec auto state failed, ret = %d",
			 ret);
		return ret;
	}

	*state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
	return 0;
}

static int
hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
{
	struct hns3_sfp_info_cmd *resp;
	uint32_t tmp_fec_capa;
	uint8_t auto_state;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * If the link is down and AUTO is enabled, AUTO is returned;
	 * otherwise, the configured FEC mode is returned.
	 * If the link is up, the current FEC mode is returned.
	 */
	if (hw->mac.link_status == ETH_LINK_DOWN) {
		ret = get_current_fec_auto_state(hw, &auto_state);
		if (ret)
			return ret;

		if (auto_state == 0x1) {
			*fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
			return 0;
		}
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
	resp = (struct hns3_sfp_info_cmd *)desc.data;
	resp->query_type = HNS3_ACTIVE_QUERY;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		hns3_err(hw, "IMP does not support getting FEC, ret = %d", ret);
		return ret;
	} else if (ret) {
		hns3_err(hw, "get FEC failed, ret = %d", ret);
		return ret;
	}

	/*
	 * The FEC mode order defined in hns3 hardware is inconsistent with
	 * the one defined in the ethdev library, so the value needs to be
	 * converted.
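	 * Hardware reports NOFEC/BASER/RS as the plain indexes 0/1/2 (see
	 * HNS3_HW_FEC_MODE_*), while the ethdev API expects the bit masks
	 * built by RTE_ETH_FEC_MODE_CAPA_MASK(), hence the switch below.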
 */
	switch (resp->active_fec) {
	case HNS3_HW_FEC_MODE_NOFEC:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	case HNS3_HW_FEC_MODE_BASER:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
		break;
	case HNS3_HW_FEC_MODE_RS:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		break;
	default:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	}

	*fec_capa = tmp_fec_capa;
	return 0;
}

static int
hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hns3_fec_get_internal(hw, fec_capa);
}

static int
hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);

	req = (struct hns3_config_fec_cmd *)desc.data;
	switch (mode) {
	case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
		hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
		break;
	default:
		return 0;
	}
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set fec mode failed, ret = %d", ret);

	return ret;
}

static uint32_t
get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
{
	struct hns3_mac *mac = &hw->mac;
	uint32_t cur_capa;

	switch (mac->link_speed) {
	case ETH_SPEED_NUM_10G:
		cur_capa = fec_capa[1].capa;
		break;
	case ETH_SPEED_NUM_25G:
	case ETH_SPEED_NUM_100G:
	case ETH_SPEED_NUM_200G:
		cur_capa = fec_capa[0].capa;
		break;
	default:
		cur_capa = 0;
		break;
	}

	return cur_capa;
}

static bool
is_fec_mode_one_bit_set(uint32_t mode)
{
	/* True when mode is a non-zero power of two, i.e. one bit is set. */
	return mode != 0 && (mode & (mode - 1)) == 0;
}

static int
hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
{
#define FEC_CAPA_NUM 2
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct hns3_pf *pf = &hns->pf;

	struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
	uint32_t cur_capa;
	uint32_t num = FEC_CAPA_NUM;
	int ret;

	ret = hns3_fec_get_capability(dev, fec_capa, num);
	if (ret < 0)
		return ret;

	/* The HNS3 PMD only supports modes with one bit set, e.g. 0x1, 0x4 */
	if (!is_fec_mode_one_bit_set(mode)) {
		hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
			 "FEC mode should be only one bit set", mode);
		return -EINVAL;
	}

	/*
	 * Check whether the configured mode is within the FEC capability;
	 * if not, the configured mode is not supported.
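	 * For example, requesting BASER while the current link speed only
	 * advertises RS capability is rejected below with -EINVAL.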
 */
	cur_capa = get_current_speed_fec_cap(hw, fec_capa);
	if (!(cur_capa & mode)) {
		hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
		return -EINVAL;
	}

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_fec_hw(hw, mode);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	pf->fec_mode = mode;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_restore_fec(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t mode = pf->fec_mode;
	int ret;

	ret = hns3_set_fec_hw(hw, mode);
	if (ret)
		hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
			 mode, ret);

	return ret;
}

static int
hns3_query_dev_fec_info(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
	int ret;

	ret = hns3_fec_get_internal(hw, &pf->fec_mode);
	if (ret)
		hns3_err(hw, "query device FEC info failed, ret = %d", ret);

	return ret;
}

static bool
hns3_optical_module_existed(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	bool existed;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw,
			 "failed to get optical module existence state, ret = %d.",
			 ret);
		return false;
	}
	existed = !!desc.data[0];

	return existed;
}

static int
hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
			    uint32_t len, uint8_t *data)
{
#define HNS3_SFP_INFO_CMD_NUM 6
#define HNS3_SFP_INFO_MAX_LEN \
	(HNS3_SFP_INFO_BD0_LEN + \
	(HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
	struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
	struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
	uint16_t read_len;
	uint16_t copy_len;
	int ret;
	int i;

	for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
					  true);
		if (i < HNS3_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}

	sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
	read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);

	ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
	if (ret) {
		hns3_err(hw, "failed to get module EEPROM info, ret = %d.",
			 ret);
		return ret;
	}

	/*
	 * The data format in BD0 is different from that in the other BDs.
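	 * BD0 carries at most HNS3_SFP_INFO_BD0_LEN bytes of EEPROM data,
	 * while each subsequent BD carries up to HNS3_SFP_INFO_BDX_LEN
	 * bytes, so BD0 is copied out separately below.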
*/ 7087 copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN); 7088 memcpy(data, sfp_info_bd0->data, copy_len); 7089 read_len = copy_len; 7090 7091 for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) { 7092 if (read_len >= len) 7093 break; 7094 7095 copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN); 7096 memcpy(data + read_len, desc[i].data, copy_len); 7097 read_len += copy_len; 7098 } 7099 7100 return (int)read_len; 7101 } 7102 7103 static int 7104 hns3_get_module_eeprom(struct rte_eth_dev *dev, 7105 struct rte_dev_eeprom_info *info) 7106 { 7107 struct hns3_adapter *hns = dev->data->dev_private; 7108 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7109 uint32_t offset = info->offset; 7110 uint32_t len = info->length; 7111 uint8_t *data = info->data; 7112 uint32_t read_len = 0; 7113 7114 if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER) 7115 return -ENOTSUP; 7116 7117 if (!hns3_optical_module_existed(hw)) { 7118 hns3_err(hw, "fail to read module EEPROM: no module is connected.\n"); 7119 return -EIO; 7120 } 7121 7122 while (read_len < len) { 7123 int ret; 7124 ret = hns3_get_module_eeprom_data(hw, offset + read_len, 7125 len - read_len, 7126 data + read_len); 7127 if (ret < 0) 7128 return -EIO; 7129 read_len += ret; 7130 } 7131 7132 return 0; 7133 } 7134 7135 static int 7136 hns3_get_module_info(struct rte_eth_dev *dev, 7137 struct rte_eth_dev_module_info *modinfo) 7138 { 7139 #define HNS3_SFF8024_ID_SFP 0x03 7140 #define HNS3_SFF8024_ID_QSFP_8438 0x0c 7141 #define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d 7142 #define HNS3_SFF8024_ID_QSFP28_8636 0x11 7143 #define HNS3_SFF_8636_V1_3 0x03 7144 struct hns3_adapter *hns = dev->data->dev_private; 7145 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7146 struct rte_dev_eeprom_info info; 7147 struct hns3_sfp_type sfp_type; 7148 int ret; 7149 7150 memset(&sfp_type, 0, sizeof(sfp_type)); 7151 memset(&info, 0, sizeof(info)); 7152 info.data = (uint8_t *)&sfp_type; 7153 info.length = sizeof(sfp_type); 7154 ret = hns3_get_module_eeprom(dev, &info); 7155 if (ret) 7156 return ret; 7157 7158 switch (sfp_type.type) { 7159 case HNS3_SFF8024_ID_SFP: 7160 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7161 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7162 break; 7163 case HNS3_SFF8024_ID_QSFP_8438: 7164 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7165 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7166 break; 7167 case HNS3_SFF8024_ID_QSFP_8436_8636: 7168 if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) { 7169 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7170 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7171 } else { 7172 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7173 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7174 } 7175 break; 7176 case HNS3_SFF8024_ID_QSFP28_8636: 7177 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7178 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7179 break; 7180 default: 7181 hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n", 7182 sfp_type.type, sfp_type.ext_type); 7183 return -EINVAL; 7184 } 7185 7186 return 0; 7187 } 7188 7189 void 7190 hns3_clock_gettime(struct timeval *tv) 7191 { 7192 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 7193 #define CLOCK_TYPE CLOCK_MONOTONIC_RAW 7194 #else 7195 #define CLOCK_TYPE CLOCK_MONOTONIC 7196 #endif 7197 #define NSEC_TO_USEC_DIV 1000 7198 7199 struct timespec spec; 7200 (void)clock_gettime(CLOCK_TYPE, &spec); 7201 7202 tv->tv_sec = spec.tv_sec; 7203 tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV; 7204 } 7205 7206 uint64_t 7207 hns3_clock_calctime_ms(struct 
timeval *tv) 7208 { 7209 return (uint64_t)tv->tv_sec * MSEC_PER_SEC + 7210 tv->tv_usec / USEC_PER_MSEC; 7211 } 7212 7213 uint64_t 7214 hns3_clock_gettime_ms(void) 7215 { 7216 struct timeval tv; 7217 7218 hns3_clock_gettime(&tv); 7219 return hns3_clock_calctime_ms(&tv); 7220 } 7221 7222 static int 7223 hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) 7224 { 7225 uint32_t hint = HNS3_IO_FUNC_HINT_NONE; 7226 7227 RTE_SET_USED(key); 7228 7229 if (strcmp(value, "vec") == 0) 7230 hint = HNS3_IO_FUNC_HINT_VEC; 7231 else if (strcmp(value, "sve") == 0) 7232 hint = HNS3_IO_FUNC_HINT_SVE; 7233 else if (strcmp(value, "simple") == 0) 7234 hint = HNS3_IO_FUNC_HINT_SIMPLE; 7235 else if (strcmp(value, "common") == 0) 7236 hint = HNS3_IO_FUNC_HINT_COMMON; 7237 7238 /* If the hint is valid then update output parameters */ 7239 if (hint != HNS3_IO_FUNC_HINT_NONE) 7240 *(uint32_t *)extra_args = hint; 7241 7242 return 0; 7243 } 7244 7245 static const char * 7246 hns3_get_io_hint_func_name(uint32_t hint) 7247 { 7248 switch (hint) { 7249 case HNS3_IO_FUNC_HINT_VEC: 7250 return "vec"; 7251 case HNS3_IO_FUNC_HINT_SVE: 7252 return "sve"; 7253 case HNS3_IO_FUNC_HINT_SIMPLE: 7254 return "simple"; 7255 case HNS3_IO_FUNC_HINT_COMMON: 7256 return "common"; 7257 default: 7258 return "none"; 7259 } 7260 } 7261 7262 static int 7263 hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) 7264 { 7265 uint64_t val; 7266 7267 RTE_SET_USED(key); 7268 7269 val = strtoull(value, NULL, 16); 7270 *(uint64_t *)extra_args = val; 7271 7272 return 0; 7273 } 7274 7275 void 7276 hns3_parse_devargs(struct rte_eth_dev *dev) 7277 { 7278 struct hns3_adapter *hns = dev->data->dev_private; 7279 uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7280 uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7281 struct hns3_hw *hw = &hns->hw; 7282 uint64_t dev_caps_mask = 0; 7283 struct rte_kvargs *kvlist; 7284 7285 if (dev->device->devargs == NULL) 7286 return; 7287 7288 kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL); 7289 if (!kvlist) 7290 return; 7291 7292 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT, 7293 &hns3_parse_io_hint_func, &rx_func_hint); 7294 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT, 7295 &hns3_parse_io_hint_func, &tx_func_hint); 7296 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK, 7297 &hns3_parse_dev_caps_mask, &dev_caps_mask); 7298 rte_kvargs_free(kvlist); 7299 7300 if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7301 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT, 7302 hns3_get_io_hint_func_name(rx_func_hint)); 7303 hns->rx_func_hint = rx_func_hint; 7304 if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7305 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT, 7306 hns3_get_io_hint_func_name(tx_func_hint)); 7307 hns->tx_func_hint = tx_func_hint; 7308 7309 if (dev_caps_mask != 0) 7310 hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".", 7311 HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask); 7312 hns->dev_caps_mask = dev_caps_mask; 7313 } 7314 7315 static const struct eth_dev_ops hns3_eth_dev_ops = { 7316 .dev_configure = hns3_dev_configure, 7317 .dev_start = hns3_dev_start, 7318 .dev_stop = hns3_dev_stop, 7319 .dev_close = hns3_dev_close, 7320 .promiscuous_enable = hns3_dev_promiscuous_enable, 7321 .promiscuous_disable = hns3_dev_promiscuous_disable, 7322 .allmulticast_enable = hns3_dev_allmulticast_enable, 7323 .allmulticast_disable = hns3_dev_allmulticast_disable, 7324 .mtu_set = hns3_dev_mtu_set, 7325 .stats_get = hns3_stats_get, 7326 
.stats_reset = hns3_stats_reset, 7327 .xstats_get = hns3_dev_xstats_get, 7328 .xstats_get_names = hns3_dev_xstats_get_names, 7329 .xstats_reset = hns3_dev_xstats_reset, 7330 .xstats_get_by_id = hns3_dev_xstats_get_by_id, 7331 .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, 7332 .dev_infos_get = hns3_dev_infos_get, 7333 .fw_version_get = hns3_fw_version_get, 7334 .rx_queue_setup = hns3_rx_queue_setup, 7335 .tx_queue_setup = hns3_tx_queue_setup, 7336 .rx_queue_release = hns3_dev_rx_queue_release, 7337 .tx_queue_release = hns3_dev_tx_queue_release, 7338 .rx_queue_start = hns3_dev_rx_queue_start, 7339 .rx_queue_stop = hns3_dev_rx_queue_stop, 7340 .tx_queue_start = hns3_dev_tx_queue_start, 7341 .tx_queue_stop = hns3_dev_tx_queue_stop, 7342 .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, 7343 .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, 7344 .rxq_info_get = hns3_rxq_info_get, 7345 .txq_info_get = hns3_txq_info_get, 7346 .rx_burst_mode_get = hns3_rx_burst_mode_get, 7347 .tx_burst_mode_get = hns3_tx_burst_mode_get, 7348 .flow_ctrl_get = hns3_flow_ctrl_get, 7349 .flow_ctrl_set = hns3_flow_ctrl_set, 7350 .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set, 7351 .mac_addr_add = hns3_add_mac_addr, 7352 .mac_addr_remove = hns3_remove_mac_addr, 7353 .mac_addr_set = hns3_set_default_mac_addr, 7354 .set_mc_addr_list = hns3_set_mc_mac_addr_list, 7355 .link_update = hns3_dev_link_update, 7356 .rss_hash_update = hns3_dev_rss_hash_update, 7357 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, 7358 .reta_update = hns3_dev_rss_reta_update, 7359 .reta_query = hns3_dev_rss_reta_query, 7360 .flow_ops_get = hns3_dev_flow_ops_get, 7361 .vlan_filter_set = hns3_vlan_filter_set, 7362 .vlan_tpid_set = hns3_vlan_tpid_set, 7363 .vlan_offload_set = hns3_vlan_offload_set, 7364 .vlan_pvid_set = hns3_vlan_pvid_set, 7365 .get_reg = hns3_get_regs, 7366 .get_module_info = hns3_get_module_info, 7367 .get_module_eeprom = hns3_get_module_eeprom, 7368 .get_dcb_info = hns3_get_dcb_info, 7369 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, 7370 .fec_get_capability = hns3_fec_get_capability, 7371 .fec_get = hns3_fec_get, 7372 .fec_set = hns3_fec_set, 7373 .tm_ops_get = hns3_tm_ops_get, 7374 .tx_done_cleanup = hns3_tx_done_cleanup, 7375 .timesync_enable = hns3_timesync_enable, 7376 .timesync_disable = hns3_timesync_disable, 7377 .timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp, 7378 .timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp, 7379 .timesync_adjust_time = hns3_timesync_adjust_time, 7380 .timesync_read_time = hns3_timesync_read_time, 7381 .timesync_write_time = hns3_timesync_write_time, 7382 }; 7383 7384 static const struct hns3_reset_ops hns3_reset_ops = { 7385 .reset_service = hns3_reset_service, 7386 .stop_service = hns3_stop_service, 7387 .prepare_reset = hns3_prepare_reset, 7388 .wait_hardware_ready = hns3_wait_hardware_ready, 7389 .reinit_dev = hns3_reinit_dev, 7390 .restore_conf = hns3_restore_conf, 7391 .start_service = hns3_start_service, 7392 }; 7393 7394 static int 7395 hns3_dev_init(struct rte_eth_dev *eth_dev) 7396 { 7397 struct hns3_adapter *hns = eth_dev->data->dev_private; 7398 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 7399 struct rte_ether_addr *eth_addr; 7400 struct hns3_hw *hw = &hns->hw; 7401 int ret; 7402 7403 PMD_INIT_FUNC_TRACE(); 7404 7405 eth_dev->process_private = (struct hns3_process_private *) 7406 rte_zmalloc_socket("hns3_filter_list", 7407 sizeof(struct hns3_process_private), 7408 RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node); 7409 if 
(eth_dev->process_private == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
		return -ENOMEM;
	}

	hns3_flow_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3_eth_dev_ops;
	eth_dev->rx_queue_count = hns3_rx_queue_count;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ret = hns3_mp_init_secondary();
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to init for secondary "
				     "process, ret = %d", ret);
			goto err_mp_init_secondary;
		}
		hw->secondary_cnt++;
		hns3_tx_push_init(eth_dev);
		return 0;
	}

	ret = hns3_mp_init_primary();
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failed to init for primary process, ret = %d",
			     ret);
		goto err_mp_init_primary;
	}

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = false;
	hw->data = eth_dev->data;
	hns3_parse_devargs(eth_dev);

	/*
	 * Set the default max packet size according to the default MTU value
	 * in the DPDK framework.
	 */
	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;

	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3_reset_ops;

	ret = hns3_init_pf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
		goto err_init_pf;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac",
					       sizeof(struct rte_ether_addr) *
					       HNS3_UC_MACADDR_NUM, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
			     "to store MAC addresses",
			     sizeof(struct rte_ether_addr) *
			     HNS3_UC_MACADDR_NUM);
		ret = -ENOMEM;
		goto err_rte_zmalloc;
	}

	eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
	if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
		rte_eth_random_addr(hw->mac.mac_addr);
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       (struct rte_ether_addr *)hw->mac.mac_addr);
		hns3_warn(hw, "default mac_addr from firmware is an invalid "
			  "unicast address, using random MAC address %s",
			  mac_str);
	}
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &eth_dev->data->mac_addrs[0]);

	hw->adapter_state = HNS3_NIC_INITIALIZED;

	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
			    SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait ready flag before reset */
		hns3_notify_reset_ready(hw, false);
	}

	hns3_info(hw, "hns3 dev initialization successful!");
	return 0;

err_rte_zmalloc:
	hns3_uninit_pf(eth_dev);

err_init_pf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	hns3_mp_uninit_primary();

err_mp_init_primary:
err_mp_init_secondary:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->tx_descriptor_status = NULL;
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;
	return ret;
}

static int
hns3_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	if
(rte_eal_process_type() != RTE_PROC_PRIMARY) { 7531 rte_free(eth_dev->process_private); 7532 eth_dev->process_private = NULL; 7533 return 0; 7534 } 7535 7536 if (hw->adapter_state < HNS3_NIC_CLOSING) 7537 hns3_dev_close(eth_dev); 7538 7539 hw->adapter_state = HNS3_NIC_REMOVED; 7540 return 0; 7541 } 7542 7543 static int 7544 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 7545 struct rte_pci_device *pci_dev) 7546 { 7547 return rte_eth_dev_pci_generic_probe(pci_dev, 7548 sizeof(struct hns3_adapter), 7549 hns3_dev_init); 7550 } 7551 7552 static int 7553 eth_hns3_pci_remove(struct rte_pci_device *pci_dev) 7554 { 7555 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit); 7556 } 7557 7558 static const struct rte_pci_id pci_id_hns3_map[] = { 7559 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) }, 7560 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) }, 7561 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) }, 7562 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, 7563 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, 7564 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, 7565 { .vendor_id = 0, }, /* sentinel */ 7566 }; 7567 7568 static struct rte_pci_driver rte_hns3_pmd = { 7569 .id_table = pci_id_hns3_map, 7570 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 7571 .probe = eth_hns3_pci_probe, 7572 .remove = eth_hns3_pci_remove, 7573 }; 7574 7575 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd); 7576 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map); 7577 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); 7578 RTE_PMD_REGISTER_PARAM_STRING(net_hns3, 7579 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " 7580 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common " 7581 HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "); 7582 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE); 7583 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE); 7584
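/*
 * Devargs usage sketch (the PCI address below is hypothetical): the
 * parameters registered above can be passed per device on the EAL command
 * line, e.g.
 *     dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common -- -i
 * A hint value that is not recognized leaves the corresponding hint at
 * HNS3_IO_FUNC_HINT_NONE, so the driver falls back to its default burst
 * function selection.
 */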