/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_kvargs.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"

#define HNS3_SERVICE_INTERVAL		1000000 /* us */
#define HNS3_SERVICE_QUICK_INTERVAL	10
#define HNS3_INVALID_PVID		0xFFFF

#define HNS3_FILTER_TYPE_VF		0
#define HNS3_FILTER_TYPE_PORT		1
#define HNS3_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HNS3_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HNS3_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HNS3_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HNS3_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HNS3_FILTER_FE_EGRESS		(HNS3_FILTER_FE_NIC_EGRESS_B \
					| HNS3_FILTER_FE_ROCE_EGRESS_B)
#define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
					| HNS3_FILTER_FE_ROCE_INGRESS_B)

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

#define HNS3_VECTOR0_IMP_RESET_INT_B	1
#define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
#define HNS3_VECTOR0_IMP_RD_POISON_B	5U
#define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U

#define HNS3_RESET_WAIT_MS	100
#define HNS3_RESET_WAIT_CNT	200

/* FEC mode order defined in HNS3 hardware */
#define HNS3_HW_FEC_MODE_NOFEC	0
#define HNS3_HW_FEC_MODE_BASER	1
#define HNS3_HW_FEC_MODE_RS	2

enum hns3_evt_cause {
        HNS3_VECTOR0_EVENT_RST,
        HNS3_VECTOR0_EVENT_MBX,
        HNS3_VECTOR0_EVENT_ERR,
        HNS3_VECTOR0_EVENT_PTP,
        HNS3_VECTOR0_EVENT_OTHER,
};

static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
        { ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
                             RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
                             RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

        { ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
                             RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
                             RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
                             RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

        { ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
                             RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
                             RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

        { ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
                             RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
                             RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
                             RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

        { ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
                              RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
                              RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

        { ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
                              RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
                              RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};

static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
                                                  uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
                                    int on);
static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
static bool hns3_update_link_status(struct hns3_hw *hw);

static int hns3_add_mc_addr(struct hns3_hw *hw,
                            struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_addr(struct hns3_hw *hw,
                               struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct hns3_hw *hw);
static int hns3_do_stop(struct hns3_adapter *hns);
static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);

void hns3_ether_format_addr(char *buf, uint16_t size,
                            const struct rte_ether_addr *ether_addr)
{
        snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
                 ether_addr->addr_bytes[0],
                 ether_addr->addr_bytes[4],
                 ether_addr->addr_bytes[5]);
}

static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3_evt_cause
hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
                          uint32_t *vec_val)
{
        struct hns3_hw *hw = &hns->hw;

        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
        hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
        *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
        if (!is_delay) {
                hw->reset.stats.imp_cnt++;
                hns3_warn(hw, "IMP reset detected, clear reset status");
        } else {
                hns3_schedule_delayed_reset(hns);
                hns3_warn(hw, "IMP reset detected, don't clear reset status");
        }

        return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
                             uint32_t *vec_val)
{
        struct hns3_hw *hw = &hns->hw;

        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
        hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
        *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
        if (!is_delay) {
                hw->reset.stats.global_cnt++;
                hns3_warn(hw, "Global reset detected, clear reset status");
        } else {
                hns3_schedule_delayed_reset(hns);
                hns3_warn(hw,
                          "Global reset detected, don't clear reset status");
        }

        return HNS3_VECTOR0_EVENT_RST;
}

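/*
 * Decode the cause of a vector0 interrupt. The sources are checked in
 * strict priority order: IMP reset > global reset > 1588 event > MSI-X/RAS
 * error > mailbox (CMDQ RX) > other. Only the highest-priority pending
 * event is reported to the caller; see the mailbox note below for why the
 * lower-priority sources can safely be left latched.
 */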
static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
        struct hns3_hw *hw = &hns->hw;
        uint32_t vector0_int_stats;
        uint32_t cmdq_src_val;
        uint32_t hw_err_src_reg;
        uint32_t val;
        enum hns3_evt_cause ret;
        bool is_delay;

        /* fetch the events from their corresponding regs */
        vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
        cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
        hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);

        is_delay = clearval == NULL ? true : false;
        /*
         * Assumption: if reset and mailbox events are reported together,
         * we process only the reset event and defer the mailbox event.
         * Since we do not clear the RX CMDQ event this time, we will
         * receive another interrupt from H/W for the mailbox alone.
         */
        if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
                ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
                goto out;
        }

        /* Global reset */
        if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
                ret = hns3_proc_global_reset_event(hns, is_delay, &val);
                goto out;
        }

        /* Check for vector0 1588 event source */
        if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
                val = BIT(HNS3_VECTOR0_1588_INT_B);
                ret = HNS3_VECTOR0_EVENT_PTP;
                goto out;
        }

        /* check for vector0 msix event source */
        if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
            hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
                val = vector0_int_stats | hw_err_src_reg;
                ret = HNS3_VECTOR0_EVENT_ERR;
                goto out;
        }

        /* check for vector0 mailbox(=CMDQ RX) event source */
        if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
                cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
                val = cmdq_src_val;
                ret = HNS3_VECTOR0_EVENT_MBX;
                goto out;
        }

        val = vector0_int_stats;
        ret = HNS3_VECTOR0_EVENT_OTHER;
out:

        if (clearval)
                *clearval = val;
        return ret;
}

static bool
hns3_is_1588_event_type(uint32_t event_type)
{
        return (event_type == HNS3_VECTOR0_EVENT_PTP);
}

static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
        if (event_type == HNS3_VECTOR0_EVENT_RST ||
            hns3_is_1588_event_type(event_type))
                hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
        else if (event_type == HNS3_VECTOR0_EVENT_MBX)
                hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
        uint32_t vector0_int_stats;

        vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);

        if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
                hns3_warn(hw, "Probe during IMP reset interrupt");

        if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
                hns3_warn(hw, "Probe during Global reset interrupt");

        hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
                               BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
                               BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
                               BIT(HNS3_VECTOR0_CORERESET_INT_B));
        hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
        hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
                               BIT(HNS3_VECTOR0_1588_INT_B));
}

static void
hns3_handle_mac_tnl(struct hns3_hw *hw)
{
        struct hns3_cmd_desc desc;
        uint32_t status;
        int ret;

        /* query and clear mac tnl interrupt */
        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret) {
                hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
                return;
        }

        status = rte_le_to_cpu_32(desc.data[0]);
        if (status) {
                hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
                hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
                                          false);
                desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
                ret = hns3_cmd_send(hw, &desc, 1);
                if (ret)
                        hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
                                 ret);
        }
}

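/*
 * Misc (vector0) interrupt handler. It is registered against the misc
 * interrupt vector during device init (not shown in this excerpt), so it
 * runs in the interrupt thread context rather than on a data-path lcore.
 */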
static void
hns3_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        enum hns3_evt_cause event_cause;
        uint32_t clearval = 0;
        uint32_t vector0_int;
        uint32_t ras_int;
        uint32_t cmdq_int;

        /* Disable interrupt */
        hns3_pf_disable_irq0(hw);

        event_cause = hns3_check_event_cause(hns, &clearval);
        vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
        ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
        cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
        /* vector 0 interrupt is shared with reset and mailbox source events. */
        if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
                hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
                          "ras_int_stat:0x%x cmdq_int_stat:0x%x",
                          vector0_int, ras_int, cmdq_int);
                hns3_handle_mac_tnl(hw);
                hns3_handle_error(hns);
        } else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
                hns3_warn(hw, "received reset interrupt");
                hns3_schedule_reset(hns);
        } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
                hns3_dev_handle_mbx_msg(hw);
        } else {
                hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
                          "ras_int_stat:0x%x cmdq_int_stat:0x%x",
                          vector0_int, ras_int, cmdq_int);
        }

        hns3_clear_event_cause(hw, event_cause, clearval);
        /* Enable interrupt if it is not caused by reset */
        hns3_pf_enable_irq0(hw);
}

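/*
 * Program one vlan id into the port vlan filter table. The 4096 vlan ids
 * are grouped into blocks of HNS3_VLAN_ID_OFFSET_STEP (160) ids; a single
 * command selects one block via vlan_offset and carries a 160-bit bitmap.
 * For example, vlan_id 1500 gives offset_base = 1500 / 160 = 9,
 * vlan_offset_byte = (1500 % 160) / 8 = 7 and the bit value
 * 1 << (1500 % 8), i.e. bit 4 of bitmap byte 7 in block 9.
 */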
static int
hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3_VLAN_ID_OFFSET_STEP	160
#define HNS3_VLAN_BYTE_SIZE		8
        struct hns3_vlan_filter_pf_cfg_cmd *req;
        struct hns3_hw *hw = &hns->hw;
        uint8_t vlan_offset_byte_val;
        struct hns3_cmd_desc desc;
        uint8_t vlan_offset_byte;
        uint8_t vlan_offset_base;
        int ret;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);

        vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
        vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
                           HNS3_VLAN_BYTE_SIZE;
        vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);

        req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
        req->vlan_offset = vlan_offset_base;
        req->vlan_cfg = on ? 0 : 1;
        req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
                         vlan_id, ret);

        return ret;
}

static void
hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
{
        struct hns3_user_vlan_table *vlan_entry;
        struct hns3_pf *pf = &hns->pf;

        LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
                if (vlan_entry->vlan_id == vlan_id) {
                        if (vlan_entry->hd_tbl_status)
                                hns3_set_port_vlan_filter(hns, vlan_id, 0);
                        LIST_REMOVE(vlan_entry, next);
                        rte_free(vlan_entry);
                        break;
                }
        }
}

static void
hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
                        bool writen_to_tbl)
{
        struct hns3_user_vlan_table *vlan_entry;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_pf *pf = &hns->pf;

        LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
                if (vlan_entry->vlan_id == vlan_id)
                        return;
        }

        vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
        if (vlan_entry == NULL) {
                hns3_err(hw, "Failed to malloc hns3 vlan table");
                return;
        }

        vlan_entry->hd_tbl_status = writen_to_tbl;
        vlan_entry->vlan_id = vlan_id;

        LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
}

static int
hns3_restore_vlan_table(struct hns3_adapter *hns)
{
        struct hns3_user_vlan_table *vlan_entry;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_pf *pf = &hns->pf;
        uint16_t vlan_id;
        int ret = 0;

        if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
                return hns3_vlan_pvid_configure(hns,
                                                hw->port_base_vlan_cfg.pvid, 1);

        LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
                if (vlan_entry->hd_tbl_status) {
                        vlan_id = vlan_entry->vlan_id;
                        ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
                        if (ret)
                                break;
                }
        }

        return ret;
}

static int
hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
        struct hns3_hw *hw = &hns->hw;
        bool writen_to_tbl = false;
        int ret = 0;

        /*
         * When vlan filter is enabled, hardware regards packets without vlan
         * as packets with vlan 0. So, to receive packets without vlan, vlan
         * id 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
         */
        if (on == 0 && vlan_id == 0)
                return 0;

        /*
         * When port-based vlan is enabled, it is used as the vlan filter
         * condition. In this case we don't update the vlan filter table when
         * the user adds a new vlan or removes an existing one; only the vlan
         * list is updated. The vlan ids in the vlan list are written to the
         * vlan filter table once port-based vlan is disabled.
         */
        if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
                ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
                writen_to_tbl = true;
        }

        if (ret == 0) {
                if (on)
                        hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
                else
                        hns3_rm_dev_vlan_table(hns, vlan_id);
        }
        return ret;
}

static int
hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        rte_spinlock_lock(&hw->lock);
        ret = hns3_vlan_filter_configure(hns, vlan_id, on);
        rte_spinlock_unlock(&hw->lock);
        return ret;
}

static int
hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
                         uint16_t tpid)
{
        struct hns3_rx_vlan_type_cfg_cmd *rx_req;
        struct hns3_tx_vlan_type_cfg_cmd *tx_req;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_cmd_desc desc;
        int ret;

        if ((vlan_type != ETH_VLAN_TYPE_INNER &&
             vlan_type != ETH_VLAN_TYPE_OUTER)) {
                hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
                return -EINVAL;
        }

        if (tpid != RTE_ETHER_TYPE_VLAN) {
                hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
                return -EINVAL;
        }

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
        rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

        if (vlan_type == ETH_VLAN_TYPE_OUTER) {
                rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
                rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
        } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
                rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
                rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
                rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
                rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
        }

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret) {
                hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
                         ret);
                return ret;
        }

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

        tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
        tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
        tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
                         ret);
        return ret;
}

static int
hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
                   uint16_t tpid)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        rte_spinlock_lock(&hw->lock);
        ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
        rte_spinlock_unlock(&hw->lock);
        return ret;
}

static int
hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
                             struct hns3_rx_vtag_cfg *vcfg)
{
        struct hns3_vport_vtag_rx_cfg_cmd *req;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_cmd_desc desc;
        uint16_t vport_id;
        uint8_t bitmap;
        int ret;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);

        req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
        hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
                     vcfg->strip_tag1_en ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
                     vcfg->strip_tag2_en ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
                     vcfg->vlan1_vlan_prionly ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
                     vcfg->vlan2_vlan_prionly ? 1 : 0);

        /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
        hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
                     vcfg->strip_tag1_discard_en ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
                     vcfg->strip_tag2_discard_en ? 1 : 0);
        /*
         * In the current version, VF is not supported when the PF is driven
         * by the DPDK driver, so we only need to configure parameters for
         * the PF vport.
         */
        vport_id = HNS3_PF_FUNC_ID;
        req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
        bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
        req->vf_bitmap[req->vf_offset] = bitmap;

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
        return ret;
}

static void
hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
                           struct hns3_rx_vtag_cfg *vcfg)
{
        struct hns3_pf *pf = &hns->pf;

        memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
}

static void
hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
                           struct hns3_tx_vtag_cfg *vcfg)
{
        struct hns3_pf *pf = &hns->pf;

        memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
}

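/*
 * Enable or disable rx vlan strip for DEV_RX_OFFLOAD_VLAN_STRIP. Which tag
 * the request applies to depends on the port-based vlan state: with PVID
 * disabled only tag2 stripping follows 'enable', while with PVID enabled
 * tag1 stripping follows 'enable' and tag2 (the PVID tag) is always
 * stripped and discarded.
 */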
static int
hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
{
        struct hns3_rx_vtag_cfg rxvlan_cfg;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
                rxvlan_cfg.strip_tag1_en = false;
                rxvlan_cfg.strip_tag2_en = enable;
                rxvlan_cfg.strip_tag2_discard_en = false;
        } else {
                rxvlan_cfg.strip_tag1_en = enable;
                rxvlan_cfg.strip_tag2_en = true;
                rxvlan_cfg.strip_tag2_discard_en = true;
        }

        rxvlan_cfg.strip_tag1_discard_en = false;
        rxvlan_cfg.vlan1_vlan_prionly = false;
        rxvlan_cfg.vlan2_vlan_prionly = false;
        rxvlan_cfg.rx_vlan_offload_en = enable;

        ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
        if (ret) {
                hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret);
                return ret;
        }

        hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);

        return ret;
}

static int
hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
                          uint8_t fe_type, bool filter_en, uint8_t vf_id)
{
        struct hns3_vlan_filter_ctrl_cmd *req;
        struct hns3_cmd_desc desc;
        int ret;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);

        req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
        req->vlan_type = vlan_type;
        req->vlan_fe = filter_en ? fe_type : 0;
        req->vf_id = vf_id;

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                hns3_err(hw, "set vlan filter fail, ret =%d", ret);

        return ret;
}

static int
hns3_vlan_filter_init(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        int ret;

        ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
                                        HNS3_FILTER_FE_EGRESS, false,
                                        HNS3_PF_FUNC_ID);
        if (ret) {
                hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
                return ret;
        }

        ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
                                        HNS3_FILTER_FE_INGRESS, false,
                                        HNS3_PF_FUNC_ID);
        if (ret)
                hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);

        return ret;
}

static int
hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
{
        struct hns3_hw *hw = &hns->hw;
        int ret;

        ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
                                        HNS3_FILTER_FE_INGRESS, enable,
                                        HNS3_PF_FUNC_ID);
        if (ret)
                hns3_err(hw, "failed to %s port vlan filter, ret = %d",
                         enable ? "enable" : "disable", ret);

        return ret;
}

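/*
 * Entry for rte_eth_dev_set_vlan_offload(), wired up as the
 * .vlan_offload_set dev op (registration not shown in this excerpt).
 * 'mask' indicates which of the VLAN filter and strip settings in
 * dev_conf.rxmode.offloads changed and must be pushed to hardware.
 */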
static int
hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        struct rte_eth_rxmode *rxmode;
        unsigned int tmp_mask;
        bool enable;
        int ret = 0;

        rte_spinlock_lock(&hw->lock);
        rxmode = &dev->data->dev_conf.rxmode;
        tmp_mask = (unsigned int)mask;
        if (tmp_mask & ETH_VLAN_FILTER_MASK) {
                /* ignore vlan filter configuration during promiscuous mode */
                if (!dev->data->promiscuous) {
                        /* Enable or disable VLAN filter */
                        enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
                                 true : false;

                        ret = hns3_enable_vlan_filter(hns, enable);
                        if (ret) {
                                rte_spinlock_unlock(&hw->lock);
                                hns3_err(hw, "failed to %s rx filter, ret = %d",
                                         enable ? "enable" : "disable", ret);
                                return ret;
                        }
                }
        }

        if (tmp_mask & ETH_VLAN_STRIP_MASK) {
                /* Enable or disable VLAN stripping */
                enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
                         true : false;

                ret = hns3_en_hw_strip_rxvtag(hns, enable);
                if (ret) {
                        rte_spinlock_unlock(&hw->lock);
                        hns3_err(hw, "failed to %s rx strip, ret = %d",
                                 enable ? "enable" : "disable", ret);
                        return ret;
                }
        }

        rte_spinlock_unlock(&hw->lock);

        return ret;
}

static int
hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
                             struct hns3_tx_vtag_cfg *vcfg)
{
        struct hns3_vport_vtag_tx_cfg_cmd *req;
        struct hns3_cmd_desc desc;
        struct hns3_hw *hw = &hns->hw;
        uint16_t vport_id;
        uint8_t bitmap;
        int ret;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);

        req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
        req->def_vlan_tag1 = vcfg->default_tag1;
        req->def_vlan_tag2 = vcfg->default_tag2;
        hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
                     vcfg->accept_tag1 ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
                     vcfg->accept_untag1 ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
                     vcfg->accept_tag2 ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
                     vcfg->accept_untag2 ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
                     vcfg->insert_tag1_en ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
                     vcfg->insert_tag2_en ? 1 : 0);
        hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);

        /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
        hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
                     vcfg->tag_shift_mode_en ? 1 : 0);

        /*
         * In the current version, VF is not supported when the PF is driven
         * by the DPDK driver, so we only need to configure parameters for
         * the PF vport.
         */
        vport_id = HNS3_PF_FUNC_ID;
        req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
        bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
        req->vf_bitmap[req->vf_offset] = bitmap;

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret)
                hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);

        return ret;
}

static int
hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
                     uint16_t pvid)
{
        struct hns3_hw *hw = &hns->hw;
        struct hns3_tx_vtag_cfg txvlan_cfg;
        int ret;

        if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
                txvlan_cfg.accept_tag1 = true;
                txvlan_cfg.insert_tag1_en = false;
                txvlan_cfg.default_tag1 = 0;
        } else {
                txvlan_cfg.accept_tag1 =
                        hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
                txvlan_cfg.insert_tag1_en = true;
                txvlan_cfg.default_tag1 = pvid;
        }

        txvlan_cfg.accept_untag1 = true;
        txvlan_cfg.accept_tag2 = true;
        txvlan_cfg.accept_untag2 = true;
        txvlan_cfg.insert_tag2_en = false;
        txvlan_cfg.default_tag2 = 0;
        txvlan_cfg.tag_shift_mode_en = true;

        ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
        if (ret) {
                hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
                         ret);
                return ret;
        }

        hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
        return ret;
}

static void
hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
{
        struct hns3_user_vlan_table *vlan_entry;
        struct hns3_pf *pf = &hns->pf;

        LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
                if (vlan_entry->hd_tbl_status) {
                        hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
                        vlan_entry->hd_tbl_status = false;
                }
        }

        if (is_del_list) {
                vlan_entry = LIST_FIRST(&pf->vlan_list);
                while (vlan_entry) {
                        LIST_REMOVE(vlan_entry, next);
                        rte_free(vlan_entry);
                        vlan_entry = LIST_FIRST(&pf->vlan_list);
                }
        }
}

static void
hns3_add_all_vlan_table(struct hns3_adapter *hns)
{
        struct hns3_user_vlan_table *vlan_entry;
        struct hns3_pf *pf = &hns->pf;

        LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
                if (!vlan_entry->hd_tbl_status) {
                        hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
                        vlan_entry->hd_tbl_status = true;
                }
        }
}

static void
hns3_remove_all_vlan_table(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        int ret;

        hns3_rm_all_vlan_table(hns, true);
        if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
                ret = hns3_set_port_vlan_filter(hns,
                                                hw->port_base_vlan_cfg.pvid, 0);
                if (ret) {
                        hns3_err(hw, "Failed to remove all vlan table, ret =%d",
                                 ret);
                        return;
                }
        }
}

static int
hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
                                uint16_t port_base_vlan_state,
                                uint16_t new_pvid)
{
        struct hns3_hw *hw = &hns->hw;
        uint16_t old_pvid;
        int ret;

        if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
                old_pvid = hw->port_base_vlan_cfg.pvid;
                if (old_pvid != HNS3_INVALID_PVID) {
                        ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
                        if (ret) {
                                hns3_err(hw, "failed to remove old pvid %u, "
                                         "ret = %d", old_pvid, ret);
                                return ret;
                        }
                }

                hns3_rm_all_vlan_table(hns, false);
                ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
                if (ret) {
                        hns3_err(hw, "failed to add new pvid %u, ret = %d",
                                 new_pvid, ret);
                        return ret;
                }
        } else {
                ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
                if (ret) {
                        hns3_err(hw, "failed to remove pvid %u, ret = %d",
                                 new_pvid, ret);
                        return ret;
                }

                hns3_add_all_vlan_table(hns);
        }
        return 0;
}

static int
hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
{
        struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
        struct hns3_rx_vtag_cfg rx_vlan_cfg;
        bool rx_strip_en;
        int ret;

        rx_strip_en = old_cfg->rx_vlan_offload_en;
        if (on) {
                rx_vlan_cfg.strip_tag1_en = rx_strip_en;
                rx_vlan_cfg.strip_tag2_en = true;
                rx_vlan_cfg.strip_tag2_discard_en = true;
        } else {
                rx_vlan_cfg.strip_tag1_en = false;
                rx_vlan_cfg.strip_tag2_en = rx_strip_en;
                rx_vlan_cfg.strip_tag2_discard_en = false;
        }
        rx_vlan_cfg.strip_tag1_discard_en = false;
        rx_vlan_cfg.vlan1_vlan_prionly = false;
        rx_vlan_cfg.vlan2_vlan_prionly = false;
        rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;

        ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
        if (ret)
                return ret;

        hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
        return ret;
}

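/*
 * Apply a PVID change in three steps: Tx tag-insert configuration, Rx
 * strip configuration and, finally, the vlan filter table update. If a
 * later step fails, the earlier steps are rolled back to the previous
 * port-based vlan state (see the vlan_filter_set_fail and
 * pvid_vlan_strip_fail labels).
 */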
static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
        struct hns3_hw *hw = &hns->hw;
        uint16_t port_base_vlan_state;
        int ret, err;

        if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
                if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
                        hns3_warn(hw, "Invalid operation! As current pvid set "
                                  "is %u, disable pvid %u is invalid",
                                  hw->port_base_vlan_cfg.pvid, pvid);
                return 0;
        }

        port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
                                    HNS3_PORT_BASE_VLAN_DISABLE;
        ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
        if (ret) {
                hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
                         ret);
                return ret;
        }

        ret = hns3_en_pvid_strip(hns, on);
        if (ret) {
                hns3_err(hw, "failed to config rx vlan strip for pvid, "
                         "ret = %d", ret);
                goto pvid_vlan_strip_fail;
        }

        if (pvid == HNS3_INVALID_PVID)
                goto out;
        ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
        if (ret) {
                hns3_err(hw, "failed to update vlan filter entries, ret = %d",
                         ret);
                goto vlan_filter_set_fail;
        }

out:
        hw->port_base_vlan_cfg.state = port_base_vlan_state;
        hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
        return ret;

vlan_filter_set_fail:
        err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
                                 HNS3_PORT_BASE_VLAN_ENABLE);
        if (err)
                hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
        err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
                                   hw->port_base_vlan_cfg.pvid);
        if (err)
                hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

        return ret;
}

static int
hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        bool pvid_en_state_change;
        uint16_t pvid_state;
        int ret;

        if (pvid > RTE_ETHER_MAX_VLAN_ID) {
                hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
                         RTE_ETHER_MAX_VLAN_ID);
                return -EINVAL;
        }

        /*
         * If the PVID enable state changes, the PVID configuration state
         * stored in struct hns3_tx_queue/hns3_rx_queue must be refreshed.
         */
        pvid_state = hw->port_base_vlan_cfg.state;
        if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
            (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
                pvid_en_state_change = false;
        else
                pvid_en_state_change = true;

        rte_spinlock_lock(&hw->lock);
        ret = hns3_vlan_pvid_configure(hns, pvid, on);
        rte_spinlock_unlock(&hw->lock);
        if (ret)
                return ret;
        /*
         * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE do the PVID-related
         * operations in Tx/Rx need to be handled by the PMD.
         */
        if (pvid_en_state_change &&
            hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
                hns3_update_all_queues_pvid_proc_en(hw);

        return 0;
}

static int
hns3_default_vlan_config(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        int ret;

        /*
         * When vlan filter is enabled, hardware regards packets without vlan
         * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
         * table, packets without vlan won't be received. So, add vlan 0 as
         * the default vlan.
         */
        ret = hns3_vlan_filter_configure(hns, 0, 1);
        if (ret)
                hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
        return ret;
}

static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        int ret;

        /*
         * This function can be called in the initialization and reset
         * process. When called in the reset process, it means the hardware
         * has been reset successfully and we need to restore the hardware
         * configuration so that it is the same before and after the reset.
         */
        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
                hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
                hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
        }

        ret = hns3_vlan_filter_init(hns);
        if (ret) {
                hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
                return ret;
        }

        ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
                                       RTE_ETHER_TYPE_VLAN);
        if (ret) {
                hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
                return ret;
        }

        /*
         * When in the reinit dev stage of the reset process, the following
         * vlan-related configurations may differ from those at
         * initialization; they are restored to hardware later, in
         * hns3_restore_vlan_table and hns3_restore_vlan_conf.
         */
        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
                ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
                if (ret) {
                        hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
                        return ret;
                }

                ret = hns3_en_hw_strip_rxvtag(hns, false);
                if (ret) {
                        hns3_err(hw, "rx strip configure fail in pf, ret =%d",
                                 ret);
                        return ret;
                }
        }

        return hns3_default_vlan_config(hns);
}

static int
hns3_restore_vlan_conf(struct hns3_adapter *hns)
{
        struct hns3_pf *pf = &hns->pf;
        struct hns3_hw *hw = &hns->hw;
        uint64_t offloads;
        bool enable;
        int ret;

        if (!hw->data->promiscuous) {
                /* restore vlan filter states */
                offloads = hw->data->dev_conf.rxmode.offloads;
                enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
                ret = hns3_enable_vlan_filter(hns, enable);
                if (ret) {
                        hns3_err(hw, "failed to restore vlan rx filter conf, "
                                 "ret = %d", ret);
                        return ret;
                }
        }

        ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
        if (ret) {
                hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
                return ret;
        }

        ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
        if (ret)
                hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);

        return ret;
}

static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct rte_eth_dev_data *data = dev->data;
        struct rte_eth_txmode *txmode;
        struct hns3_hw *hw = &hns->hw;
        int mask;
        int ret;

        txmode = &data->dev_conf.txmode;
        if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
                hns3_warn(hw,
                          "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
                          "configuration is not supported! Ignore these two "
                          "parameters: hw_vlan_reject_tagged(%u), "
                          "hw_vlan_reject_untagged(%u)",
                          txmode->hw_vlan_reject_tagged,
                          txmode->hw_vlan_reject_untagged);

        /* Apply vlan offload setting */
        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
        ret = hns3_vlan_offload_set(dev, mask);
        if (ret) {
                hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
                         ret);
                return ret;
        }

        /*
         * If no PVID is configured in rte_eth_conf, the driver need not
         * write any PVID-related configuration to the hardware.
         */
        if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
                return 0;

        /* Apply pvid setting */
        ret = hns3_vlan_pvid_set(dev, txmode->pvid,
                                 txmode->hw_vlan_insert_pvid);
        if (ret)
                hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
                         txmode->pvid, ret);

        return ret;
}

static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
                unsigned int tso_mss_max)
{
        struct hns3_cfg_tso_status_cmd *req;
        struct hns3_cmd_desc desc;
        uint16_t tso_mss;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

        req = (struct hns3_cfg_tso_status_cmd *)desc.data;

        tso_mss = 0;
        hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
                       tso_mss_min);
        req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

        tso_mss = 0;
        hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
                       tso_mss_max);
        req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

        return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
                   uint16_t *allocated_size, bool is_alloc)
{
        struct hns3_umv_spc_alc_cmd *req;
        struct hns3_cmd_desc desc;
        int ret;

        req = (struct hns3_umv_spc_alc_cmd *)desc.data;
        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
        hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
        req->space_size = rte_cpu_to_le_32(space_size);

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret) {
                PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
                             is_alloc ? "allocate" : "free", ret);
                return ret;
        }

        if (is_alloc && allocated_size)
                *allocated_size = rte_le_to_cpu_32(desc.data[1]);

        return 0;
}

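/*
 * Reserve unicast MAC vlan (UMV) table space for this PF. Firmware may
 * grant less than pf->wanted_umv_size; the granted amount becomes
 * pf->max_umv_size, and pf->used_umv_size tracks consumption so that
 * hns3_is_umv_space_full() can reject unicast entries once the space is
 * exhausted.
 */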
static int
hns3_init_umv_space(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_pf *pf = &hns->pf;
        uint16_t allocated_size = 0;
        int ret;

        ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
                                 true);
        if (ret)
                return ret;

        if (allocated_size < pf->wanted_umv_size)
                PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
                             pf->wanted_umv_size, allocated_size);

        pf->max_umv_size = (!!allocated_size) ? allocated_size :
                           pf->wanted_umv_size;
        pf->used_umv_size = 0;
        return 0;
}

static int
hns3_uninit_umv_space(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_pf *pf = &hns->pf;
        int ret;

        if (pf->max_umv_size == 0)
                return 0;

        ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
        if (ret)
                return ret;

        pf->max_umv_size = 0;

        return 0;
}

static bool
hns3_is_umv_space_full(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_pf *pf = &hns->pf;
        bool is_full;

        is_full = (pf->used_umv_size >= pf->max_umv_size);

        return is_full;
}

static void
hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_pf *pf = &hns->pf;

        if (is_free) {
                if (pf->used_umv_size > 0)
                        pf->used_umv_size--;
        } else {
                pf->used_umv_size++;
        }
}

static void
hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
                      const uint8_t *addr, bool is_mc)
{
        const unsigned char *mac_addr = addr;
        uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
                            ((uint32_t)mac_addr[2] << 16) |
                            ((uint32_t)mac_addr[1] << 8) |
                            (uint32_t)mac_addr[0];
        uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];

        hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
        if (is_mc) {
                hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
                hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
                hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
        }

        new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
        new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
}

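/*
 * Translate a MAC vlan table response into an errno. The resp_code
 * handling below amounts to:
 *   ADD:    0 or 1 -> success; uc/mc overflow -> -ENOSPC; else -EIO
 *   REMOVE: 0 -> success; 1 -> entry not found (-ENOENT); else -EIO
 *   LOOKUP: 0 -> success; 1 -> entry not found (-ENOENT); else -EIO
 * A non-zero cmdq_resp means the command itself failed (-EIO).
 */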
static int
hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
                             uint8_t resp_code,
                             enum hns3_mac_vlan_tbl_opcode op)
{
        if (cmdq_resp) {
                hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
                         cmdq_resp);
                return -EIO;
        }

        if (op == HNS3_MAC_VLAN_ADD) {
                if (resp_code == 0 || resp_code == 1) {
                        return 0;
                } else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
                        hns3_err(hw, "add mac addr failed for uc_overflow");
                        return -ENOSPC;
                } else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
                        hns3_err(hw, "add mac addr failed for mc_overflow");
                        return -ENOSPC;
                }

                hns3_err(hw, "add mac addr failed for undefined, code=%u",
                         resp_code);
                return -EIO;
        } else if (op == HNS3_MAC_VLAN_REMOVE) {
                if (resp_code == 0) {
                        return 0;
                } else if (resp_code == 1) {
                        hns3_dbg(hw, "remove mac addr failed for miss");
                        return -ENOENT;
                }

                hns3_err(hw, "remove mac addr failed for undefined, code=%u",
                         resp_code);
                return -EIO;
        } else if (op == HNS3_MAC_VLAN_LKUP) {
                if (resp_code == 0) {
                        return 0;
                } else if (resp_code == 1) {
                        hns3_dbg(hw, "lookup mac addr failed for miss");
                        return -ENOENT;
                }

                hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
                         resp_code);
                return -EIO;
        }

        hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
                 op);

        return -EINVAL;
}

static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
                         struct hns3_mac_vlan_tbl_entry_cmd *req,
                         struct hns3_cmd_desc *desc, bool is_mc)
{
        uint8_t resp_code;
        uint16_t retval;
        int ret;

        hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true);
        if (is_mc) {
                desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
                memcpy(desc[0].data, req,
                       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
                hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD,
                                          true);
                desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
                hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD,
                                          true);
                ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
        } else {
                memcpy(desc[0].data, req,
                       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
                ret = hns3_cmd_send(hw, desc, 1);
        }
        if (ret) {
                hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
                         ret);
                return ret;
        }
        resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
        retval = rte_le_to_cpu_16(desc[0].retval);

        return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
                                            HNS3_MAC_VLAN_LKUP);
}

static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
                      struct hns3_mac_vlan_tbl_entry_cmd *req,
                      struct hns3_cmd_desc *mc_desc)
{
        uint8_t resp_code;
        uint16_t retval;
        int cfg_status;
        int ret;

        if (mc_desc == NULL) {
                struct hns3_cmd_desc desc;

                hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false);
                memcpy(desc.data, req,
                       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
                ret = hns3_cmd_send(hw, &desc, 1);
                resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
                retval = rte_le_to_cpu_16(desc.retval);

                cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
                                                          HNS3_MAC_VLAN_ADD);
        } else {
                hns3_cmd_reuse_desc(&mc_desc[0], false);
                mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
                hns3_cmd_reuse_desc(&mc_desc[1], false);
                mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
                hns3_cmd_reuse_desc(&mc_desc[2], false);
                mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
                memcpy(mc_desc[0].data, req,
                       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
                mc_desc[0].retval = 0;
                ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
                resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff;
                retval = rte_le_to_cpu_16(mc_desc[0].retval);

                cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
                                                          HNS3_MAC_VLAN_ADD);
        }

        if (ret) {
                hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
                return ret;
        }

        return cfg_status;
}

static int
hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
                         struct hns3_mac_vlan_tbl_entry_cmd *req)
{
        struct hns3_cmd_desc desc;
        uint8_t resp_code;
        uint16_t retval;
        int ret;

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);

        memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));

        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret) {
                hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
                return ret;
        }
        resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
        retval = rte_le_to_cpu_16(desc.retval);

        return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
                                            HNS3_MAC_VLAN_REMOVE);
}

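/*
 * Add one unicast entry to the MAC vlan table. The address is looked up
 * first so that a duplicate add is reported as success, and a new entry
 * is only written while UMV space remains available.
 */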
static int
hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_mac_vlan_tbl_entry_cmd req;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_cmd_desc desc[3];
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        uint16_t egress_port = 0;
        uint8_t vf_id;
        int ret;

        /* check if mac addr is valid */
        if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                       mac_addr);
                hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
                         mac_str);
                return -EINVAL;
        }

        memset(&req, 0, sizeof(req));

        /*
         * In the current version, VF is not supported when the PF is driven
         * by the DPDK driver, so we only need to configure parameters for
         * the PF vport.
         */
        vf_id = HNS3_PF_FUNC_ID;
        hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
                       HNS3_MAC_EPORT_VFID_S, vf_id);

        req.egress_port = rte_cpu_to_le_16(egress_port);

        hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

        /*
         * Look up the mac address in the mac_vlan table, and add it if the
         * entry does not exist. Repeated unicast entries are not allowed
         * in the mac vlan table.
         */
        ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false);
        if (ret == -ENOENT) {
                if (!hns3_is_umv_space_full(hw)) {
                        ret = hns3_add_mac_vlan_tbl(hw, &req, NULL);
                        if (!ret)
                                hns3_update_umv_space(hw, false);
                        return ret;
                }

                hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

                return -ENOSPC;
        }

        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

        /* check if we just hit the duplicate */
        if (ret == 0) {
                hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
                return 0;
        }

        hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
                 mac_str);

        return ret;
}

static int
hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        struct rte_ether_addr *addr;
        int ret;
        int i;

        for (i = 0; i < hw->mc_addrs_num; i++) {
                addr = &hw->mc_addrs[i];
                /* Check if there are duplicate addresses */
                if (rte_is_same_ether_addr(addr, mac_addr)) {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                               addr);
                        hns3_err(hw, "failed to add mc mac addr, same addrs"
                                 "(%s) is added by the set_mc_mac_addr_list "
                                 "API", mac_str);
                        return -EINVAL;
                }
        }

        ret = hns3_add_mc_addr(hw, mac_addr);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                       mac_addr);
                hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
                         mac_str, ret);
        }
        return ret;
}

static int
hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        ret = hns3_remove_mc_addr(hw, mac_addr);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                       mac_addr);
                hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d",
                         mac_str, ret);
        }
        return ret;
}

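/*
 * .mac_addr_add dev op, reached through rte_eth_dev_mac_addr_add(). A
 * successful add at idx 0 also marks the default MAC address as set.
 */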
static int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                  uint32_t idx, __rte_unused uint32_t pool)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        rte_spinlock_lock(&hw->lock);

        /*
         * In the hns3 network engine, UC and MC MAC addresses are added to
         * hardware with different firmware commands, so we must determine
         * which kind the input address is before issuing a command. Note
         * that it is recommended to set MC addresses through the
         * rte_eth_dev_set_mc_addr_list API: adding MC addresses through
         * rte_eth_dev_mac_addr_add may reduce the number of UC mac
         * addresses that can be configured.
         */
        if (rte_is_multicast_ether_addr(mac_addr))
                ret = hns3_add_mc_addr_common(hw, mac_addr);
        else
                ret = hns3_add_uc_addr_common(hw, mac_addr);

        if (ret) {
                rte_spinlock_unlock(&hw->lock);
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                       mac_addr);
                hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
                         ret);
                return ret;
        }

        if (idx == 0)
                hw->mac.default_addr_setted = true;
        rte_spinlock_unlock(&hw->lock);

        return ret;
}

static int
hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
        struct hns3_mac_vlan_tbl_entry_cmd req;
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        /* check if mac addr is valid */
        if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                       mac_addr);
                hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
                         mac_str);
                return -EINVAL;
        }

        memset(&req, 0, sizeof(req));
        hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
        hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
        ret = hns3_remove_mac_vlan_tbl(hw, &req);
        if (ret == -ENOENT) /* mac addr not present in the mac vlan table */
                return 0;
        else if (ret == 0)
                hns3_update_umv_space(hw, true);

        return ret;
}

static void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /* index will be checked by upper level rte interface */
        struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret;

        rte_spinlock_lock(&hw->lock);

        if (rte_is_multicast_ether_addr(mac_addr))
                ret = hns3_remove_mc_addr_common(hw, mac_addr);
        else
                ret = hns3_remove_uc_addr_common(hw, mac_addr);
        rte_spinlock_unlock(&hw->lock);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                       mac_addr);
                hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
                         ret);
        }
}

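/*
 * .mac_addr_set dev op: replace the default (idx 0) unicast address. The
 * old address is removed, the new one is added and the MAC pause address
 * is updated; on failure, the completed steps are rolled back and the old
 * address is restored when possible.
 */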
static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
                          struct rte_ether_addr *mac_addr)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_ether_addr *oaddr;
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        bool default_addr_setted;
        bool rm_succes = false;
        int ret, ret_val;

        /*
         * The rte layer of the DPDK framework guarantees that the input
         * parameter mac_addr is a valid address.
         */
        oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
        default_addr_setted = hw->mac.default_addr_setted;
        if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr))
                return 0;

        rte_spinlock_lock(&hw->lock);
        if (default_addr_setted) {
                ret = hns3_remove_uc_addr_common(hw, oaddr);
                if (ret) {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                               oaddr);
                        hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
                                  mac_str, ret);
                        rm_succes = false;
                } else {
                        rm_succes = true;
                }
        }

        ret = hns3_add_uc_addr_common(hw, mac_addr);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                       mac_addr);
                hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
                goto err_add_uc_addr;
        }

        ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
        if (ret) {
                hns3_err(hw, "Failed to configure mac pause address: %d", ret);
                goto err_pause_addr_cfg;
        }

        rte_ether_addr_copy(mac_addr,
                            (struct rte_ether_addr *)hw->mac.mac_addr);
        hw->mac.default_addr_setted = true;
        rte_spinlock_unlock(&hw->lock);

        return 0;

err_pause_addr_cfg:
        ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
        if (ret_val) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                       mac_addr);
                hns3_warn(hw,
                          "Failed to roll back to del setted mac addr(%s): %d",
                          mac_str, ret_val);
        }

err_add_uc_addr:
        if (rm_succes) {
                ret_val = hns3_add_uc_addr_common(hw, oaddr);
                if (ret_val) {
                        hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                               oaddr);
                        hns3_warn(hw,
                                  "Failed to restore old uc mac addr(%s): %d",
                                  mac_str, ret_val);
                        hw->mac.default_addr_setted = false;
                }
        }
        rte_spinlock_unlock(&hw->lock);

        return ret;
}

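/*
 * Remove (del == true) or restore (del == false) every address configured
 * in the mac_addrs array; used when tearing the port down and when
 * replaying the MAC configuration after a reset. The first error is
 * recorded, but the loop continues so the remaining addresses are still
 * processed.
 */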
"remove" : "restore", 1850 mac_str, i, ret); 1851 } 1852 } 1853 return err; 1854 } 1855 1856 static void 1857 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr) 1858 { 1859 #define HNS3_VF_NUM_IN_FIRST_DESC 192 1860 uint8_t word_num; 1861 uint8_t bit_num; 1862 1863 if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) { 1864 word_num = vfid / 32; 1865 bit_num = vfid % 32; 1866 if (clr) 1867 desc[1].data[word_num] &= 1868 rte_cpu_to_le_32(~(1UL << bit_num)); 1869 else 1870 desc[1].data[word_num] |= 1871 rte_cpu_to_le_32(1UL << bit_num); 1872 } else { 1873 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32; 1874 bit_num = vfid % 32; 1875 if (clr) 1876 desc[2].data[word_num] &= 1877 rte_cpu_to_le_32(~(1UL << bit_num)); 1878 else 1879 desc[2].data[word_num] |= 1880 rte_cpu_to_le_32(1UL << bit_num); 1881 } 1882 } 1883 1884 static int 1885 hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1886 { 1887 struct hns3_mac_vlan_tbl_entry_cmd req; 1888 struct hns3_cmd_desc desc[3]; 1889 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1890 uint8_t vf_id; 1891 int ret; 1892 1893 /* Check if mac addr is valid */ 1894 if (!rte_is_multicast_ether_addr(mac_addr)) { 1895 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1896 mac_addr); 1897 hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid", 1898 mac_str); 1899 return -EINVAL; 1900 } 1901 1902 memset(&req, 0, sizeof(req)); 1903 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1904 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1905 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1906 if (ret) { 1907 /* This mac addr do not exist, add new entry for it */ 1908 memset(desc[0].data, 0, sizeof(desc[0].data)); 1909 memset(desc[1].data, 0, sizeof(desc[0].data)); 1910 memset(desc[2].data, 0, sizeof(desc[0].data)); 1911 } 1912 1913 /* 1914 * In current version VF is not supported when PF is driven by DPDK 1915 * driver, just need to configure parameters for PF vport. 1916 */ 1917 vf_id = HNS3_PF_FUNC_ID; 1918 hns3_update_desc_vfid(desc, vf_id, false); 1919 ret = hns3_add_mac_vlan_tbl(hw, &req, desc); 1920 if (ret) { 1921 if (ret == -ENOSPC) 1922 hns3_err(hw, "mc mac vlan table is full"); 1923 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1924 mac_addr); 1925 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret); 1926 } 1927 1928 return ret; 1929 } 1930 1931 static int 1932 hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1933 { 1934 struct hns3_mac_vlan_tbl_entry_cmd req; 1935 struct hns3_cmd_desc desc[3]; 1936 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1937 uint8_t vf_id; 1938 int ret; 1939 1940 /* Check if mac addr is valid */ 1941 if (!rte_is_multicast_ether_addr(mac_addr)) { 1942 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1943 mac_addr); 1944 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid", 1945 mac_str); 1946 return -EINVAL; 1947 } 1948 1949 memset(&req, 0, sizeof(req)); 1950 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1951 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1952 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1953 if (ret == 0) { 1954 /* 1955 * This mac addr exist, remove this handle's VFID for it. 1956 * In current version VF is not supported when PF is driven by 1957 * DPDK driver, just need to configure parameters for PF vport. 
1958 */ 1959 vf_id = HNS3_PF_FUNC_ID; 1960 hns3_update_desc_vfid(desc, vf_id, true); 1961 1962 /* All the vfid is zero, so need to delete this entry */ 1963 ret = hns3_remove_mac_vlan_tbl(hw, &req); 1964 } else if (ret == -ENOENT) { 1965 /* This mac addr doesn't exist. */ 1966 return 0; 1967 } 1968 1969 if (ret) { 1970 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1971 mac_addr); 1972 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret); 1973 } 1974 1975 return ret; 1976 } 1977 1978 static int 1979 hns3_set_mc_addr_chk_param(struct hns3_hw *hw, 1980 struct rte_ether_addr *mc_addr_set, 1981 uint32_t nb_mc_addr) 1982 { 1983 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1984 struct rte_ether_addr *addr; 1985 uint32_t i; 1986 uint32_t j; 1987 1988 if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { 1989 hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " 1990 "invalid. valid range: 0~%d", 1991 nb_mc_addr, HNS3_MC_MACADDR_NUM); 1992 return -EINVAL; 1993 } 1994 1995 /* Check if input mac addresses are valid */ 1996 for (i = 0; i < nb_mc_addr; i++) { 1997 addr = &mc_addr_set[i]; 1998 if (!rte_is_multicast_ether_addr(addr)) { 1999 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2000 addr); 2001 hns3_err(hw, 2002 "failed to set mc mac addr, addr(%s) invalid.", 2003 mac_str); 2004 return -EINVAL; 2005 } 2006 2007 /* Check if there are duplicate addresses */ 2008 for (j = i + 1; j < nb_mc_addr; j++) { 2009 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 2010 hns3_ether_format_addr(mac_str, 2011 RTE_ETHER_ADDR_FMT_SIZE, 2012 addr); 2013 hns3_err(hw, "failed to set mc mac addr, " 2014 "addrs invalid. two same addrs(%s).", 2015 mac_str); 2016 return -EINVAL; 2017 } 2018 } 2019 2020 /* 2021 * Check if there are duplicate addresses between mac_addrs 2022 * and mc_addr_set 2023 */ 2024 for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) { 2025 if (rte_is_same_ether_addr(addr, 2026 &hw->data->mac_addrs[j])) { 2027 hns3_ether_format_addr(mac_str, 2028 RTE_ETHER_ADDR_FMT_SIZE, 2029 addr); 2030 hns3_err(hw, "failed to set mc mac addr, " 2031 "addrs invalid. 
addrs(%s) has already " 2032 "been configured by the mac_addr add API", 2033 mac_str); 2034 return -EINVAL; 2035 } 2036 } 2037 } 2038 2039 return 0; 2040 } 2041 2042 static void 2043 hns3_set_mc_addr_calc_addr(struct hns3_hw *hw, 2044 struct rte_ether_addr *mc_addr_set, 2045 int mc_addr_num, 2046 struct rte_ether_addr *reserved_addr_list, 2047 int *reserved_addr_num, 2048 struct rte_ether_addr *add_addr_list, 2049 int *add_addr_num, 2050 struct rte_ether_addr *rm_addr_list, 2051 int *rm_addr_num) 2052 { 2053 struct rte_ether_addr *addr; 2054 int current_addr_num; 2055 int reserved_num = 0; 2056 int add_num = 0; 2057 int rm_num = 0; 2058 int num; 2059 int i; 2060 int j; 2061 bool same_addr; 2062 2063 /* Calculate the mc mac address list that should be removed */ 2064 current_addr_num = hw->mc_addrs_num; 2065 for (i = 0; i < current_addr_num; i++) { 2066 addr = &hw->mc_addrs[i]; 2067 same_addr = false; 2068 for (j = 0; j < mc_addr_num; j++) { 2069 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 2070 same_addr = true; 2071 break; 2072 } 2073 } 2074 2075 if (!same_addr) { 2076 rte_ether_addr_copy(addr, &rm_addr_list[rm_num]); 2077 rm_num++; 2078 } else { 2079 rte_ether_addr_copy(addr, 2080 &reserved_addr_list[reserved_num]); 2081 reserved_num++; 2082 } 2083 } 2084 2085 /* Calculate the mc mac address list that should be added */ 2086 for (i = 0; i < mc_addr_num; i++) { 2087 addr = &mc_addr_set[i]; 2088 same_addr = false; 2089 for (j = 0; j < current_addr_num; j++) { 2090 if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) { 2091 same_addr = true; 2092 break; 2093 } 2094 } 2095 2096 if (!same_addr) { 2097 rte_ether_addr_copy(addr, &add_addr_list[add_num]); 2098 add_num++; 2099 } 2100 } 2101 2102 /* Reorder the mc mac address list maintained by driver */ 2103 for (i = 0; i < reserved_num; i++) 2104 rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]); 2105 2106 for (i = 0; i < rm_num; i++) { 2107 num = reserved_num + i; 2108 rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]); 2109 } 2110 2111 *reserved_addr_num = reserved_num; 2112 *add_addr_num = add_num; 2113 *rm_addr_num = rm_num; 2114 } 2115 2116 static int 2117 hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, 2118 struct rte_ether_addr *mc_addr_set, 2119 uint32_t nb_mc_addr) 2120 { 2121 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2122 struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM]; 2123 struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM]; 2124 struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM]; 2125 struct rte_ether_addr *addr; 2126 int reserved_addr_num; 2127 int add_addr_num; 2128 int rm_addr_num; 2129 int mc_addr_num; 2130 int num; 2131 int ret; 2132 int i; 2133 2134 /* Check if input parameters are valid */ 2135 ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); 2136 if (ret) 2137 return ret; 2138 2139 rte_spinlock_lock(&hw->lock); 2140 2141 /* 2142 * Calculate the mc mac address lists that should be removed and 2143 * added, then reorder the mc mac address list maintained by the driver.
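* Only the difference sets are issued to hardware below; addresses present in both the old and new lists stay in place.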
2144 */ 2145 mc_addr_num = (int)nb_mc_addr; 2146 hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num, 2147 reserved_addr_list, &reserved_addr_num, 2148 add_addr_list, &add_addr_num, 2149 rm_addr_list, &rm_addr_num); 2150 2151 /* Remove mc mac addresses */ 2152 for (i = 0; i < rm_addr_num; i++) { 2153 num = rm_addr_num - i - 1; 2154 addr = &rm_addr_list[num]; 2155 ret = hns3_remove_mc_addr(hw, addr); 2156 if (ret) { 2157 rte_spinlock_unlock(&hw->lock); 2158 return ret; 2159 } 2160 hw->mc_addrs_num--; 2161 } 2162 2163 /* Add mc mac addresses */ 2164 for (i = 0; i < add_addr_num; i++) { 2165 addr = &add_addr_list[i]; 2166 ret = hns3_add_mc_addr(hw, addr); 2167 if (ret) { 2168 rte_spinlock_unlock(&hw->lock); 2169 return ret; 2170 } 2171 2172 num = reserved_addr_num + i; 2173 rte_ether_addr_copy(addr, &hw->mc_addrs[num]); 2174 hw->mc_addrs_num++; 2175 } 2176 rte_spinlock_unlock(&hw->lock); 2177 2178 return 0; 2179 } 2180 2181 static int 2182 hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) 2183 { 2184 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 2185 struct hns3_hw *hw = &hns->hw; 2186 struct rte_ether_addr *addr; 2187 int err = 0; 2188 int ret; 2189 int i; 2190 2191 for (i = 0; i < hw->mc_addrs_num; i++) { 2192 addr = &hw->mc_addrs[i]; 2193 if (!rte_is_multicast_ether_addr(addr)) 2194 continue; 2195 if (del) 2196 ret = hns3_remove_mc_addr(hw, addr); 2197 else 2198 ret = hns3_add_mc_addr(hw, addr); 2199 if (ret) { 2200 err = ret; 2201 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2202 addr); 2203 hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d", 2204 del ? "Remove" : "Restore", mac_str, ret); 2205 } 2206 } 2207 return err; 2208 } 2209 2210 static int 2211 hns3_check_mq_mode(struct rte_eth_dev *dev) 2212 { 2213 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 2214 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 2215 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2216 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2217 struct rte_eth_dcb_rx_conf *dcb_rx_conf; 2218 struct rte_eth_dcb_tx_conf *dcb_tx_conf; 2219 uint8_t num_tc; 2220 int max_tc = 0; 2221 int i; 2222 2223 if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) || 2224 (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB || 2225 tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) { 2226 hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.", 2227 rx_mq_mode, tx_mq_mode); 2228 return -EOPNOTSUPP; 2229 } 2230 2231 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 2232 dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; 2233 if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) { 2234 if (dcb_rx_conf->nb_tcs > pf->tc_max) { 2235 hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.", 2236 dcb_rx_conf->nb_tcs, pf->tc_max); 2237 return -EINVAL; 2238 } 2239 2240 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS || 2241 dcb_rx_conf->nb_tcs == HNS3_8_TCS)) { 2242 hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, " 2243 "nb_tcs(%d) != %d or %d in rx direction.", 2244 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS); 2245 return -EINVAL; 2246 } 2247 2248 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) { 2249 hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)", 2250 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs); 2251 return -EINVAL; 2252 } 2253 2254 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) { 2255 if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) { 2256 hns3_err(hw, "dcb_tc[%d] = %u in rx direction, " 2257 "is not equal to one in tx direction.", 2258 i, 
dcb_rx_conf->dcb_tc[i]); 2259 return -EINVAL; 2260 } 2261 if (dcb_rx_conf->dcb_tc[i] > max_tc) 2262 max_tc = dcb_rx_conf->dcb_tc[i]; 2263 } 2264 2265 num_tc = max_tc + 1; 2266 if (num_tc > dcb_rx_conf->nb_tcs) { 2267 hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)", 2268 num_tc, dcb_rx_conf->nb_tcs); 2269 return -EINVAL; 2270 } 2271 } 2272 2273 return 0; 2274 } 2275 2276 static int 2277 hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en, 2278 enum hns3_ring_type queue_type, uint16_t queue_id) 2279 { 2280 struct hns3_cmd_desc desc; 2281 struct hns3_ctrl_vector_chain_cmd *req = 2282 (struct hns3_ctrl_vector_chain_cmd *)desc.data; 2283 enum hns3_opcode_type op; 2284 uint16_t tqp_type_and_id = 0; 2285 uint16_t type; 2286 uint16_t gl; 2287 int ret; 2288 2289 op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; 2290 hns3_cmd_setup_basic_desc(&desc, op, false); 2291 req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M, 2292 HNS3_TQP_INT_ID_L_S); 2293 req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M, 2294 HNS3_TQP_INT_ID_H_S); 2295 2296 if (queue_type == HNS3_RING_TYPE_RX) 2297 gl = HNS3_RING_GL_RX; 2298 else 2299 gl = HNS3_RING_GL_TX; 2300 2301 type = queue_type; 2302 2303 hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S, 2304 type); 2305 hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id); 2306 hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S, 2307 gl); 2308 req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); 2309 req->int_cause_num = 1; 2310 ret = hns3_cmd_send(hw, &desc, 1); 2311 if (ret) { 2312 hns3_err(hw, "%s TQP %u failed, vector_id = %u, ret = %d.", 2313 en ? "Map" : "Unmap", queue_id, vector_id, ret); 2314 return ret; 2315 } 2316 2317 return 0; 2318 } 2319 2320 static int 2321 hns3_init_ring_with_vector(struct hns3_hw *hw) 2322 { 2323 uint16_t vec; 2324 int ret; 2325 int i; 2326 2327 /* 2328 * In the hns3 network engine, vector 0 is always the misc interrupt of 2329 * this function, and vectors 1~N can be used for the queues of the 2330 * function. Tx and Rx queues with the same number share one interrupt 2331 * vector. During initialization, all hardware mappings between queues 2332 * and interrupt vectors need to be cleared, so that errors caused by 2333 * residual configurations, such as unexpected Tx interrupts, 2334 * can be avoided. 2335 */ 2336 vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ 2337 if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE) 2338 vec = vec - 1; /* the last interrupt is reserved */ 2339 hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); 2340 for (i = 0; i < hw->intr_tqps_num; i++) { 2341 /* 2342 * Set the gap limiter/rate limiter/quantity limiter algorithm 2343 * configuration for interrupt coalescing of the queue's interrupt. 2344 */ 2345 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, 2346 HNS3_TQP_INTR_GL_DEFAULT); 2347 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, 2348 HNS3_TQP_INTR_GL_DEFAULT); 2349 hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); 2350 /* 2351 * QL(quantity limiter) is not used currently, just set 0 to 2352 * close it.
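* (The Tx/Rx rings are then unbound from the vector below to clear any residual mapping.)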
2353 */ 2354 hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); 2355 2356 ret = hns3_bind_ring_with_vector(hw, vec, false, 2357 HNS3_RING_TYPE_TX, i); 2358 if (ret) { 2359 PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with " 2360 "vector: %u, ret=%d", i, vec, ret); 2361 return ret; 2362 } 2363 2364 ret = hns3_bind_ring_with_vector(hw, vec, false, 2365 HNS3_RING_TYPE_RX, i); 2366 if (ret) { 2367 PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with " 2368 "vector: %u, ret=%d", i, vec, ret); 2369 return ret; 2370 } 2371 } 2372 2373 return 0; 2374 } 2375 2376 static int 2377 hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf) 2378 { 2379 struct hns3_adapter *hns = dev->data->dev_private; 2380 struct hns3_hw *hw = &hns->hw; 2381 uint32_t max_rx_pkt_len; 2382 uint16_t mtu; 2383 int ret; 2384 2385 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) 2386 return 0; 2387 2388 /* 2389 * If jumbo frames are enabled, MTU needs to be refreshed 2390 * according to the maximum RX packet length. 2391 */ 2392 max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; 2393 if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || 2394 max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { 2395 hns3_err(hw, "maximum Rx packet length must be greater than %u " 2396 "and no more than %u when jumbo frame enabled.", 2397 (uint16_t)HNS3_DEFAULT_FRAME_LEN, 2398 (uint16_t)HNS3_MAX_FRAME_LEN); 2399 return -EINVAL; 2400 } 2401 2402 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); 2403 ret = hns3_dev_mtu_set(dev, mtu); 2404 if (ret) 2405 return ret; 2406 dev->data->mtu = mtu; 2407 2408 return 0; 2409 } 2410 2411 static int 2412 hns3_setup_dcb(struct rte_eth_dev *dev) 2413 { 2414 struct hns3_adapter *hns = dev->data->dev_private; 2415 struct hns3_hw *hw = &hns->hw; 2416 int ret; 2417 2418 if (!hns3_dev_dcb_supported(hw)) { 2419 hns3_err(hw, "this port does not support dcb configurations."); 2420 return -EOPNOTSUPP; 2421 } 2422 2423 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) { 2424 hns3_err(hw, "MAC pause enabled, cannot config dcb info."); 2425 return -EOPNOTSUPP; 2426 } 2427 2428 ret = hns3_dcb_configure(hns); 2429 if (ret) 2430 hns3_err(hw, "failed to config dcb: %d", ret); 2431 2432 return ret; 2433 } 2434 2435 static int 2436 hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds) 2437 { 2438 int ret; 2439 2440 /* 2441 * Some hardware doesn't support auto-negotiation, but users may not 2442 * configure link_speeds (default 0), which means auto-negotiation. 2443 * In this case, a warning message need to be printed, instead of 2444 * an error. 
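* Returning 0 keeps such devices usable at their default fixed speed.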
2445 */ 2446 if (link_speeds == ETH_LINK_SPEED_AUTONEG && 2447 hw->mac.support_autoneg == 0) { 2448 hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!"); 2449 return 0; 2450 } 2451 2452 if (link_speeds != ETH_LINK_SPEED_AUTONEG) { 2453 ret = hns3_check_port_speed(hw, link_speeds); 2454 if (ret) 2455 return ret; 2456 } 2457 2458 return 0; 2459 } 2460 2461 static int 2462 hns3_check_dev_conf(struct rte_eth_dev *dev) 2463 { 2464 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2465 struct rte_eth_conf *conf = &dev->data->dev_conf; 2466 int ret; 2467 2468 ret = hns3_check_mq_mode(dev); 2469 if (ret) 2470 return ret; 2471 2472 return hns3_check_link_speed(hw, conf->link_speeds); 2473 } 2474 2475 static int 2476 hns3_dev_configure(struct rte_eth_dev *dev) 2477 { 2478 struct hns3_adapter *hns = dev->data->dev_private; 2479 struct rte_eth_conf *conf = &dev->data->dev_conf; 2480 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; 2481 struct hns3_hw *hw = &hns->hw; 2482 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2483 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2484 struct rte_eth_rss_conf rss_conf; 2485 bool gro_en; 2486 int ret; 2487 2488 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); 2489 2490 /* 2491 * Some versions of the hardware network engine do not support 2492 * individually enabling/disabling/resetting the Tx or Rx queue. These 2493 * devices must enable/disable/reset Tx and Rx queues at the same time. 2494 * When the number of Tx queues allocated by upper applications is not 2495 * equal to the number of Rx queues, the driver needs to set up fake Tx 2496 * or Rx queues to adjust the numbers of Tx/Rx queues; otherwise, the 2497 * network engine cannot work as usual. These fake queues are 2498 * imperceptible to, and cannot be used by, upper applications. 2499 */ 2500 if (!hns3_dev_indep_txrx_supported(hw)) { 2501 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); 2502 if (ret) { 2503 hns3_err(hw, "failed to set Rx/Tx fake queues, ret = %d.", 2504 ret); 2505 return ret; 2506 } 2507 } 2508 2509 hw->adapter_state = HNS3_NIC_CONFIGURING; 2510 ret = hns3_check_dev_conf(dev); 2511 if (ret) 2512 goto cfg_err; 2513 2514 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) { 2515 ret = hns3_setup_dcb(dev); 2516 if (ret) 2517 goto cfg_err; 2518 } 2519 2520 /* When RSS is not configured, hardware redirects packets to queue 0 */ 2521 if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) { 2522 conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 2523 rss_conf = conf->rx_adv_conf.rss_conf; 2524 hw->rss_dis_flag = false; 2525 ret = hns3_dev_rss_hash_update(dev, &rss_conf); 2526 if (ret) 2527 goto cfg_err; 2528 } 2529 2530 ret = hns3_refresh_mtu(dev, conf); 2531 if (ret) 2532 goto cfg_err; 2533 2534 ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf); 2535 if (ret) 2536 goto cfg_err; 2537 2538 ret = hns3_dev_configure_vlan(dev); 2539 if (ret) 2540 goto cfg_err; 2541 2542 /* config hardware GRO */ 2543 gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? 
true : false; 2544 ret = hns3_config_gro(hw, gro_en); 2545 if (ret) 2546 goto cfg_err; 2547 2548 hns3_init_rx_ptype_tble(dev); 2549 hw->adapter_state = HNS3_NIC_CONFIGURED; 2550 2551 return 0; 2552 2553 cfg_err: 2554 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); 2555 hw->adapter_state = HNS3_NIC_INITIALIZED; 2556 2557 return ret; 2558 } 2559 2560 static int 2561 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) 2562 { 2563 struct hns3_config_max_frm_size_cmd *req; 2564 struct hns3_cmd_desc desc; 2565 2566 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false); 2567 2568 req = (struct hns3_config_max_frm_size_cmd *)desc.data; 2569 req->max_frm_size = rte_cpu_to_le_16(new_mps); 2570 req->min_frm_size = RTE_ETHER_MIN_LEN; 2571 2572 return hns3_cmd_send(hw, &desc, 1); 2573 } 2574 2575 static int 2576 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) 2577 { 2578 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2579 uint16_t original_mps = hns->pf.mps; 2580 int err; 2581 int ret; 2582 2583 ret = hns3_set_mac_mtu(hw, mps); 2584 if (ret) { 2585 hns3_err(hw, "failed to set mtu, ret = %d", ret); 2586 return ret; 2587 } 2588 2589 hns->pf.mps = mps; 2590 ret = hns3_buffer_alloc(hw); 2591 if (ret) { 2592 hns3_err(hw, "failed to allocate buffer, ret = %d", ret); 2593 goto rollback; 2594 } 2595 2596 return 0; 2597 2598 rollback: 2599 err = hns3_set_mac_mtu(hw, original_mps); 2600 if (err) { 2601 hns3_err(hw, "fail to rollback MTU, err = %d", err); 2602 return ret; 2603 } 2604 hns->pf.mps = original_mps; 2605 2606 return ret; 2607 } 2608 2609 static int 2610 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 2611 { 2612 struct hns3_adapter *hns = dev->data->dev_private; 2613 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; 2614 struct hns3_hw *hw = &hns->hw; 2615 bool is_jumbo_frame; 2616 int ret; 2617 2618 if (dev->data->dev_started) { 2619 hns3_err(hw, "Failed to set mtu, port %u must be stopped " 2620 "before configuration", dev->data->port_id); 2621 return -EBUSY; 2622 } 2623 2624 rte_spinlock_lock(&hw->lock); 2625 is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false; 2626 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN); 2627 2628 /* 2629 * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely 2630 * assign to "uint16_t" type variable. 
2631 */ 2632 ret = hns3_config_mtu(hw, (uint16_t)frame_size); 2633 if (ret) { 2634 rte_spinlock_unlock(&hw->lock); 2635 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d", 2636 dev->data->port_id, mtu, ret); 2637 return ret; 2638 } 2639 2640 if (is_jumbo_frame) 2641 dev->data->dev_conf.rxmode.offloads |= 2642 DEV_RX_OFFLOAD_JUMBO_FRAME; 2643 else 2644 dev->data->dev_conf.rxmode.offloads &= 2645 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 2646 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 2647 rte_spinlock_unlock(&hw->lock); 2648 2649 return 0; 2650 } 2651 2652 static uint32_t 2653 hns3_get_copper_port_speed_capa(uint32_t supported_speed) 2654 { 2655 uint32_t speed_capa = 0; 2656 2657 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT) 2658 speed_capa |= ETH_LINK_SPEED_10M_HD; 2659 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT) 2660 speed_capa |= ETH_LINK_SPEED_10M; 2661 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT) 2662 speed_capa |= ETH_LINK_SPEED_100M_HD; 2663 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT) 2664 speed_capa |= ETH_LINK_SPEED_100M; 2665 if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT) 2666 speed_capa |= ETH_LINK_SPEED_1G; 2667 2668 return speed_capa; 2669 } 2670 2671 static uint32_t 2672 hns3_get_fiber_port_speed_capa(uint32_t supported_speed) 2673 { 2674 uint32_t speed_capa = 0; 2675 2676 if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT) 2677 speed_capa |= ETH_LINK_SPEED_1G; 2678 if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT) 2679 speed_capa |= ETH_LINK_SPEED_10G; 2680 if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT) 2681 speed_capa |= ETH_LINK_SPEED_25G; 2682 if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT) 2683 speed_capa |= ETH_LINK_SPEED_40G; 2684 if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT) 2685 speed_capa |= ETH_LINK_SPEED_50G; 2686 if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT) 2687 speed_capa |= ETH_LINK_SPEED_100G; 2688 if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT) 2689 speed_capa |= ETH_LINK_SPEED_200G; 2690 2691 return speed_capa; 2692 } 2693 2694 static uint32_t 2695 hns3_get_speed_capa(struct hns3_hw *hw) 2696 { 2697 struct hns3_mac *mac = &hw->mac; 2698 uint32_t speed_capa; 2699 2700 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 2701 speed_capa = 2702 hns3_get_copper_port_speed_capa(mac->supported_speed); 2703 else 2704 speed_capa = 2705 hns3_get_fiber_port_speed_capa(mac->supported_speed); 2706 2707 if (mac->support_autoneg == 0) 2708 speed_capa |= ETH_LINK_SPEED_FIXED; 2709 2710 return speed_capa; 2711 } 2712 2713 int 2714 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) 2715 { 2716 struct hns3_adapter *hns = eth_dev->data->dev_private; 2717 struct hns3_hw *hw = &hns->hw; 2718 uint16_t queue_num = hw->tqps_num; 2719 2720 /* 2721 * In interrupt mode, 'max_rx_queues' is set based on the number of 2722 * MSI-X interrupt resources of the hardware. 
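* hw->intr_tqps_num is derived from hw->num_msi in hns3_init_ring_with_vector().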
2723 */ 2724 if (hw->data->dev_conf.intr_conf.rxq == 1) 2725 queue_num = hw->intr_tqps_num; 2726 2727 info->max_rx_queues = queue_num; 2728 info->max_tx_queues = hw->tqps_num; 2729 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ 2730 info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; 2731 info->max_mac_addrs = HNS3_UC_MACADDR_NUM; 2732 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; 2733 info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; 2734 info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 2735 DEV_RX_OFFLOAD_TCP_CKSUM | 2736 DEV_RX_OFFLOAD_UDP_CKSUM | 2737 DEV_RX_OFFLOAD_SCTP_CKSUM | 2738 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 2739 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | 2740 DEV_RX_OFFLOAD_KEEP_CRC | 2741 DEV_RX_OFFLOAD_SCATTER | 2742 DEV_RX_OFFLOAD_VLAN_STRIP | 2743 DEV_RX_OFFLOAD_VLAN_FILTER | 2744 DEV_RX_OFFLOAD_JUMBO_FRAME | 2745 DEV_RX_OFFLOAD_RSS_HASH | 2746 DEV_RX_OFFLOAD_TCP_LRO); 2747 info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 2748 DEV_TX_OFFLOAD_IPV4_CKSUM | 2749 DEV_TX_OFFLOAD_TCP_CKSUM | 2750 DEV_TX_OFFLOAD_UDP_CKSUM | 2751 DEV_TX_OFFLOAD_SCTP_CKSUM | 2752 DEV_TX_OFFLOAD_MULTI_SEGS | 2753 DEV_TX_OFFLOAD_TCP_TSO | 2754 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 2755 DEV_TX_OFFLOAD_GRE_TNL_TSO | 2756 DEV_TX_OFFLOAD_GENEVE_TNL_TSO | 2757 DEV_TX_OFFLOAD_MBUF_FAST_FREE | 2758 hns3_txvlan_cap_get(hw)); 2759 2760 if (hns3_dev_outer_udp_cksum_supported(hw)) 2761 info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; 2762 2763 if (hns3_dev_indep_txrx_supported(hw)) 2764 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 2765 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 2766 2767 if (hns3_dev_ptp_supported(hw)) 2768 info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; 2769 2770 info->rx_desc_lim = (struct rte_eth_desc_lim) { 2771 .nb_max = HNS3_MAX_RING_DESC, 2772 .nb_min = HNS3_MIN_RING_DESC, 2773 .nb_align = HNS3_ALIGN_RING_DESC, 2774 }; 2775 2776 info->tx_desc_lim = (struct rte_eth_desc_lim) { 2777 .nb_max = HNS3_MAX_RING_DESC, 2778 .nb_min = HNS3_MIN_RING_DESC, 2779 .nb_align = HNS3_ALIGN_RING_DESC, 2780 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, 2781 .nb_mtu_seg_max = hw->max_non_tso_bd_num, 2782 }; 2783 2784 info->speed_capa = hns3_get_speed_capa(hw); 2785 info->default_rxconf = (struct rte_eth_rxconf) { 2786 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, 2787 /* 2788 * If there are no available Rx buffer descriptors, incoming 2789 * packets are always dropped by hardware based on hns3 network 2790 * engine. 
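* The default rx_drop_en value of 1 simply reflects this fixed hardware behavior.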
2791 */ 2792 .rx_drop_en = 1, 2793 .offloads = 0, 2794 }; 2795 info->default_txconf = (struct rte_eth_txconf) { 2796 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, 2797 .offloads = 0, 2798 }; 2799 2800 info->reta_size = hw->rss_ind_tbl_size; 2801 info->hash_key_size = HNS3_RSS_KEY_SIZE; 2802 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; 2803 2804 info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2805 info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2806 info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2807 info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2808 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2809 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2810 2811 return 0; 2812 } 2813 2814 static int 2815 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, 2816 size_t fw_size) 2817 { 2818 struct hns3_adapter *hns = eth_dev->data->dev_private; 2819 struct hns3_hw *hw = &hns->hw; 2820 uint32_t version = hw->fw_version; 2821 int ret; 2822 2823 ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", 2824 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, 2825 HNS3_FW_VERSION_BYTE3_S), 2826 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, 2827 HNS3_FW_VERSION_BYTE2_S), 2828 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, 2829 HNS3_FW_VERSION_BYTE1_S), 2830 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, 2831 HNS3_FW_VERSION_BYTE0_S)); 2832 if (ret < 0) 2833 return -EINVAL; 2834 2835 ret += 1; /* add the size of '\0' */ 2836 if (fw_size < (size_t)ret) 2837 return ret; 2838 else 2839 return 0; 2840 } 2841 2842 static int 2843 hns3_update_port_link_info(struct rte_eth_dev *eth_dev) 2844 { 2845 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2846 int ret; 2847 2848 (void)hns3_update_link_status(hw); 2849 2850 ret = hns3_update_link_info(eth_dev); 2851 if (ret) 2852 hw->mac.link_status = ETH_LINK_DOWN; 2853 2854 return ret; 2855 } 2856 2857 static void 2858 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev, 2859 struct rte_eth_link *new_link) 2860 { 2861 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2862 struct hns3_mac *mac = &hw->mac; 2863 2864 switch (mac->link_speed) { 2865 case ETH_SPEED_NUM_10M: 2866 case ETH_SPEED_NUM_100M: 2867 case ETH_SPEED_NUM_1G: 2868 case ETH_SPEED_NUM_10G: 2869 case ETH_SPEED_NUM_25G: 2870 case ETH_SPEED_NUM_40G: 2871 case ETH_SPEED_NUM_50G: 2872 case ETH_SPEED_NUM_100G: 2873 case ETH_SPEED_NUM_200G: 2874 if (mac->link_status) 2875 new_link->link_speed = mac->link_speed; 2876 break; 2877 default: 2878 if (mac->link_status) 2879 new_link->link_speed = ETH_SPEED_NUM_UNKNOWN; 2880 break; 2881 } 2882 2883 if (!mac->link_status) 2884 new_link->link_speed = ETH_SPEED_NUM_NONE; 2885 2886 new_link->link_duplex = mac->link_duplex; 2887 new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; 2888 new_link->link_autoneg = mac->link_autoneg; 2889 } 2890 2891 static int 2892 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) 2893 { 2894 #define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */ 2895 #define HNS3_MAX_LINK_CHECK_TIMES 20 /* 2s (100 * 20ms) in total */ 2896 2897 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2898 uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES; 2899 struct hns3_mac *mac = &hw->mac; 2900 struct rte_eth_link new_link; 2901 int ret; 2902 2903 /* When port is stopped, report link down. 
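No hardware access is needed in this case.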
*/ 2904 if (eth_dev->data->dev_started == 0) { 2905 new_link.link_autoneg = mac->link_autoneg; 2906 new_link.link_duplex = mac->link_duplex; 2907 new_link.link_speed = ETH_SPEED_NUM_NONE; 2908 new_link.link_status = ETH_LINK_DOWN; 2909 goto out; 2910 } 2911 2912 do { 2913 ret = hns3_update_port_link_info(eth_dev); 2914 if (ret) { 2915 hns3_err(hw, "failed to get port link info, ret = %d.", 2916 ret); 2917 break; 2918 } 2919 2920 if (!wait_to_complete || mac->link_status == ETH_LINK_UP) 2921 break; 2922 2923 rte_delay_ms(HNS3_LINK_CHECK_INTERVAL); 2924 } while (retry_cnt--); 2925 2926 memset(&new_link, 0, sizeof(new_link)); 2927 hns3_setup_linkstatus(eth_dev, &new_link); 2928 2929 out: 2930 return rte_eth_linkstatus_set(eth_dev, &new_link); 2931 } 2932 2933 static int 2934 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status) 2935 { 2936 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2937 struct hns3_pf *pf = &hns->pf; 2938 2939 if (!(status->pf_state & HNS3_PF_STATE_DONE)) 2940 return -EINVAL; 2941 2942 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false; 2943 2944 return 0; 2945 } 2946 2947 static int 2948 hns3_query_function_status(struct hns3_hw *hw) 2949 { 2950 #define HNS3_QUERY_MAX_CNT 10 2951 #define HNS3_QUERY_SLEEP_MSECOND 1 2952 struct hns3_func_status_cmd *req; 2953 struct hns3_cmd_desc desc; 2954 int timeout = 0; 2955 int ret; 2956 2957 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true); 2958 req = (struct hns3_func_status_cmd *)desc.data; 2959 2960 do { 2961 ret = hns3_cmd_send(hw, &desc, 1); 2962 if (ret) { 2963 PMD_INIT_LOG(ERR, "query function status failed %d", 2964 ret); 2965 return ret; 2966 } 2967 2968 /* Check whether PF reset is done */ 2969 if (req->pf_state) 2970 break; 2971 2972 rte_delay_ms(HNS3_QUERY_SLEEP_MSECOND); 2973 } while (timeout++ < HNS3_QUERY_MAX_CNT); 2974 2975 return hns3_parse_func_status(hw, req); 2976 } 2977 2978 static int 2979 hns3_get_pf_max_tqp_num(struct hns3_hw *hw) 2980 { 2981 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2982 struct hns3_pf *pf = &hns->pf; 2983 2984 if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) { 2985 /* 2986 * The total_tqps_num obtained from firmware is the maximum tqp 2987 * number of this port, to be shared by the PF and its VFs. 2988 * In most cases the PF does not need that many tqps. 2989 * RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2990 * which comes from the config file, is the user-assigned 2991 * maximum queue number for the PF of this port. Users can thus 2992 * adapt the maximum queue number of the PF to their own 2993 * application scenarios, which is more flexible. In addition, 2994 * a lot of memory can be saved because queue statistics rooms 2995 * are allocated according to the actual number of queues required. The 2996 * maximum queue number of the PF for a network engine with 2997 * revision_id greater than 0x30 is assigned by the config file. 2998 */ 2999 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) { 3000 hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) " 3001 "must be greater than 0.", 3002 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF); 3003 return -EINVAL; 3004 } 3005 3006 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 3007 hw->total_tqps_num); 3008 } else { 3009 /* 3010 * Due to the limitation on the number of PF interrupts 3011 * available, the maximum queue number assigned to PF on 3012 * the network engine with revision_id 0x21 is 64. 
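* HNS3_MAX_TQP_NUM_HIP08_PF below encodes this limit.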
3013 */ 3014 hw->tqps_num = RTE_MIN(hw->total_tqps_num, 3015 HNS3_MAX_TQP_NUM_HIP08_PF); 3016 } 3017 3018 return 0; 3019 } 3020 3021 static int 3022 hns3_query_pf_resource(struct hns3_hw *hw) 3023 { 3024 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3025 struct hns3_pf *pf = &hns->pf; 3026 struct hns3_pf_res_cmd *req; 3027 struct hns3_cmd_desc desc; 3028 int ret; 3029 3030 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); 3031 ret = hns3_cmd_send(hw, &desc, 1); 3032 if (ret) { 3033 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); 3034 return ret; 3035 } 3036 3037 req = (struct hns3_pf_res_cmd *)desc.data; 3038 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) + 3039 rte_le_to_cpu_16(req->ext_tqp_num); 3040 ret = hns3_get_pf_max_tqp_num(hw); 3041 if (ret) 3042 return ret; 3043 3044 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; 3045 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); 3046 3047 if (req->tx_buf_size) 3048 pf->tx_buf_size = 3049 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; 3050 else 3051 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; 3052 3053 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); 3054 3055 if (req->dv_buf_size) 3056 pf->dv_buf_size = 3057 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; 3058 else 3059 pf->dv_buf_size = HNS3_DEFAULT_DV; 3060 3061 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); 3062 3063 hw->num_msi = 3064 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), 3065 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); 3066 3067 return 0; 3068 } 3069 3070 static void 3071 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) 3072 { 3073 struct hns3_cfg_param_cmd *req; 3074 uint64_t mac_addr_tmp_high; 3075 uint8_t ext_rss_size_max; 3076 uint64_t mac_addr_tmp; 3077 uint32_t i; 3078 3079 req = (struct hns3_cfg_param_cmd *)desc[0].data; 3080 3081 /* get the configuration */ 3082 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3083 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); 3084 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3085 HNS3_CFG_TQP_DESC_N_M, 3086 HNS3_CFG_TQP_DESC_N_S); 3087 3088 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3089 HNS3_CFG_PHY_ADDR_M, 3090 HNS3_CFG_PHY_ADDR_S); 3091 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3092 HNS3_CFG_MEDIA_TP_M, 3093 HNS3_CFG_MEDIA_TP_S); 3094 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3095 HNS3_CFG_RX_BUF_LEN_M, 3096 HNS3_CFG_RX_BUF_LEN_S); 3097 /* get mac address */ 3098 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); 3099 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3100 HNS3_CFG_MAC_ADDR_H_M, 3101 HNS3_CFG_MAC_ADDR_H_S); 3102 3103 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 3104 3105 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3106 HNS3_CFG_DEFAULT_SPEED_M, 3107 HNS3_CFG_DEFAULT_SPEED_S); 3108 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3109 HNS3_CFG_RSS_SIZE_M, 3110 HNS3_CFG_RSS_SIZE_S); 3111 3112 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 3113 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 3114 3115 req = (struct hns3_cfg_param_cmd *)desc[1].data; 3116 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); 3117 3118 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3119 HNS3_CFG_SPEED_ABILITY_M, 3120 HNS3_CFG_SPEED_ABILITY_S); 3121 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3122 
HNS3_CFG_UMV_TBL_SPACE_M, 3123 HNS3_CFG_UMV_TBL_SPACE_S); 3124 if (!cfg->umv_space) 3125 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF; 3126 3127 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]), 3128 HNS3_CFG_EXT_RSS_SIZE_M, 3129 HNS3_CFG_EXT_RSS_SIZE_S); 3130 3131 /* 3132 * The ext_rss_size_max field obtained from firmware is an exponent of 3133 * 2 rather than a direct value, which is more flexible for future 3134 * changes and expansions. If this field is not zero, the hns3 PF PMD 3135 * driver uses 2^ext_rss_size_max as rss_size_max under one TC. A 3136 * device whose revision id is greater than or equal to 3137 * PCI_REVISION_ID_HIP09_A obtains the maximum number of queues supported under a TC through this field. 3138 */ 3139 if (ext_rss_size_max) 3140 cfg->rss_size_max = 1U << ext_rss_size_max; 3141 } 3142 3143 /* hns3_get_board_cfg: query the static parameters from the NCL_config file in flash 3144 * @hw: pointer to struct hns3_hw 3145 * @hcfg: the config structure to be filled 3146 */ 3147 static int 3148 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg) 3149 { 3150 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM]; 3151 struct hns3_cfg_param_cmd *req; 3152 uint32_t offset; 3153 uint32_t i; 3154 int ret; 3155 3156 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) { 3157 offset = 0; 3158 req = (struct hns3_cfg_param_cmd *)desc[i].data; 3159 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM, 3160 true); 3161 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S, 3162 i * HNS3_CFG_RD_LEN_BYTES); 3163 /* Len should be divided by 4 when sent to hardware */ 3164 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S, 3165 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT); 3166 req->offset = rte_cpu_to_le_32(offset); 3167 } 3168 3169 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM); 3170 if (ret) { 3171 PMD_INIT_LOG(ERR, "get config failed %d.", ret); 3172 return ret; 3173 } 3174 3175 hns3_parse_cfg(hcfg, desc); 3176 3177 return 0; 3178 } 3179 3180 static int 3181 hns3_parse_speed(int speed_cmd, uint32_t *speed) 3182 { 3183 switch (speed_cmd) { 3184 case HNS3_CFG_SPEED_10M: 3185 *speed = ETH_SPEED_NUM_10M; 3186 break; 3187 case HNS3_CFG_SPEED_100M: 3188 *speed = ETH_SPEED_NUM_100M; 3189 break; 3190 case HNS3_CFG_SPEED_1G: 3191 *speed = ETH_SPEED_NUM_1G; 3192 break; 3193 case HNS3_CFG_SPEED_10G: 3194 *speed = ETH_SPEED_NUM_10G; 3195 break; 3196 case HNS3_CFG_SPEED_25G: 3197 *speed = ETH_SPEED_NUM_25G; 3198 break; 3199 case HNS3_CFG_SPEED_40G: 3200 *speed = ETH_SPEED_NUM_40G; 3201 break; 3202 case HNS3_CFG_SPEED_50G: 3203 *speed = ETH_SPEED_NUM_50G; 3204 break; 3205 case HNS3_CFG_SPEED_100G: 3206 *speed = ETH_SPEED_NUM_100G; 3207 break; 3208 case HNS3_CFG_SPEED_200G: 3209 *speed = ETH_SPEED_NUM_200G; 3210 break; 3211 default: 3212 return -EINVAL; 3213 } 3214 3215 return 0; 3216 } 3217 3218 static void 3219 hns3_set_default_dev_specifications(struct hns3_hw *hw) 3220 { 3221 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; 3222 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; 3223 hw->rss_key_size = HNS3_RSS_KEY_SIZE; 3224 hw->max_tm_rate = HNS3_ETHER_MAX_RATE; 3225 hw->intr.int_ql_max = HNS3_INTR_QL_NONE; 3226 } 3227 3228 static void 3229 hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) 3230 { 3231 struct hns3_dev_specs_0_cmd *req0; 3232 3233 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; 3234 3235 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; 3236 hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); 3237 
hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); 3238 hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); 3239 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); 3240 } 3241 3242 static int 3243 hns3_check_dev_specifications(struct hns3_hw *hw) 3244 { 3245 if (hw->rss_ind_tbl_size == 0 || 3246 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { 3247 hns3_err(hw, "the size of hash lookup table configured (%u)" 3248 " exceeds the maximum(%u)", hw->rss_ind_tbl_size, 3249 HNS3_RSS_IND_TBL_SIZE_MAX); 3250 return -EINVAL; 3251 } 3252 3253 return 0; 3254 } 3255 3256 static int 3257 hns3_query_dev_specifications(struct hns3_hw *hw) 3258 { 3259 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; 3260 int ret; 3261 int i; 3262 3263 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3264 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, 3265 true); 3266 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3267 } 3268 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); 3269 3270 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); 3271 if (ret) 3272 return ret; 3273 3274 hns3_parse_dev_specifications(hw, desc); 3275 3276 return hns3_check_dev_specifications(hw); 3277 } 3278 3279 static int 3280 hns3_get_capability(struct hns3_hw *hw) 3281 { 3282 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3283 struct rte_pci_device *pci_dev; 3284 struct hns3_pf *pf = &hns->pf; 3285 struct rte_eth_dev *eth_dev; 3286 uint16_t device_id; 3287 uint8_t revision; 3288 int ret; 3289 3290 eth_dev = &rte_eth_devices[hw->data->port_id]; 3291 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3292 device_id = pci_dev->id.device_id; 3293 3294 if (device_id == HNS3_DEV_ID_25GE_RDMA || 3295 device_id == HNS3_DEV_ID_50GE_RDMA || 3296 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || 3297 device_id == HNS3_DEV_ID_200G_RDMA) 3298 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); 3299 3300 /* Get PCI revision id */ 3301 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, 3302 HNS3_PCI_REVISION_ID); 3303 if (ret != HNS3_PCI_REVISION_ID_LEN) { 3304 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", 3305 ret); 3306 return -EIO; 3307 } 3308 hw->revision = revision; 3309 3310 if (revision < PCI_REVISION_ID_HIP09_A) { 3311 hns3_set_default_dev_specifications(hw); 3312 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; 3313 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; 3314 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; 3315 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE; 3316 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1; 3317 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; 3318 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE; 3319 hw->rss_info.ipv6_sctp_offload_supported = false; 3320 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; 3321 return 0; 3322 } 3323 3324 ret = hns3_query_dev_specifications(hw); 3325 if (ret) { 3326 PMD_INIT_LOG(ERR, 3327 "failed to query dev specifications, ret = %d", 3328 ret); 3329 return ret; 3330 } 3331 3332 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 3333 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 3334 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 3335 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; 3336 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; 3337 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; 3338 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; 3339 hw->rss_info.ipv6_sctp_offload_supported = true; 3340 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; 3341 3342 
return 0; 3343 } 3344 3345 static int 3346 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) 3347 { 3348 int ret; 3349 3350 switch (media_type) { 3351 case HNS3_MEDIA_TYPE_COPPER: 3352 if (!hns3_dev_copper_supported(hw)) { 3353 PMD_INIT_LOG(ERR, 3354 "Media type is copper, not supported."); 3355 ret = -EOPNOTSUPP; 3356 } else { 3357 ret = 0; 3358 } 3359 break; 3360 case HNS3_MEDIA_TYPE_FIBER: 3361 ret = 0; 3362 break; 3363 case HNS3_MEDIA_TYPE_BACKPLANE: 3364 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported."); 3365 ret = -EOPNOTSUPP; 3366 break; 3367 default: 3368 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); 3369 ret = -EINVAL; 3370 break; 3371 } 3372 3373 return ret; 3374 } 3375 3376 static int 3377 hns3_get_board_configuration(struct hns3_hw *hw) 3378 { 3379 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3380 struct hns3_pf *pf = &hns->pf; 3381 struct hns3_cfg cfg; 3382 int ret; 3383 3384 ret = hns3_get_board_cfg(hw, &cfg); 3385 if (ret) { 3386 PMD_INIT_LOG(ERR, "get board config failed %d", ret); 3387 return ret; 3388 } 3389 3390 ret = hns3_check_media_type(hw, cfg.media_type); 3391 if (ret) 3392 return ret; 3393 3394 hw->mac.media_type = cfg.media_type; 3395 hw->rss_size_max = cfg.rss_size_max; 3396 hw->rss_dis_flag = false; 3397 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); 3398 hw->mac.phy_addr = cfg.phy_addr; 3399 hw->mac.default_addr_setted = false; 3400 hw->num_tx_desc = cfg.tqp_desc_num; 3401 hw->num_rx_desc = cfg.tqp_desc_num; 3402 hw->dcb_info.num_pg = 1; 3403 hw->dcb_info.hw_pfc_map = 0; 3404 3405 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); 3406 if (ret) { 3407 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d", 3408 cfg.default_speed, ret); 3409 return ret; 3410 } 3411 3412 pf->tc_max = cfg.tc_num; 3413 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { 3414 PMD_INIT_LOG(WARNING, 3415 "Get TC num(%u) from flash, set TC num to 1", 3416 pf->tc_max); 3417 pf->tc_max = 1; 3418 } 3419 3420 /* Dev does not support DCB */ 3421 if (!hns3_dev_dcb_supported(hw)) { 3422 pf->tc_max = 1; 3423 pf->pfc_max = 0; 3424 } else 3425 pf->pfc_max = pf->tc_max; 3426 3427 hw->dcb_info.num_tc = 1; 3428 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, 3429 hw->tqps_num / hw->dcb_info.num_tc); 3430 hns3_set_bit(hw->hw_tc_map, 0, 1); 3431 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; 3432 3433 pf->wanted_umv_size = cfg.umv_space; 3434 3435 return ret; 3436 } 3437 3438 static int 3439 hns3_get_configuration(struct hns3_hw *hw) 3440 { 3441 int ret; 3442 3443 ret = hns3_query_function_status(hw); 3444 if (ret) { 3445 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); 3446 return ret; 3447 } 3448 3449 /* Get device capability */ 3450 ret = hns3_get_capability(hw); 3451 if (ret) { 3452 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 3453 return ret; 3454 } 3455 3456 /* Get pf resource */ 3457 ret = hns3_query_pf_resource(hw); 3458 if (ret) { 3459 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); 3460 return ret; 3461 } 3462 3463 ret = hns3_get_board_configuration(hw); 3464 if (ret) { 3465 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); 3466 return ret; 3467 } 3468 3469 ret = hns3_query_dev_fec_info(hw); 3470 if (ret) 3471 PMD_INIT_LOG(ERR, 3472 "failed to query FEC information, ret = %d", ret); 3473 3474 return ret; 3475 } 3476 3477 static int 3478 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid, 3479 uint16_t tqp_vid, bool is_pf) 3480 { 3481 struct 
hns3_tqp_map_cmd *req; 3482 struct hns3_cmd_desc desc; 3483 int ret; 3484 3485 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); 3486 3487 req = (struct hns3_tqp_map_cmd *)desc.data; 3488 req->tqp_id = rte_cpu_to_le_16(tqp_pid); 3489 req->tqp_vf = func_id; 3490 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; 3491 if (!is_pf) 3492 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); 3493 req->tqp_vid = rte_cpu_to_le_16(tqp_vid); 3494 3495 ret = hns3_cmd_send(hw, &desc, 1); 3496 if (ret) 3497 PMD_INIT_LOG(ERR, "TQP map failed %d", ret); 3498 3499 return ret; 3500 } 3501 3502 static int 3503 hns3_map_tqp(struct hns3_hw *hw) 3504 { 3505 int ret; 3506 int i; 3507 3508 /* 3509 * In current version, VF is not supported when PF is driven by DPDK 3510 * driver, so we assign total tqps_num tqps allocated to this port 3511 * to PF. 3512 */ 3513 for (i = 0; i < hw->total_tqps_num; i++) { 3514 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true); 3515 if (ret) 3516 return ret; 3517 } 3518 3519 return 0; 3520 } 3521 3522 static int 3523 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 3524 { 3525 struct hns3_config_mac_speed_dup_cmd *req; 3526 struct hns3_cmd_desc desc; 3527 int ret; 3528 3529 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; 3530 3531 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); 3532 3533 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0); 3534 3535 switch (speed) { 3536 case ETH_SPEED_NUM_10M: 3537 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3538 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); 3539 break; 3540 case ETH_SPEED_NUM_100M: 3541 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3542 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); 3543 break; 3544 case ETH_SPEED_NUM_1G: 3545 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3546 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); 3547 break; 3548 case ETH_SPEED_NUM_10G: 3549 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3550 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); 3551 break; 3552 case ETH_SPEED_NUM_25G: 3553 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3554 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); 3555 break; 3556 case ETH_SPEED_NUM_40G: 3557 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3558 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); 3559 break; 3560 case ETH_SPEED_NUM_50G: 3561 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3562 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); 3563 break; 3564 case ETH_SPEED_NUM_100G: 3565 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3566 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); 3567 break; 3568 case ETH_SPEED_NUM_200G: 3569 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3570 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); 3571 break; 3572 default: 3573 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); 3574 return -EINVAL; 3575 } 3576 3577 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); 3578 3579 ret = hns3_cmd_send(hw, &desc, 1); 3580 if (ret) 3581 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); 3582 3583 return ret; 3584 } 3585 3586 static int 3587 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3588 { 3589 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3590 struct hns3_pf *pf = &hns->pf; 3591 struct hns3_priv_buf *priv; 3592 uint32_t i, total_size; 3593 3594 total_size = pf->pkt_buf_size; 3595 3596 /* alloc tx buffer for all enabled tc */ 3597 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3598 priv = &buf_alloc->priv_buf[i]; 3599 3600 if (hw->hw_tc_map & BIT(i)) { 3601 if 
(total_size < pf->tx_buf_size) 3602 return -ENOMEM; 3603 3604 priv->tx_buf_size = pf->tx_buf_size; 3605 } else 3606 priv->tx_buf_size = 0; 3607 3608 total_size -= priv->tx_buf_size; 3609 } 3610 3611 return 0; 3612 } 3613 3614 static int 3615 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3616 { 3617 /* TX buffer size is unit by 128 byte */ 3618 #define HNS3_BUF_SIZE_UNIT_SHIFT 7 3619 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) 3620 struct hns3_tx_buff_alloc_cmd *req; 3621 struct hns3_cmd_desc desc; 3622 uint32_t buf_size; 3623 uint32_t i; 3624 int ret; 3625 3626 req = (struct hns3_tx_buff_alloc_cmd *)desc.data; 3627 3628 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); 3629 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3630 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 3631 3632 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; 3633 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | 3634 HNS3_BUF_SIZE_UPDATE_EN_MSK); 3635 } 3636 3637 ret = hns3_cmd_send(hw, &desc, 1); 3638 if (ret) 3639 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); 3640 3641 return ret; 3642 } 3643 3644 static int 3645 hns3_get_tc_num(struct hns3_hw *hw) 3646 { 3647 int cnt = 0; 3648 uint8_t i; 3649 3650 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3651 if (hw->hw_tc_map & BIT(i)) 3652 cnt++; 3653 return cnt; 3654 } 3655 3656 static uint32_t 3657 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3658 { 3659 struct hns3_priv_buf *priv; 3660 uint32_t rx_priv = 0; 3661 int i; 3662 3663 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3664 priv = &buf_alloc->priv_buf[i]; 3665 if (priv->enable) 3666 rx_priv += priv->buf_size; 3667 } 3668 return rx_priv; 3669 } 3670 3671 static uint32_t 3672 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3673 { 3674 uint32_t total_tx_size = 0; 3675 uint32_t i; 3676 3677 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3678 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 3679 3680 return total_tx_size; 3681 } 3682 3683 /* Get the number of pfc enabled TCs, which have private buffer */ 3684 static int 3685 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3686 { 3687 struct hns3_priv_buf *priv; 3688 int cnt = 0; 3689 uint8_t i; 3690 3691 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3692 priv = &buf_alloc->priv_buf[i]; 3693 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3694 cnt++; 3695 } 3696 3697 return cnt; 3698 } 3699 3700 /* Get the number of pfc disabled TCs, which have private buffer */ 3701 static int 3702 hns3_get_no_pfc_priv_num(struct hns3_hw *hw, 3703 struct hns3_pkt_buf_alloc *buf_alloc) 3704 { 3705 struct hns3_priv_buf *priv; 3706 int cnt = 0; 3707 uint8_t i; 3708 3709 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3710 priv = &buf_alloc->priv_buf[i]; 3711 if (hw->hw_tc_map & BIT(i) && 3712 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3713 cnt++; 3714 } 3715 3716 return cnt; 3717 } 3718 3719 static bool 3720 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, 3721 uint32_t rx_all) 3722 { 3723 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 3724 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3725 struct hns3_pf *pf = &hns->pf; 3726 uint32_t shared_buf, aligned_mps; 3727 uint32_t rx_priv; 3728 uint8_t tc_num; 3729 uint8_t i; 3730 3731 tc_num = hns3_get_tc_num(hw); 3732 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3733 3734 if (hns3_dev_dcb_supported(hw)) 3735 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + 3736 pf->dv_buf_size; 3737 
else 3738 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF 3739 + pf->dv_buf_size; 3740 3741 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 3742 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), 3743 HNS3_BUF_SIZE_UNIT); 3744 3745 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); 3746 if (rx_all < rx_priv + shared_std) 3747 return false; 3748 3749 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); 3750 buf_alloc->s_buf.buf_size = shared_buf; 3751 if (hns3_dev_dcb_supported(hw)) { 3752 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; 3753 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 3754 - roundup(aligned_mps / HNS3_BUF_DIV_BY, 3755 HNS3_BUF_SIZE_UNIT); 3756 } else { 3757 buf_alloc->s_buf.self.high = 3758 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3759 buf_alloc->s_buf.self.low = aligned_mps; 3760 } 3761 3762 if (hns3_dev_dcb_supported(hw)) { 3763 hi_thrd = shared_buf - pf->dv_buf_size; 3764 3765 if (tc_num <= NEED_RESERVE_TC_NUM) 3766 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT / 3767 BUF_MAX_PERCENT; 3768 3769 if (tc_num) 3770 hi_thrd = hi_thrd / tc_num; 3771 3772 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); 3773 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); 3774 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; 3775 } else { 3776 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3777 lo_thrd = aligned_mps; 3778 } 3779 3780 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3781 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 3782 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 3783 } 3784 3785 return true; 3786 } 3787 3788 static bool 3789 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, 3790 struct hns3_pkt_buf_alloc *buf_alloc) 3791 { 3792 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3793 struct hns3_pf *pf = &hns->pf; 3794 struct hns3_priv_buf *priv; 3795 uint32_t aligned_mps; 3796 uint32_t rx_all; 3797 uint8_t i; 3798 3799 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3800 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3801 3802 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3803 priv = &buf_alloc->priv_buf[i]; 3804 3805 priv->enable = 0; 3806 priv->wl.low = 0; 3807 priv->wl.high = 0; 3808 priv->buf_size = 0; 3809 3810 if (!(hw->hw_tc_map & BIT(i))) 3811 continue; 3812 3813 priv->enable = 1; 3814 if (hw->dcb_info.hw_pfc_map & BIT(i)) { 3815 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT; 3816 priv->wl.high = roundup(priv->wl.low + aligned_mps, 3817 HNS3_BUF_SIZE_UNIT); 3818 } else { 3819 priv->wl.low = 0; 3820 priv->wl.high = max ? 
(aligned_mps * HNS3_BUF_MUL_BY) : 3821 aligned_mps; 3822 } 3823 3824 priv->buf_size = priv->wl.high + pf->dv_buf_size; 3825 } 3826 3827 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3828 } 3829 3830 static bool 3831 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw, 3832 struct hns3_pkt_buf_alloc *buf_alloc) 3833 { 3834 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3835 struct hns3_pf *pf = &hns->pf; 3836 struct hns3_priv_buf *priv; 3837 int no_pfc_priv_num; 3838 uint32_t rx_all; 3839 uint8_t mask; 3840 int i; 3841 3842 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3843 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc); 3844 3845 /* let the last to be cleared first */ 3846 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3847 priv = &buf_alloc->priv_buf[i]; 3848 mask = BIT((uint8_t)i); 3849 3850 if (hw->hw_tc_map & mask && 3851 !(hw->dcb_info.hw_pfc_map & mask)) { 3852 /* Clear the no pfc TC private buffer */ 3853 priv->wl.low = 0; 3854 priv->wl.high = 0; 3855 priv->buf_size = 0; 3856 priv->enable = 0; 3857 no_pfc_priv_num--; 3858 } 3859 3860 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3861 no_pfc_priv_num == 0) 3862 break; 3863 } 3864 3865 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3866 } 3867 3868 static bool 3869 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, 3870 struct hns3_pkt_buf_alloc *buf_alloc) 3871 { 3872 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3873 struct hns3_pf *pf = &hns->pf; 3874 struct hns3_priv_buf *priv; 3875 uint32_t rx_all; 3876 int pfc_priv_num; 3877 uint8_t mask; 3878 int i; 3879 3880 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3881 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc); 3882 3883 /* let the last to be cleared first */ 3884 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3885 priv = &buf_alloc->priv_buf[i]; 3886 mask = BIT((uint8_t)i); 3887 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) { 3888 /* Reduce the number of pfc TC with private buffer */ 3889 priv->wl.low = 0; 3890 priv->enable = 0; 3891 priv->wl.high = 0; 3892 priv->buf_size = 0; 3893 pfc_priv_num--; 3894 } 3895 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3896 pfc_priv_num == 0) 3897 break; 3898 } 3899 3900 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3901 } 3902 3903 static bool 3904 hns3_only_alloc_priv_buff(struct hns3_hw *hw, 3905 struct hns3_pkt_buf_alloc *buf_alloc) 3906 { 3907 #define COMPENSATE_BUFFER 0x3C00 3908 #define COMPENSATE_HALF_MPS_NUM 5 3909 #define PRIV_WL_GAP 0x1800 3910 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3911 struct hns3_pf *pf = &hns->pf; 3912 uint32_t tc_num = hns3_get_tc_num(hw); 3913 uint32_t half_mps = pf->mps >> 1; 3914 struct hns3_priv_buf *priv; 3915 uint32_t min_rx_priv; 3916 uint32_t rx_priv; 3917 uint8_t i; 3918 3919 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3920 if (tc_num) 3921 rx_priv = rx_priv / tc_num; 3922 3923 if (tc_num <= NEED_RESERVE_TC_NUM) 3924 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 3925 3926 /* 3927 * Minimum value of private buffer in rx direction (min_rx_priv) is 3928 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private 3929 * buffer if rx_priv is greater than min_rx_priv. 
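* In that formula, COMPENSATE_BUFFER (0x3C00) is the 15KB term and COMPENSATE_HALF_MPS_NUM * half_mps is the 2.5 * MPS term.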
3930 */ 3931 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER + 3932 COMPENSATE_HALF_MPS_NUM * half_mps; 3933 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT); 3934 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT); 3935 3936 if (rx_priv < min_rx_priv) 3937 return false; 3938 3939 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3940 priv = &buf_alloc->priv_buf[i]; 3941 priv->enable = 0; 3942 priv->wl.low = 0; 3943 priv->wl.high = 0; 3944 priv->buf_size = 0; 3945 3946 if (!(hw->hw_tc_map & BIT(i))) 3947 continue; 3948 3949 priv->enable = 1; 3950 priv->buf_size = rx_priv; 3951 priv->wl.high = rx_priv - pf->dv_buf_size; 3952 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 3953 } 3954 3955 buf_alloc->s_buf.buf_size = 0; 3956 3957 return true; 3958 } 3959 3960 /* 3961 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs 3962 * @hw: pointer to struct hns3_hw 3963 * @buf_alloc: pointer to buffer calculation data 3964 * @return: 0: calculation successful, negative: fail 3965 */ 3966 static int 3967 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3968 { 3969 /* When DCB is not supported, rx private buffer is not allocated. */ 3970 if (!hns3_dev_dcb_supported(hw)) { 3971 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3972 struct hns3_pf *pf = &hns->pf; 3973 uint32_t rx_all = pf->pkt_buf_size; 3974 3975 rx_all -= hns3_get_tx_buff_alloced(buf_alloc); 3976 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all)) 3977 return -ENOMEM; 3978 3979 return 0; 3980 } 3981 3982 /* 3983 * Try to allocate private packet buffers for all TCs without a shared 3984 * buffer. 3985 */ 3986 if (hns3_only_alloc_priv_buff(hw, buf_alloc)) 3987 return 0; 3988 3989 /* 3990 * Try to allocate private packet buffers for all TCs with a shared 3991 * buffer. 3992 */ 3993 if (hns3_rx_buf_calc_all(hw, true, buf_alloc)) 3994 return 0; 3995 3996 /* 3997 * The number of enabled ports, TCs and no-drop TCs differs across 3998 * application scenarios. To obtain better performance, the software 3999 * shrinks the allocation step by step, reducing in the following order: 4000 * the waterline of valid TCs, then the private buffers of 4001 * PFC-disabled TCs, and finally the private buffers of 4002 * PFC-enabled TCs.
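 * The calls below mirror this order: first retry with lowered waterlines
 * (hns3_rx_buf_calc_all with max == false), then drop the private buffers
 * of the PFC-disabled TCs, and finally drop those of the PFC-enabled TCs.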
4003 */ 4004 if (hns3_rx_buf_calc_all(hw, false, buf_alloc)) 4005 return 0; 4006 4007 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc)) 4008 return 0; 4009 4010 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc)) 4011 return 0; 4012 4013 return -ENOMEM; 4014 } 4015 4016 static int 4017 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4018 { 4019 struct hns3_rx_priv_buff_cmd *req; 4020 struct hns3_cmd_desc desc; 4021 uint32_t buf_size; 4022 int ret; 4023 int i; 4024 4025 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false); 4026 req = (struct hns3_rx_priv_buff_cmd *)desc.data; 4027 4028 /* Alloc private buffer TCs */ 4029 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 4030 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i]; 4031 4032 req->buf_num[i] = 4033 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S); 4034 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B); 4035 } 4036 4037 buf_size = buf_alloc->s_buf.buf_size; 4038 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) | 4039 (1 << HNS3_TC0_PRI_BUF_EN_B)); 4040 4041 ret = hns3_cmd_send(hw, &desc, 1); 4042 if (ret) 4043 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret); 4044 4045 return ret; 4046 } 4047 4048 static int 4049 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4050 { 4051 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2 4052 struct hns3_rx_priv_wl_buf *req; 4053 struct hns3_priv_buf *priv; 4054 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM]; 4055 int i, j; 4056 int ret; 4057 4058 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) { 4059 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC, 4060 false); 4061 req = (struct hns3_rx_priv_wl_buf *)desc[i].data; 4062 4063 /* The first descriptor sets the NEXT bit to 1 */ 4064 if (i == 0) 4065 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4066 else 4067 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4068 4069 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 4070 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j; 4071 4072 priv = &buf_alloc->priv_buf[idx]; 4073 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >> 4074 HNS3_BUF_UNIT_S); 4075 req->tc_wl[j].high |= 4076 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4077 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >> 4078 HNS3_BUF_UNIT_S); 4079 req->tc_wl[j].low |= 4080 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4081 } 4082 } 4083 4084 /* Send 2 descriptors at one time */ 4085 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM); 4086 if (ret) 4087 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d", 4088 ret); 4089 return ret; 4090 } 4091 4092 static int 4093 hns3_common_thrd_config(struct hns3_hw *hw, 4094 struct hns3_pkt_buf_alloc *buf_alloc) 4095 { 4096 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2 4097 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf; 4098 struct hns3_rx_com_thrd *req; 4099 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM]; 4100 struct hns3_tc_thrd *tc; 4101 int tc_idx; 4102 int i, j; 4103 int ret; 4104 4105 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) { 4106 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC, 4107 false); 4108 req = (struct hns3_rx_com_thrd *)&desc[i].data; 4109 4110 /* The first descriptor sets the NEXT bit to 1 */ 4111 if (i == 0) 4112 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4113 else 4114 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4115 4116 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 4117 tc_idx
= i * HNS3_TC_NUM_ONE_DESC + j; 4118 tc = &s_buf->tc_thrd[tc_idx]; 4119 4120 req->com_thrd[j].high = 4121 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); 4122 req->com_thrd[j].high |= 4123 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4124 req->com_thrd[j].low = 4125 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); 4126 req->com_thrd[j].low |= 4127 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4128 } 4129 } 4130 4131 /* Send 2 descriptors at one time */ 4132 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); 4133 if (ret) 4134 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); 4135 4136 return ret; 4137 } 4138 4139 static int 4140 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4141 { 4142 struct hns3_shared_buf *buf = &buf_alloc->s_buf; 4143 struct hns3_rx_com_wl *req; 4144 struct hns3_cmd_desc desc; 4145 int ret; 4146 4147 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); 4148 4149 req = (struct hns3_rx_com_wl *)desc.data; 4150 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); 4151 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4152 4153 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); 4154 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4155 4156 ret = hns3_cmd_send(hw, &desc, 1); 4157 if (ret) 4158 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); 4159 4160 return ret; 4161 } 4162 4163 int 4164 hns3_buffer_alloc(struct hns3_hw *hw) 4165 { 4166 struct hns3_pkt_buf_alloc pkt_buf; 4167 int ret; 4168 4169 memset(&pkt_buf, 0, sizeof(pkt_buf)); 4170 ret = hns3_tx_buffer_calc(hw, &pkt_buf); 4171 if (ret) { 4172 PMD_INIT_LOG(ERR, 4173 "could not calc tx buffer size for all TCs %d", 4174 ret); 4175 return ret; 4176 } 4177 4178 ret = hns3_tx_buffer_alloc(hw, &pkt_buf); 4179 if (ret) { 4180 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); 4181 return ret; 4182 } 4183 4184 ret = hns3_rx_buffer_calc(hw, &pkt_buf); 4185 if (ret) { 4186 PMD_INIT_LOG(ERR, 4187 "could not calc rx priv buffer size for all TCs %d", 4188 ret); 4189 return ret; 4190 } 4191 4192 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); 4193 if (ret) { 4194 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); 4195 return ret; 4196 } 4197 4198 if (hns3_dev_dcb_supported(hw)) { 4199 ret = hns3_rx_priv_wl_config(hw, &pkt_buf); 4200 if (ret) { 4201 PMD_INIT_LOG(ERR, 4202 "could not configure rx private waterline %d", 4203 ret); 4204 return ret; 4205 } 4206 4207 ret = hns3_common_thrd_config(hw, &pkt_buf); 4208 if (ret) { 4209 PMD_INIT_LOG(ERR, 4210 "could not configure common threshold %d", 4211 ret); 4212 return ret; 4213 } 4214 } 4215 4216 ret = hns3_common_wl_config(hw, &pkt_buf); 4217 if (ret) 4218 PMD_INIT_LOG(ERR, "could not configure common waterline %d", 4219 ret); 4220 4221 return ret; 4222 } 4223 4224 static int 4225 hns3_mac_init(struct hns3_hw *hw) 4226 { 4227 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4228 struct hns3_mac *mac = &hw->mac; 4229 struct hns3_pf *pf = &hns->pf; 4230 int ret; 4231 4232 pf->support_sfp_query = true; 4233 mac->link_duplex = ETH_LINK_FULL_DUPLEX; 4234 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); 4235 if (ret) { 4236 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); 4237 return ret; 4238 } 4239 4240 mac->link_status = ETH_LINK_DOWN; 4241 4242 return hns3_config_mtu(hw, pf->mps); 4243 } 4244 4245 static int 4246 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) 4247 { 
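/*
 * Map the firmware response code of the MAC ethertype command to an errno:
 * "success" and "already added" both count as success, while table overflow,
 * key conflict and unknown codes all map to -EIO, as the switch below shows.
 */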
4248 #define HNS3_ETHERTYPE_SUCCESS_ADD 0 4249 #define HNS3_ETHERTYPE_ALREADY_ADD 1 4250 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 4251 #define HNS3_ETHERTYPE_KEY_CONFLICT 3 4252 int return_status; 4253 4254 if (cmdq_resp) { 4255 PMD_INIT_LOG(ERR, 4256 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.", 4257 cmdq_resp); 4258 return -EIO; 4259 } 4260 4261 switch (resp_code) { 4262 case HNS3_ETHERTYPE_SUCCESS_ADD: 4263 case HNS3_ETHERTYPE_ALREADY_ADD: 4264 return_status = 0; 4265 break; 4266 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: 4267 PMD_INIT_LOG(ERR, 4268 "add mac ethertype failed for manager table overflow."); 4269 return_status = -EIO; 4270 break; 4271 case HNS3_ETHERTYPE_KEY_CONFLICT: 4272 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); 4273 return_status = -EIO; 4274 break; 4275 default: 4276 PMD_INIT_LOG(ERR, 4277 "add mac ethertype failed for undefined reason, code=%u.", 4278 resp_code); 4279 return_status = -EIO; 4280 break; 4281 } 4282 4283 return return_status; 4284 } 4285 4286 static int 4287 hns3_add_mgr_tbl(struct hns3_hw *hw, 4288 const struct hns3_mac_mgr_tbl_entry_cmd *req) 4289 { 4290 struct hns3_cmd_desc desc; 4291 uint8_t resp_code; 4292 uint16_t retval; 4293 int ret; 4294 4295 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); 4296 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); 4297 4298 ret = hns3_cmd_send(hw, &desc, 1); 4299 if (ret) { 4300 PMD_INIT_LOG(ERR, 4301 "add mac ethertype failed for cmd_send, ret = %d.", 4302 ret); 4303 return ret; 4304 } 4305 4306 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 4307 retval = rte_le_to_cpu_16(desc.retval); 4308 4309 return hns3_get_mac_ethertype_cmd_status(retval, resp_code); 4310 } 4311 4312 static void 4313 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, 4314 int *table_item_num) 4315 { 4316 struct hns3_mac_mgr_tbl_entry_cmd *tbl; 4317 4318 /* 4319 * In the current version, we add one item to the management table: 4320 * 0x0180C200000E -- LLDP MC address 4321 */ 4322 tbl = mgr_table; 4323 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; 4324 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); 4325 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); 4326 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); 4327 tbl->i_port_bitmap = 0x1; 4328 *table_item_num = 1; 4329 } 4330 4331 static int 4332 hns3_init_mgr_tbl(struct hns3_hw *hw) 4333 { 4334 #define HNS_MAC_MGR_TBL_MAX_SIZE 16 4335 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; 4336 int table_item_num; 4337 int ret; 4338 int i; 4339 4340 memset(mgr_table, 0, sizeof(mgr_table)); 4341 hns3_prepare_mgr_tbl(mgr_table, &table_item_num); 4342 for (i = 0; i < table_item_num; i++) { 4343 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); 4344 if (ret) { 4345 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret = %d", 4346 ret); 4347 return ret; 4348 } 4349 } 4350 4351 return 0; 4352 } 4353 4354 static void 4355 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, 4356 bool en_mc, bool en_bc, int vport_id) 4357 { 4358 if (!param) 4359 return; 4360 4361 memset(param, 0, sizeof(struct hns3_promisc_param)); 4362 if (en_uc) 4363 param->enable = HNS3_PROMISC_EN_UC; 4364 if (en_mc) 4365 param->enable |= HNS3_PROMISC_EN_MC; 4366 if (en_bc) 4367 param->enable |= HNS3_PROMISC_EN_BC; 4368 param->vf_id = vport_id; 4369 } 4370 4371 static int 4372 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) 4373 { 4374 struct hns3_promisc_cfg_cmd
*req; 4375 struct hns3_cmd_desc desc; 4376 int ret; 4377 4378 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false); 4379 4380 req = (struct hns3_promisc_cfg_cmd *)desc.data; 4381 req->vf_id = param->vf_id; 4382 req->flag = (param->enable << HNS3_PROMISC_EN_B) | 4383 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B; 4384 4385 ret = hns3_cmd_send(hw, &desc, 1); 4386 if (ret) 4387 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret); 4388 4389 return ret; 4390 } 4391 4392 static int 4393 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) 4394 { 4395 struct hns3_promisc_param param; 4396 bool en_bc_pmc = true; 4397 uint8_t vf_id; 4398 4399 /* 4400 * In the current version, VFs are not supported when the PF is driven 4401 * by the DPDK driver; we only need to configure parameters for the PF vport. 4402 */ 4403 vf_id = HNS3_PF_FUNC_ID; 4404 4405 hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); 4406 return hns3_cmd_set_promisc_mode(hw, &param); 4407 } 4408 4409 static int 4410 hns3_promisc_init(struct hns3_hw *hw) 4411 { 4412 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4413 struct hns3_pf *pf = &hns->pf; 4414 struct hns3_promisc_param param; 4415 uint16_t func_id; 4416 int ret; 4417 4418 ret = hns3_set_promisc_mode(hw, false, false); 4419 if (ret) { 4420 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret); 4421 return ret; 4422 } 4423 4424 /* 4425 * In the current version, VFs are not supported when the PF is driven 4426 * by the DPDK driver. After the PF has been taken over by DPDK, the 4427 * original VFs become invalid, so residual entries are possible. Clear 4428 * the VFs' promisc mode during init to avoid unnecessary bandwidth 4429 * usage. 4430 */ 4431 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) { 4432 hns3_promisc_param_init(&param, false, false, false, func_id); 4433 ret = hns3_cmd_set_promisc_mode(hw, &param); 4434 if (ret) { 4435 PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode," 4436 " ret = %d", func_id, ret); 4437 return ret; 4438 } 4439 } 4440 4441 return 0; 4442 } 4443 4444 static void 4445 hns3_promisc_uninit(struct hns3_hw *hw) 4446 { 4447 struct hns3_promisc_param param; 4448 uint16_t func_id; 4449 int ret; 4450 4451 func_id = HNS3_PF_FUNC_ID; 4452 4453 /* 4454 * In the current version, VFs are not supported when the PF is driven 4455 * by the DPDK driver. The VFs' promisc mode status was cleared during 4456 * init and will not change, so just clear the PF's promisc mode status 4457 * during uninit. 4458 */ 4459 hns3_promisc_param_init(&param, false, false, false, func_id); 4460 ret = hns3_cmd_set_promisc_mode(hw, &param); 4461 if (ret) 4462 PMD_INIT_LOG(ERR, "failed to clear promisc status during" 4463 " uninit, ret = %d", ret); 4464 } 4465 4466 static int 4467 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) 4468 { 4469 bool allmulti = dev->data->all_multicast ? true : false; 4470 struct hns3_adapter *hns = dev->data->dev_private; 4471 struct hns3_hw *hw = &hns->hw; 4472 uint64_t offloads; 4473 int err; 4474 int ret; 4475 4476 rte_spinlock_lock(&hw->lock); 4477 ret = hns3_set_promisc_mode(hw, true, true); 4478 if (ret) { 4479 rte_spinlock_unlock(&hw->lock); 4480 hns3_err(hw, "failed to enable promiscuous mode, ret = %d", 4481 ret); 4482 return ret; 4483 } 4484 4485 /* 4486 * When promiscuous mode is enabled, disable the vlan filter so that 4487 * all packets in the receiving direction can come in.
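 * The filter is re-enabled by hns3_dev_promiscuous_disable() when
 * promiscuous mode is turned off again, provided DEV_RX_OFFLOAD_VLAN_FILTER
 * is still set.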
4488 */ 4489 offloads = dev->data->dev_conf.rxmode.offloads; 4490 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4491 ret = hns3_enable_vlan_filter(hns, false); 4492 if (ret) { 4493 hns3_err(hw, "failed to enable promiscuous mode due to " 4494 "failure to disable vlan filter, ret = %d", 4495 ret); 4496 err = hns3_set_promisc_mode(hw, false, allmulti); 4497 if (err) 4498 hns3_err(hw, "failed to restore promiscuous " 4499 "status after disable vlan filter " 4500 "failed during enabling promiscuous " 4501 "mode, ret = %d", ret); 4502 } 4503 } 4504 4505 rte_spinlock_unlock(&hw->lock); 4506 4507 return ret; 4508 } 4509 4510 static int 4511 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) 4512 { 4513 bool allmulti = dev->data->all_multicast ? true : false; 4514 struct hns3_adapter *hns = dev->data->dev_private; 4515 struct hns3_hw *hw = &hns->hw; 4516 uint64_t offloads; 4517 int err; 4518 int ret; 4519 4520 /* If now in all_multicast mode, must remain in all_multicast mode. */ 4521 rte_spinlock_lock(&hw->lock); 4522 ret = hns3_set_promisc_mode(hw, false, allmulti); 4523 if (ret) { 4524 rte_spinlock_unlock(&hw->lock); 4525 hns3_err(hw, "failed to disable promiscuous mode, ret = %d", 4526 ret); 4527 return ret; 4528 } 4529 /* when promiscuous mode was disabled, restore the vlan filter status */ 4530 offloads = dev->data->dev_conf.rxmode.offloads; 4531 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4532 ret = hns3_enable_vlan_filter(hns, true); 4533 if (ret) { 4534 hns3_err(hw, "failed to disable promiscuous mode due to" 4535 " failure to restore vlan filter, ret = %d", 4536 ret); 4537 err = hns3_set_promisc_mode(hw, true, true); 4538 if (err) 4539 hns3_err(hw, "failed to restore promiscuous " 4540 "status after enabling vlan filter " 4541 "failed during disabling promiscuous " 4542 "mode, ret = %d", ret); 4543 } 4544 } 4545 rte_spinlock_unlock(&hw->lock); 4546 4547 return ret; 4548 } 4549 4550 static int 4551 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) 4552 { 4553 struct hns3_adapter *hns = dev->data->dev_private; 4554 struct hns3_hw *hw = &hns->hw; 4555 int ret; 4556 4557 if (dev->data->promiscuous) 4558 return 0; 4559 4560 rte_spinlock_lock(&hw->lock); 4561 ret = hns3_set_promisc_mode(hw, false, true); 4562 rte_spinlock_unlock(&hw->lock); 4563 if (ret) 4564 hns3_err(hw, "failed to enable allmulticast mode, ret = %d", 4565 ret); 4566 4567 return ret; 4568 } 4569 4570 static int 4571 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) 4572 { 4573 struct hns3_adapter *hns = dev->data->dev_private; 4574 struct hns3_hw *hw = &hns->hw; 4575 int ret; 4576 4577 /* If now in promiscuous mode, must remain in all_multicast mode. */ 4578 if (dev->data->promiscuous) 4579 return 0; 4580 4581 rte_spinlock_lock(&hw->lock); 4582 ret = hns3_set_promisc_mode(hw, false, false); 4583 rte_spinlock_unlock(&hw->lock); 4584 if (ret) 4585 hns3_err(hw, "failed to disable allmulticast mode, ret = %d", 4586 ret); 4587 4588 return ret; 4589 } 4590 4591 static int 4592 hns3_dev_promisc_restore(struct hns3_adapter *hns) 4593 { 4594 struct hns3_hw *hw = &hns->hw; 4595 bool allmulti = hw->data->all_multicast ? 
true : false; 4596 int ret; 4597 4598 if (hw->data->promiscuous) { 4599 ret = hns3_set_promisc_mode(hw, true, true); 4600 if (ret) 4601 hns3_err(hw, "failed to restore promiscuous mode, " 4602 "ret = %d", ret); 4603 return ret; 4604 } 4605 4606 ret = hns3_set_promisc_mode(hw, false, allmulti); 4607 if (ret) 4608 hns3_err(hw, "failed to restore allmulticast mode, ret = %d", 4609 ret); 4610 return ret; 4611 } 4612 4613 static int 4614 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info) 4615 { 4616 struct hns3_sfp_info_cmd *resp; 4617 struct hns3_cmd_desc desc; 4618 int ret; 4619 4620 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true); 4621 resp = (struct hns3_sfp_info_cmd *)desc.data; 4622 resp->query_type = HNS3_ACTIVE_QUERY; 4623 4624 ret = hns3_cmd_send(hw, &desc, 1); 4625 if (ret == -EOPNOTSUPP) { 4626 hns3_warn(hw, "firmware does not support getting SFP info," 4627 " ret = %d.", ret); 4628 return ret; 4629 } else if (ret) { 4630 hns3_err(hw, "get sfp info failed, ret = %d.", ret); 4631 return ret; 4632 } 4633 4634 /* 4635 * In some cases, the MAC speed obtained from the firmware may be 0; it 4636 * should not be assigned to mac->link_speed. 4637 */ 4638 if (!rte_le_to_cpu_32(resp->sfp_speed)) 4639 return 0; 4640 4641 mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed); 4642 /* 4643 * If resp->supported_speed is 0, the firmware is an old version; do 4644 * not update these parameters. 4645 */ 4646 if (resp->supported_speed) { 4647 mac_info->query_type = HNS3_ACTIVE_QUERY; 4648 mac_info->supported_speed = 4649 rte_le_to_cpu_32(resp->supported_speed); 4650 mac_info->support_autoneg = resp->autoneg_ability; 4651 mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED 4652 : ETH_LINK_AUTONEG; 4653 } else { 4654 mac_info->query_type = HNS3_DEFAULT_QUERY; 4655 } 4656 4657 return 0; 4658 } 4659 4660 static uint8_t 4661 hns3_check_speed_dup(uint8_t duplex, uint32_t speed) 4662 { 4663 if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M)) 4664 duplex = ETH_LINK_FULL_DUPLEX; 4665 4666 return duplex; 4667 } 4668 4669 static int 4670 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 4671 { 4672 struct hns3_mac *mac = &hw->mac; 4673 int ret; 4674 4675 duplex = hns3_check_speed_dup(duplex, speed); 4676 if (mac->link_speed == speed && mac->link_duplex == duplex) 4677 return 0; 4678 4679 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex); 4680 if (ret) 4681 return ret; 4682 4683 ret = hns3_port_shaper_update(hw, speed); 4684 if (ret) 4685 return ret; 4686 4687 mac->link_speed = speed; 4688 mac->link_duplex = duplex; 4689 4690 return 0; 4691 } 4692 4693 static int 4694 hns3_update_fiber_link_info(struct hns3_hw *hw) 4695 { 4696 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 4697 struct hns3_mac *mac = &hw->mac; 4698 struct hns3_mac mac_info; 4699 int ret; 4700 4701 /* If the firmware does not support querying the SFP/qSFP speed, return directly */ 4702 if (!pf->support_sfp_query) 4703 return 0; 4704 4705 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4706 ret = hns3_get_sfp_info(hw, &mac_info); 4707 if (ret == -EOPNOTSUPP) { 4708 pf->support_sfp_query = false; 4709 return ret; 4710 } else if (ret) 4711 return ret; 4712 4713 /* Do nothing if no SFP */ 4714 if (mac_info.link_speed == ETH_SPEED_NUM_NONE) 4715 return 0; 4716 4717 /* 4718 * If query_type is HNS3_ACTIVE_QUERY, there is no need 4719 * to reconfigure the MAC speed.
Otherwise, it indicates 4720 * that the current firmware only supports obtaining the 4721 * SFP speed, and the MAC speed needs to be reconfigured. 4722 */ 4723 mac->query_type = mac_info.query_type; 4724 if (mac->query_type == HNS3_ACTIVE_QUERY) { 4725 if (mac_info.link_speed != mac->link_speed) { 4726 ret = hns3_port_shaper_update(hw, mac_info.link_speed); 4727 if (ret) 4728 return ret; 4729 } 4730 4731 mac->link_speed = mac_info.link_speed; 4732 mac->supported_speed = mac_info.supported_speed; 4733 mac->support_autoneg = mac_info.support_autoneg; 4734 mac->link_autoneg = mac_info.link_autoneg; 4735 4736 return 0; 4737 } 4738 4739 /* Config full duplex for SFP */ 4740 return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed, 4741 ETH_LINK_FULL_DUPLEX); 4742 } 4743 4744 static void 4745 hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac) 4746 { 4747 #define HNS3_PHY_SUPPORTED_SPEED_MASK 0x2f 4748 4749 struct hns3_phy_params_bd0_cmd *req; 4750 uint32_t supported; 4751 4752 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 4753 mac->link_speed = rte_le_to_cpu_32(req->speed); 4754 mac->link_duplex = hns3_get_bit(req->duplex, 4755 HNS3_PHY_DUPLEX_CFG_B); 4756 mac->link_autoneg = hns3_get_bit(req->autoneg, 4757 HNS3_PHY_AUTONEG_CFG_B); 4758 mac->advertising = rte_le_to_cpu_32(req->advertising); 4759 mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising); 4760 supported = rte_le_to_cpu_32(req->supported); 4761 mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK; 4762 mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT); 4763 } 4764 4765 static int 4766 hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac) 4767 { 4768 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 4769 uint16_t i; 4770 int ret; 4771 4772 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 4773 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 4774 true); 4775 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4776 } 4777 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true); 4778 4779 ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 4780 if (ret) { 4781 hns3_err(hw, "get phy parameters failed, ret = %d.", ret); 4782 return ret; 4783 } 4784 4785 hns3_parse_copper_phy_params(desc, mac); 4786 4787 return 0; 4788 } 4789 4790 static int 4791 hns3_update_copper_link_info(struct hns3_hw *hw) 4792 { 4793 struct hns3_mac *mac = &hw->mac; 4794 struct hns3_mac mac_info; 4795 int ret; 4796 4797 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4798 ret = hns3_get_copper_phy_params(hw, &mac_info); 4799 if (ret) 4800 return ret; 4801 4802 if (mac_info.link_speed != mac->link_speed) { 4803 ret = hns3_port_shaper_update(hw, mac_info.link_speed); 4804 if (ret) 4805 return ret; 4806 } 4807 4808 mac->link_speed = mac_info.link_speed; 4809 mac->link_duplex = mac_info.link_duplex; 4810 mac->link_autoneg = mac_info.link_autoneg; 4811 mac->supported_speed = mac_info.supported_speed; 4812 mac->advertising = mac_info.advertising; 4813 mac->lp_advertising = mac_info.lp_advertising; 4814 mac->support_autoneg = mac_info.support_autoneg; 4815 4816 return 0; 4817 } 4818 4819 static int 4820 hns3_update_link_info(struct rte_eth_dev *eth_dev) 4821 { 4822 struct hns3_adapter *hns = eth_dev->data->dev_private; 4823 struct hns3_hw *hw = &hns->hw; 4824 int ret = 0; 4825 4826 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) 4827 ret = hns3_update_copper_link_info(hw); 4828 else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) 4829 ret =
hns3_update_fiber_link_info(hw); 4830 4831 return ret; 4832 } 4833 4834 static int 4835 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) 4836 { 4837 struct hns3_config_mac_mode_cmd *req; 4838 struct hns3_cmd_desc desc; 4839 uint32_t loop_en = 0; 4840 uint8_t val = 0; 4841 int ret; 4842 4843 req = (struct hns3_config_mac_mode_cmd *)desc.data; 4844 4845 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); 4846 if (enable) 4847 val = 1; 4848 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); 4849 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); 4850 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); 4851 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); 4852 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); 4853 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); 4854 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); 4855 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); 4856 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); 4857 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); 4858 4859 /* 4860 * If the DEV_RX_OFFLOAD_KEEP_CRC offload is set, the MAC will not strip 4861 * the CRC when receiving frames. Otherwise, the CRC will be stripped. 4862 */ 4863 if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) 4864 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); 4865 else 4866 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); 4867 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); 4868 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); 4869 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); 4870 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); 4871 4872 ret = hns3_cmd_send(hw, &desc, 1); 4873 if (ret) 4874 PMD_INIT_LOG(ERR, "mac enable failed, ret = %d.", ret); 4875 4876 return ret; 4877 } 4878 4879 static int 4880 hns3_get_mac_link_status(struct hns3_hw *hw) 4881 { 4882 struct hns3_link_status_cmd *req; 4883 struct hns3_cmd_desc desc; 4884 int link_status; 4885 int ret; 4886 4887 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); 4888 ret = hns3_cmd_send(hw, &desc, 1); 4889 if (ret) { 4890 hns3_err(hw, "get link status cmd failed %d", ret); 4891 return ETH_LINK_DOWN; 4892 } 4893 4894 req = (struct hns3_link_status_cmd *)desc.data; 4895 link_status = req->status & HNS3_LINK_STATUS_UP_M; 4896 4897 return !!link_status; 4898 } 4899 4900 static bool 4901 hns3_update_link_status(struct hns3_hw *hw) 4902 { 4903 int state; 4904 4905 state = hns3_get_mac_link_status(hw); 4906 if (state != hw->mac.link_status) { 4907 hw->mac.link_status = state; 4908 hns3_warn(hw, "Link status changed to %s!", state ?
"up" : "down"); 4909 return true; 4910 } 4911 4912 return false; 4913 } 4914 4915 void 4916 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query) 4917 { 4918 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 4919 struct rte_eth_link new_link; 4920 int ret; 4921 4922 if (query) 4923 hns3_update_port_link_info(dev); 4924 4925 memset(&new_link, 0, sizeof(new_link)); 4926 hns3_setup_linkstatus(dev, &new_link); 4927 4928 ret = rte_eth_linkstatus_set(dev, &new_link); 4929 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0) 4930 hns3_start_report_lse(dev); 4931 } 4932 4933 static void 4934 hns3_service_handler(void *param) 4935 { 4936 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 4937 struct hns3_adapter *hns = eth_dev->data->dev_private; 4938 struct hns3_hw *hw = &hns->hw; 4939 4940 if (!hns3_is_reset_pending(hns)) 4941 hns3_update_linkstatus_and_event(hw, true); 4942 else 4943 hns3_warn(hw, "Cancel the query when reset is pending"); 4944 4945 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); 4946 } 4947 4948 static int 4949 hns3_init_hardware(struct hns3_adapter *hns) 4950 { 4951 struct hns3_hw *hw = &hns->hw; 4952 int ret; 4953 4954 ret = hns3_map_tqp(hw); 4955 if (ret) { 4956 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); 4957 return ret; 4958 } 4959 4960 ret = hns3_init_umv_space(hw); 4961 if (ret) { 4962 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); 4963 return ret; 4964 } 4965 4966 ret = hns3_mac_init(hw); 4967 if (ret) { 4968 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); 4969 goto err_mac_init; 4970 } 4971 4972 ret = hns3_init_mgr_tbl(hw); 4973 if (ret) { 4974 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); 4975 goto err_mac_init; 4976 } 4977 4978 ret = hns3_promisc_init(hw); 4979 if (ret) { 4980 PMD_INIT_LOG(ERR, "Failed to init promisc: %d", 4981 ret); 4982 goto err_mac_init; 4983 } 4984 4985 ret = hns3_init_vlan_config(hns); 4986 if (ret) { 4987 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); 4988 goto err_mac_init; 4989 } 4990 4991 ret = hns3_dcb_init(hw); 4992 if (ret) { 4993 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); 4994 goto err_mac_init; 4995 } 4996 4997 ret = hns3_init_fd_config(hns); 4998 if (ret) { 4999 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); 5000 goto err_mac_init; 5001 } 5002 5003 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); 5004 if (ret) { 5005 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); 5006 goto err_mac_init; 5007 } 5008 5009 ret = hns3_config_gro(hw, false); 5010 if (ret) { 5011 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 5012 goto err_mac_init; 5013 } 5014 5015 /* 5016 * In the initialization clearing the all hardware mapping relationship 5017 * configurations between queues and interrupt vectors is needed, so 5018 * some error caused by the residual configurations, such as the 5019 * unexpected interrupt, can be avoid. 
5020 */ 5021 ret = hns3_init_ring_with_vector(hw); 5022 if (ret) { 5023 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); 5024 goto err_mac_init; 5025 } 5026 5027 return 0; 5028 5029 err_mac_init: 5030 hns3_uninit_umv_space(hw); 5031 return ret; 5032 } 5033 5034 static int 5035 hns3_clear_hw(struct hns3_hw *hw) 5036 { 5037 struct hns3_cmd_desc desc; 5038 int ret; 5039 5040 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false); 5041 5042 ret = hns3_cmd_send(hw, &desc, 1); 5043 if (ret && ret != -EOPNOTSUPP) 5044 return ret; 5045 5046 return 0; 5047 } 5048 5049 static void 5050 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable) 5051 { 5052 uint32_t val; 5053 5054 /* 5055 * New firmware versions support reporting more hardware error types 5056 * in MSI-X mode. These errors are defined as RAS errors in hardware 5057 * and belong to a different type from the MSI-X errors processed 5058 * by the network driver. 5059 * 5060 * The network driver should enable this new error reporting during initialization. 5061 */ 5062 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 5063 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0); 5064 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); 5065 } 5066 5067 static uint32_t 5068 hns3_set_firber_default_support_speed(struct hns3_hw *hw) 5069 { 5070 struct hns3_mac *mac = &hw->mac; 5071 5072 switch (mac->link_speed) { 5073 case ETH_SPEED_NUM_1G: 5074 return HNS3_FIBER_LINK_SPEED_1G_BIT; 5075 case ETH_SPEED_NUM_10G: 5076 return HNS3_FIBER_LINK_SPEED_10G_BIT; 5077 case ETH_SPEED_NUM_25G: 5078 return HNS3_FIBER_LINK_SPEED_25G_BIT; 5079 case ETH_SPEED_NUM_40G: 5080 return HNS3_FIBER_LINK_SPEED_40G_BIT; 5081 case ETH_SPEED_NUM_50G: 5082 return HNS3_FIBER_LINK_SPEED_50G_BIT; 5083 case ETH_SPEED_NUM_100G: 5084 return HNS3_FIBER_LINK_SPEED_100G_BIT; 5085 case ETH_SPEED_NUM_200G: 5086 return HNS3_FIBER_LINK_SPEED_200G_BIT; 5087 default: 5088 hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed); 5089 return 0; 5090 } 5091 } 5092 5093 /* 5094 * The validity of supported_speed for fiber and copper media types can be 5095 * guaranteed by the following policy: 5096 * Copper: 5097 * Although the initialization of the PHY in the firmware may not be 5098 * complete, the firmware can guarantee that supported_speed is 5099 * a valid value. 5100 * Fiber: 5101 * If the firmware version supports the active query of the 5102 * HNS3_OPC_GET_SFP_INFO opcode, supported_speed can be obtained 5103 * through it. If unsupported, use the SFP's speed as the value of the 5104 * supported_speed. 5105 */ 5106 static int 5107 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev) 5108 { 5109 struct hns3_adapter *hns = eth_dev->data->dev_private; 5110 struct hns3_hw *hw = &hns->hw; 5111 struct hns3_mac *mac = &hw->mac; 5112 int ret; 5113 5114 ret = hns3_update_link_info(eth_dev); 5115 if (ret) 5116 return ret; 5117 5118 if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) { 5119 /* 5120 * Some firmware does not support reporting supported_speed and 5121 * only reports the effective speed of the SFP. In this case, it 5122 * is necessary to use the SFP's speed as the supported_speed.
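 * hns3_set_firber_default_support_speed() implements this fallback by
 * converting the single speed stored in mac->link_speed into the
 * corresponding HNS3_FIBER_LINK_SPEED_*_BIT.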
5123 */ 5124 if (mac->supported_speed == 0) 5125 mac->supported_speed = 5126 hns3_set_firber_default_support_speed(hw); 5127 } 5128 5129 return 0; 5130 } 5131 5132 static void 5133 hns3_get_fc_autoneg_capability(struct hns3_adapter *hns) 5134 { 5135 struct hns3_mac *mac = &hns->hw.mac; 5136 5137 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) { 5138 hns->pf.support_fc_autoneg = true; 5139 return; 5140 } 5141 5142 /* 5143 * Flow control auto-negotiation requires the cooperation of the driver 5144 * and firmware. Currently, the optical port does not support flow 5145 * control auto-negotiation. 5146 */ 5147 hns->pf.support_fc_autoneg = false; 5148 } 5149 5150 static int 5151 hns3_init_pf(struct rte_eth_dev *eth_dev) 5152 { 5153 struct rte_device *dev = eth_dev->device; 5154 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5155 struct hns3_adapter *hns = eth_dev->data->dev_private; 5156 struct hns3_hw *hw = &hns->hw; 5157 int ret; 5158 5159 PMD_INIT_FUNC_TRACE(); 5160 5161 /* Get hardware io base address from pcie BAR2 IO space */ 5162 hw->io_base = pci_dev->mem_resource[2].addr; 5163 5164 /* Firmware command queue initialize */ 5165 ret = hns3_cmd_init_queue(hw); 5166 if (ret) { 5167 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); 5168 goto err_cmd_init_queue; 5169 } 5170 5171 hns3_clear_all_event_cause(hw); 5172 5173 /* Firmware command initialize */ 5174 ret = hns3_cmd_init(hw); 5175 if (ret) { 5176 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); 5177 goto err_cmd_init; 5178 } 5179 5180 /* 5181 * To ensure that the hardware environment is clean during 5182 * initialization, the driver actively clears the hardware environment, 5183 * including the PF's and the corresponding VFs' vlan, mac and 5184 * flow table configurations, etc. 5185 */ 5186 ret = hns3_clear_hw(hw); 5187 if (ret) { 5188 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret); 5189 goto err_cmd_init; 5190 } 5191 5192 /* Clear the hardware statistics of the imissed registers.
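 * The second argument of hns3_update_imissed_stats() selects clearing
 * rather than reading, so the port starts counting dropped packets from
 * zero.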
*/ 5193 ret = hns3_update_imissed_stats(hw, true); 5194 if (ret) { 5195 hns3_err(hw, "clear imissed stats failed, ret = %d", ret); 5196 goto err_cmd_init; 5197 } 5198 5199 hns3_config_all_msix_error(hw, true); 5200 5201 ret = rte_intr_callback_register(&pci_dev->intr_handle, 5202 hns3_interrupt_handler, 5203 eth_dev); 5204 if (ret) { 5205 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); 5206 goto err_intr_callback_register; 5207 } 5208 5209 ret = hns3_ptp_init(hw); 5210 if (ret) 5211 goto err_get_config; 5212 5213 /* Enable interrupt */ 5214 rte_intr_enable(&pci_dev->intr_handle); 5215 hns3_pf_enable_irq0(hw); 5216 5217 /* Get configuration */ 5218 ret = hns3_get_configuration(hw); 5219 if (ret) { 5220 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); 5221 goto err_get_config; 5222 } 5223 5224 ret = hns3_tqp_stats_init(hw); 5225 if (ret) 5226 goto err_get_config; 5227 5228 ret = hns3_init_hardware(hns); 5229 if (ret) { 5230 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret); 5231 goto err_init_hw; 5232 } 5233 5234 /* Initialize flow director filter list & hash */ 5235 ret = hns3_fdir_filter_init(hns); 5236 if (ret) { 5237 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret); 5238 goto err_fdir; 5239 } 5240 5241 hns3_rss_set_default_args(hw); 5242 5243 ret = hns3_enable_hw_error_intr(hns, true); 5244 if (ret) { 5245 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", 5246 ret); 5247 goto err_enable_intr; 5248 } 5249 5250 ret = hns3_get_port_supported_speed(eth_dev); 5251 if (ret) { 5252 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported " 5253 "by device, ret = %d.", ret); 5254 goto err_supported_speed; 5255 } 5256 5257 hns3_get_fc_autoneg_capability(hns); 5258 5259 hns3_tm_conf_init(eth_dev); 5260 5261 return 0; 5262 5263 err_supported_speed: 5264 (void)hns3_enable_hw_error_intr(hns, false); 5265 err_enable_intr: 5266 hns3_fdir_filter_uninit(hns); 5267 err_fdir: 5268 hns3_uninit_umv_space(hw); 5269 err_init_hw: 5270 hns3_tqp_stats_uninit(hw); 5271 err_get_config: 5272 hns3_pf_disable_irq0(hw); 5273 rte_intr_disable(&pci_dev->intr_handle); 5274 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5275 eth_dev); 5276 err_intr_callback_register: 5277 err_cmd_init: 5278 hns3_cmd_uninit(hw); 5279 hns3_cmd_destroy_queue(hw); 5280 err_cmd_init_queue: 5281 hw->io_base = NULL; 5282 5283 return ret; 5284 } 5285 5286 static void 5287 hns3_uninit_pf(struct rte_eth_dev *eth_dev) 5288 { 5289 struct hns3_adapter *hns = eth_dev->data->dev_private; 5290 struct rte_device *dev = eth_dev->device; 5291 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5292 struct hns3_hw *hw = &hns->hw; 5293 5294 PMD_INIT_FUNC_TRACE(); 5295 5296 hns3_tm_conf_uninit(eth_dev); 5297 hns3_enable_hw_error_intr(hns, false); 5298 hns3_rss_uninit(hns); 5299 (void)hns3_config_gro(hw, false); 5300 hns3_promisc_uninit(hw); 5301 hns3_fdir_filter_uninit(hns); 5302 hns3_uninit_umv_space(hw); 5303 hns3_tqp_stats_uninit(hw); 5304 hns3_config_mac_tnl_int(hw, false); 5305 hns3_pf_disable_irq0(hw); 5306 rte_intr_disable(&pci_dev->intr_handle); 5307 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5308 eth_dev); 5309 hns3_config_all_msix_error(hw, false); 5310 hns3_cmd_uninit(hw); 5311 hns3_cmd_destroy_queue(hw); 5312 hw->io_base = NULL; 5313 } 5314 5315 static uint32_t 5316 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds) 5317 { 5318 uint32_t speed_bit; 5319 5320 switch (link_speeds & ~ETH_LINK_SPEED_FIXED) { 5321 case ETH_LINK_SPEED_10M: 5322 
speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT; 5323 break; 5324 case ETH_LINK_SPEED_10M_HD: 5325 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT; 5326 break; 5327 case ETH_LINK_SPEED_100M: 5328 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT; 5329 break; 5330 case ETH_LINK_SPEED_100M_HD: 5331 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT; 5332 break; 5333 case ETH_LINK_SPEED_1G: 5334 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT; 5335 break; 5336 default: 5337 speed_bit = 0; 5338 break; 5339 } 5340 5341 return speed_bit; 5342 } 5343 5344 static uint32_t 5345 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds) 5346 { 5347 uint32_t speed_bit; 5348 5349 switch (link_speeds & ~ETH_LINK_SPEED_FIXED) { 5350 case ETH_LINK_SPEED_1G: 5351 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT; 5352 break; 5353 case ETH_LINK_SPEED_10G: 5354 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT; 5355 break; 5356 case ETH_LINK_SPEED_25G: 5357 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT; 5358 break; 5359 case ETH_LINK_SPEED_40G: 5360 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT; 5361 break; 5362 case ETH_LINK_SPEED_50G: 5363 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT; 5364 break; 5365 case ETH_LINK_SPEED_100G: 5366 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT; 5367 break; 5368 case ETH_LINK_SPEED_200G: 5369 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT; 5370 break; 5371 default: 5372 speed_bit = 0; 5373 break; 5374 } 5375 5376 return speed_bit; 5377 } 5378 5379 static int 5380 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) 5381 { 5382 struct hns3_mac *mac = &hw->mac; 5383 uint32_t supported_speed = mac->supported_speed; 5384 uint32_t speed_bit = 0; 5385 5386 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 5387 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds); 5388 else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) 5389 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds); 5390 5391 if (!(speed_bit & supported_speed)) { 5392 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.", 5393 link_speeds); 5394 return -EINVAL; 5395 } 5396 5397 return 0; 5398 } 5399 5400 static inline uint32_t 5401 hns3_get_link_speed(uint32_t link_speeds) 5402 { 5403 uint32_t speed = ETH_SPEED_NUM_NONE; 5404 5405 if (link_speeds & ETH_LINK_SPEED_10M || 5406 link_speeds & ETH_LINK_SPEED_10M_HD) 5407 speed = ETH_SPEED_NUM_10M; 5408 if (link_speeds & ETH_LINK_SPEED_100M || 5409 link_speeds & ETH_LINK_SPEED_100M_HD) 5410 speed = ETH_SPEED_NUM_100M; 5411 if (link_speeds & ETH_LINK_SPEED_1G) 5412 speed = ETH_SPEED_NUM_1G; 5413 if (link_speeds & ETH_LINK_SPEED_10G) 5414 speed = ETH_SPEED_NUM_10G; 5415 if (link_speeds & ETH_LINK_SPEED_25G) 5416 speed = ETH_SPEED_NUM_25G; 5417 if (link_speeds & ETH_LINK_SPEED_40G) 5418 speed = ETH_SPEED_NUM_40G; 5419 if (link_speeds & ETH_LINK_SPEED_50G) 5420 speed = ETH_SPEED_NUM_50G; 5421 if (link_speeds & ETH_LINK_SPEED_100G) 5422 speed = ETH_SPEED_NUM_100G; 5423 if (link_speeds & ETH_LINK_SPEED_200G) 5424 speed = ETH_SPEED_NUM_200G; 5425 5426 return speed; 5427 } 5428 5429 static uint8_t 5430 hns3_get_link_duplex(uint32_t link_speeds) 5431 { 5432 if ((link_speeds & ETH_LINK_SPEED_10M_HD) || 5433 (link_speeds & ETH_LINK_SPEED_100M_HD)) 5434 return ETH_LINK_HALF_DUPLEX; 5435 else 5436 return ETH_LINK_FULL_DUPLEX; 5437 } 5438 5439 static int 5440 hns3_set_copper_port_link_speed(struct hns3_hw *hw, 5441 struct hns3_set_link_speed_cfg *cfg) 5442 { 5443 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 5444 struct hns3_phy_params_bd0_cmd *req; 5445 uint16_t i; 5446 5447 for (i = 
0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 5448 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 5449 false); 5450 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 5451 } 5452 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false); 5453 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 5454 req->autoneg = cfg->autoneg; 5455 5456 /* 5457 * The full speed capability is advertised for negotiation when 5458 * auto-negotiation is enabled. 5459 */ 5460 if (cfg->autoneg) { 5461 req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT | 5462 HNS3_PHY_LINK_SPEED_10M_HD_BIT | 5463 HNS3_PHY_LINK_SPEED_100M_BIT | 5464 HNS3_PHY_LINK_SPEED_100M_HD_BIT | 5465 HNS3_PHY_LINK_SPEED_1000M_BIT; 5466 } else { 5467 req->speed = cfg->speed; 5468 req->duplex = cfg->duplex; 5469 } 5470 5471 return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 5472 } 5473 5474 static int 5475 hns3_set_autoneg(struct hns3_hw *hw, bool enable) 5476 { 5477 struct hns3_config_auto_neg_cmd *req; 5478 struct hns3_cmd_desc desc; 5479 uint32_t flag = 0; 5480 int ret; 5481 5482 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false); 5483 5484 req = (struct hns3_config_auto_neg_cmd *)desc.data; 5485 if (enable) 5486 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1); 5487 req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag); 5488 5489 ret = hns3_cmd_send(hw, &desc, 1); 5490 if (ret) 5491 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret); 5492 5493 return ret; 5494 } 5495 5496 static int 5497 hns3_set_fiber_port_link_speed(struct hns3_hw *hw, 5498 struct hns3_set_link_speed_cfg *cfg) 5499 { 5500 int ret; 5501 5502 if (hw->mac.support_autoneg) { 5503 ret = hns3_set_autoneg(hw, cfg->autoneg); 5504 if (ret) { 5505 hns3_err(hw, "failed to configure auto-negotiation."); 5506 return ret; 5507 } 5508 5509 /* 5510 * To enable auto-negotiation, we only need to turn on the 5511 * auto-negotiation switch; the firmware then sets all speed 5512 * capabilities. 5513 */ 5514 if (cfg->autoneg) 5515 return 0; 5516 } 5517 5518 /* 5519 * Some hardware does not support auto-negotiation, but the user may 5520 * not have configured link_speeds (default 0), which means 5521 * auto-negotiation. In this case, return success. 5522 */ 5523 if (cfg->autoneg) 5524 return 0; 5525 5526 return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex); 5527 } 5528 5529 static int 5530 hns3_set_port_link_speed(struct hns3_hw *hw, 5531 struct hns3_set_link_speed_cfg *cfg) 5532 { 5533 int ret; 5534 5535 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) { 5536 #if defined(RTE_HNS3_ONLY_1630_FPGA) 5537 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 5538 if (pf->is_tmp_phy) 5539 return 0; 5540 #endif 5541 5542 ret = hns3_set_copper_port_link_speed(hw, cfg); 5543 if (ret) { 5544 hns3_err(hw, "failed to set copper port link speed, " 5545 "ret = %d.", ret); 5546 return ret; 5547 } 5548 } else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) { 5549 ret = hns3_set_fiber_port_link_speed(hw, cfg); 5550 if (ret) { 5551 hns3_err(hw, "failed to set fiber port link speed, " 5552 "ret = %d.", ret); 5553 return ret; 5554 } 5555 } 5556 5557 return 0; 5558 } 5559 5560 static int 5561 hns3_apply_link_speed(struct hns3_hw *hw) 5562 { 5563 struct rte_eth_conf *conf = &hw->data->dev_conf; 5564 struct hns3_set_link_speed_cfg cfg; 5565 5566 memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg)); 5567 cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
5568 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 5569 if (cfg.autoneg != ETH_LINK_AUTONEG) { 5570 cfg.speed = hns3_get_link_speed(conf->link_speeds); 5571 cfg.duplex = hns3_get_link_duplex(conf->link_speeds); 5572 } 5573 5574 return hns3_set_port_link_speed(hw, &cfg); 5575 } 5576 5577 static int 5578 hns3_do_start(struct hns3_adapter *hns, bool reset_queue) 5579 { 5580 struct hns3_hw *hw = &hns->hw; 5581 int ret; 5582 5583 ret = hns3_update_queue_map_configure(hns); 5584 if (ret) { 5585 hns3_err(hw, "failed to update queue mapping configuration, ret = %d", 5586 ret); 5587 return ret; 5588 } 5589 5590 /* Note: hns3_tm_conf_update must be called after configuring DCB. */ 5591 ret = hns3_tm_conf_update(hw); 5592 if (ret) { 5593 PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret); 5594 return ret; 5595 } 5596 5597 hns3_enable_rxd_adv_layout(hw); 5598 5599 ret = hns3_init_queues(hns, reset_queue); 5600 if (ret) { 5601 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret); 5602 return ret; 5603 } 5604 5605 ret = hns3_cfg_mac_mode(hw, true); 5606 if (ret) { 5607 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret); 5608 goto err_config_mac_mode; 5609 } 5610 5611 ret = hns3_apply_link_speed(hw); 5612 if (ret) 5613 goto err_set_link_speed; 5614 5615 return 0; 5616 5617 err_set_link_speed: 5618 (void)hns3_cfg_mac_mode(hw, false); 5619 5620 err_config_mac_mode: 5621 hns3_dev_release_mbufs(hns); 5622 /* 5623 * This is exception handling; hns3_reset_all_tqps logs its own error 5624 * message if something goes wrong, so there is no need to check its 5625 * return value here. Keep ret as the error code that caused the 5626 * exception. 5627 */ 5628 (void)hns3_reset_all_tqps(hns); 5629 return ret; 5630 } 5631 5632 static int 5633 hns3_map_rx_interrupt(struct rte_eth_dev *dev) 5634 { 5635 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5636 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5637 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5638 uint16_t base = RTE_INTR_VEC_ZERO_OFFSET; 5639 uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5640 uint32_t intr_vector; 5641 uint16_t q_id; 5642 int ret; 5643 5644 /* 5645 * hns3 needs a separate interrupt to be used as the event interrupt, 5646 * which cannot be shared with the task queue pairs, so the kernel 5647 * driver needs to support multiple interrupt vectors. 5648 */ 5649 if (dev->data->dev_conf.intr_conf.rxq == 0 || 5650 !rte_intr_cap_multiple(intr_handle)) 5651 return 0; 5652 5653 rte_intr_disable(intr_handle); 5654 intr_vector = hw->used_rx_queues; 5655 /* create an event fd for each intr vector when MSI-X is used */ 5656 if (rte_intr_efd_enable(intr_handle, intr_vector)) 5657 return -EINVAL; 5658 5659 if (intr_handle->intr_vec == NULL) { 5660 intr_handle->intr_vec = 5661 rte_zmalloc("intr_vec", 5662 hw->used_rx_queues * sizeof(int), 0); 5663 if (intr_handle->intr_vec == NULL) { 5664 hns3_err(hw, "failed to allocate %u rx_queues intr_vec", 5665 hw->used_rx_queues); 5666 ret = -ENOMEM; 5667 goto alloc_intr_vec_error; 5668 } 5669 } 5670 5671 if (rte_intr_allow_others(intr_handle)) { 5672 vec = RTE_INTR_VEC_RXTX_OFFSET; 5673 base = RTE_INTR_VEC_RXTX_OFFSET; 5674 } 5675 5676 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5677 ret = hns3_bind_ring_with_vector(hw, vec, true, 5678 HNS3_RING_TYPE_RX, q_id); 5679 if (ret) 5680 goto bind_vector_error; 5681 intr_handle->intr_vec[q_id] = vec; 5682 /* 5683 * If there are not enough efds (e.g.
not enough interrupts), 5684 * the remaining queues will be bound to the last interrupt. 5685 */ 5686 if (vec < base + intr_handle->nb_efd - 1) 5687 vec++; 5688 } 5689 rte_intr_enable(intr_handle); 5690 return 0; 5691 5692 bind_vector_error: 5693 rte_free(intr_handle->intr_vec); 5694 intr_handle->intr_vec = NULL; 5695 alloc_intr_vec_error: 5696 rte_intr_efd_disable(intr_handle); 5697 return ret; 5698 } 5699 5700 static int 5701 hns3_restore_rx_interrupt(struct hns3_hw *hw) 5702 { 5703 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 5704 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5705 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5706 uint16_t q_id; 5707 int ret; 5708 5709 if (dev->data->dev_conf.intr_conf.rxq == 0) 5710 return 0; 5711 5712 if (rte_intr_dp_is_en(intr_handle)) { 5713 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5714 ret = hns3_bind_ring_with_vector(hw, 5715 intr_handle->intr_vec[q_id], true, 5716 HNS3_RING_TYPE_RX, q_id); 5717 if (ret) 5718 return ret; 5719 } 5720 } 5721 5722 return 0; 5723 } 5724 5725 static void 5726 hns3_restore_filter(struct rte_eth_dev *dev) 5727 { 5728 hns3_restore_rss_filter(dev); 5729 } 5730 5731 static int 5732 hns3_dev_start(struct rte_eth_dev *dev) 5733 { 5734 struct hns3_adapter *hns = dev->data->dev_private; 5735 struct hns3_hw *hw = &hns->hw; 5736 int ret; 5737 5738 PMD_INIT_FUNC_TRACE(); 5739 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) 5740 return -EBUSY; 5741 5742 rte_spinlock_lock(&hw->lock); 5743 hw->adapter_state = HNS3_NIC_STARTING; 5744 5745 ret = hns3_do_start(hns, true); 5746 if (ret) { 5747 hw->adapter_state = HNS3_NIC_CONFIGURED; 5748 rte_spinlock_unlock(&hw->lock); 5749 return ret; 5750 } 5751 ret = hns3_map_rx_interrupt(dev); 5752 if (ret) 5753 goto map_rx_inter_err; 5754 5755 /* 5756 * There are three registers used to control the status of a TQP 5757 * (which contains a pair of Tx and Rx queues) in the new version of the 5758 * network engine. One controls the enabling of the Tx queue, another 5759 * controls the enabling of the Rx queue, and the last is the master 5760 * switch controlling the enabling of the whole TQP. The Tx register and 5761 * the TQP register must both be enabled to enable a Tx queue; 5762 * the same applies to the Rx queue. For the older network engine, this 5763 * function only refreshes the enabled flag and is used to update the 5764 * queue status in the DPDK framework. 5765 */ 5766 ret = hns3_start_all_txqs(dev); 5767 if (ret) 5768 goto map_rx_inter_err; 5769 5770 ret = hns3_start_all_rxqs(dev); 5771 if (ret) 5772 goto start_all_rxqs_fail; 5773 5774 hw->adapter_state = HNS3_NIC_STARTED; 5775 rte_spinlock_unlock(&hw->lock); 5776 5777 hns3_rx_scattered_calc(dev); 5778 hns3_set_rxtx_function(dev); 5779 hns3_mp_req_start_rxtx(dev); 5780 5781 hns3_restore_filter(dev); 5782 5783 /* Enable interrupt of all rx queues before enabling queues */ 5784 hns3_dev_all_rx_queue_intr_enable(hw, true); 5785 5786 /* 5787 * After finishing the initialization, enable the TQPs to 5788 * receive/transmit packets and refresh all queue status.
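 * hns3_start_tqps() enables every TQP and then refreshes the queue
 * states reported to the ethdev layer.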
5789 */ 5790 hns3_start_tqps(hw); 5791 5792 hns3_tm_dev_start_proc(hw); 5793 5794 if (dev->data->dev_conf.intr_conf.lsc != 0) 5795 hns3_dev_link_update(dev, 0); 5796 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); 5797 5798 hns3_info(hw, "hns3 dev start successful!"); 5799 5800 return 0; 5801 5802 start_all_rxqs_fail: 5803 hns3_stop_all_txqs(dev); 5804 map_rx_inter_err: 5805 (void)hns3_do_stop(hns); 5806 hw->adapter_state = HNS3_NIC_CONFIGURED; 5807 rte_spinlock_unlock(&hw->lock); 5808 5809 return ret; 5810 } 5811 5812 static int 5813 hns3_do_stop(struct hns3_adapter *hns) 5814 { 5815 struct hns3_hw *hw = &hns->hw; 5816 int ret; 5817 5818 /* 5819 * The "hns3_do_stop" function will also be called by .stop_service to 5820 * prepare for a reset. At the time of a global or IMP reset, the command 5821 * cannot be sent to stop the tx/rx queues. The mbufs in the Tx/Rx queues 5822 * may be accessed during the reset process, so the mbufs cannot be 5823 * released during a reset and must be released after the reset is 5824 * completed. 5825 */ 5826 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) 5827 hns3_dev_release_mbufs(hns); 5828 5829 ret = hns3_cfg_mac_mode(hw, false); 5830 if (ret) 5831 return ret; 5832 hw->mac.link_status = ETH_LINK_DOWN; 5833 5834 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { 5835 hns3_configure_all_mac_addr(hns, true); 5836 ret = hns3_reset_all_tqps(hns); 5837 if (ret) { 5838 hns3_err(hw, "failed to reset all queues ret = %d.", 5839 ret); 5840 return ret; 5841 } 5842 } 5843 hw->mac.default_addr_setted = false; 5844 return 0; 5845 } 5846 5847 static void 5848 hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) 5849 { 5850 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5851 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5852 struct hns3_adapter *hns = dev->data->dev_private; 5853 struct hns3_hw *hw = &hns->hw; 5854 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; 5855 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5856 uint16_t q_id; 5857 5858 if (dev->data->dev_conf.intr_conf.rxq == 0) 5859 return; 5860 5861 /* unmap the ring with vector */ 5862 if (rte_intr_allow_others(intr_handle)) { 5863 vec = RTE_INTR_VEC_RXTX_OFFSET; 5864 base = RTE_INTR_VEC_RXTX_OFFSET; 5865 } 5866 if (rte_intr_dp_is_en(intr_handle)) { 5867 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5868 (void)hns3_bind_ring_with_vector(hw, vec, false, 5869 HNS3_RING_TYPE_RX, 5870 q_id); 5871 if (vec < base + intr_handle->nb_efd - 1) 5872 vec++; 5873 } 5874 } 5875 /* Clean datapath event and queue/vec mapping */ 5876 rte_intr_efd_disable(intr_handle); 5877 if (intr_handle->intr_vec) { 5878 rte_free(intr_handle->intr_vec); 5879 intr_handle->intr_vec = NULL; 5880 } 5881 } 5882 5883 static int 5884 hns3_dev_stop(struct rte_eth_dev *dev) 5885 { 5886 struct hns3_adapter *hns = dev->data->dev_private; 5887 struct hns3_hw *hw = &hns->hw; 5888 5889 PMD_INIT_FUNC_TRACE(); 5890 dev->data->dev_started = 0; 5891 5892 hw->adapter_state = HNS3_NIC_STOPPING; 5893 hns3_set_rxtx_function(dev); 5894 rte_wmb(); 5895 /* Disable datapath on secondary process. */ 5896 hns3_mp_req_stop_rxtx(dev); 5897 /* Prevent crashes when queues are still in use.
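 * Waiting roughly one millisecond per TQP (rte_delay_ms(hw->tqps_num))
 * gives in-flight Rx/Tx bursts on every queue a chance to drain before
 * the queues are torn down.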
5898 rte_delay_ms(hw->tqps_num); 5899 5900 rte_spinlock_lock(&hw->lock); 5901 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { 5902 hns3_tm_dev_stop_proc(hw); 5903 hns3_config_mac_tnl_int(hw, false); 5904 hns3_stop_tqps(hw); 5905 hns3_do_stop(hns); 5906 hns3_unmap_rx_interrupt(dev); 5907 hw->adapter_state = HNS3_NIC_CONFIGURED; 5908 } 5909 hns3_rx_scattered_reset(dev); 5910 rte_eal_alarm_cancel(hns3_service_handler, dev); 5911 hns3_stop_report_lse(dev); 5912 rte_spinlock_unlock(&hw->lock); 5913 5914 return 0; 5915 } 5916 5917 static int 5918 hns3_dev_close(struct rte_eth_dev *eth_dev) 5919 { 5920 struct hns3_adapter *hns = eth_dev->data->dev_private; 5921 struct hns3_hw *hw = &hns->hw; 5922 int ret = 0; 5923 5924 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 5925 rte_free(eth_dev->process_private); 5926 eth_dev->process_private = NULL; 5927 return 0; 5928 } 5929 5930 if (hw->adapter_state == HNS3_NIC_STARTED) 5931 ret = hns3_dev_stop(eth_dev); 5932 5933 hw->adapter_state = HNS3_NIC_CLOSING; 5934 hns3_reset_abort(hns); 5935 hw->adapter_state = HNS3_NIC_CLOSED; 5936 5937 hns3_configure_all_mc_mac_addr(hns, true); 5938 hns3_remove_all_vlan_table(hns); 5939 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0); 5940 hns3_uninit_pf(eth_dev); 5941 hns3_free_all_queues(eth_dev); 5942 rte_free(hw->reset.wait_data); 5943 rte_free(eth_dev->process_private); 5944 eth_dev->process_private = NULL; 5945 hns3_mp_uninit_primary(); 5946 hns3_warn(hw, "Close port %u finished", hw->data->port_id); 5947 5948 return ret; 5949 } 5950 5951 static void 5952 hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause, 5953 bool *tx_pause) 5954 { 5955 struct hns3_mac *mac = &hw->mac; 5956 uint32_t advertising = mac->advertising; 5957 uint32_t lp_advertising = mac->lp_advertising; 5958 *rx_pause = false; 5959 *tx_pause = false; 5960 5961 if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) { 5962 *rx_pause = true; 5963 *tx_pause = true; 5964 } else if (advertising & lp_advertising & 5965 HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) { 5966 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5967 *rx_pause = true; 5968 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5969 *tx_pause = true; 5970 } 5971 }
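/*
 * Summary added for clarity: the function above resolves pause modes from
 * the local and link-partner advertisements in the spirit of IEEE 802.3
 * Annex 28B:
 *
 *	local PAUSE and peer PAUSE           -> Rx pause + Tx pause
 *	both ASYM_PAUSE, local also PAUSE    -> Rx pause only
 *	both ASYM_PAUSE, peer also PAUSE     -> Tx pause only
 *	otherwise                            -> no pause
 */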
5972 5973 static enum hns3_fc_mode 5974 hns3_get_autoneg_fc_mode(struct hns3_hw *hw) 5975 { 5976 enum hns3_fc_mode current_mode; 5977 bool rx_pause = false; 5978 bool tx_pause = false; 5979 5980 switch (hw->mac.media_type) { 5981 case HNS3_MEDIA_TYPE_COPPER: 5982 hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause); 5983 break; 5984 5985 /* 5986 * Flow control auto-negotiation is not supported for fiber and 5987 * backplane media types. 5988 */ 5989 case HNS3_MEDIA_TYPE_FIBER: 5990 case HNS3_MEDIA_TYPE_BACKPLANE: 5991 hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled."); 5992 current_mode = hw->requested_fc_mode; 5993 goto out; 5994 default: 5995 hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).", 5996 hw->mac.media_type); 5997 current_mode = HNS3_FC_NONE; 5998 goto out; 5999 } 6000 6001 if (rx_pause && tx_pause) 6002 current_mode = HNS3_FC_FULL; 6003 else if (rx_pause) 6004 current_mode = HNS3_FC_RX_PAUSE; 6005 else if (tx_pause) 6006 current_mode = HNS3_FC_TX_PAUSE; 6007 else 6008 current_mode = HNS3_FC_NONE; 6009 6010 out: 6011 return current_mode; 6012 } 6013 6014 static enum hns3_fc_mode 6015 hns3_get_current_fc_mode(struct rte_eth_dev *dev) 6016 { 6017 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6018 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6019 struct hns3_mac *mac = &hw->mac; 6020 6021 /* 6022 * When the flow control mode is obtained, the device may not have 6023 * completed auto-negotiation. It is necessary to wait for link establishment. 6024 */ 6025 (void)hns3_dev_link_update(dev, 1); 6026 6027 /* 6028 * If the link auto-negotiation of the NIC is disabled, or the flow 6029 * control auto-negotiation is not supported, the forced flow control 6030 * mode is used. 6031 */ 6032 if (mac->link_autoneg == 0 || !pf->support_fc_autoneg) 6033 return hw->requested_fc_mode; 6034 6035 return hns3_get_autoneg_fc_mode(hw); 6036 } 6037 6038 static int 6039 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 6040 { 6041 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6042 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6043 enum hns3_fc_mode current_mode; 6044 6045 current_mode = hns3_get_current_fc_mode(dev); 6046 switch (current_mode) { 6047 case HNS3_FC_FULL: 6048 fc_conf->mode = RTE_FC_FULL; 6049 break; 6050 case HNS3_FC_TX_PAUSE: 6051 fc_conf->mode = RTE_FC_TX_PAUSE; 6052 break; 6053 case HNS3_FC_RX_PAUSE: 6054 fc_conf->mode = RTE_FC_RX_PAUSE; 6055 break; 6056 case HNS3_FC_NONE: 6057 default: 6058 fc_conf->mode = RTE_FC_NONE; 6059 break; 6060 } 6061 6062 fc_conf->pause_time = pf->pause_time; 6063 fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0; 6064 6065 return 0; 6066 }
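/*
 * Illustrative application-side use of the flow-control callbacks above
 * and below (a sketch; port_id is hypothetical):
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	memset(&fc_conf, 0, sizeof(fc_conf));
 *	(void)rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 *	fc_conf.mode = RTE_FC_FULL;
 *	(void)rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 *
 * high_water, low_water, send_xon and mac_ctrl_frame_fwd must stay zero
 * and pause_time must be non-zero, or hns3_flow_ctrl_set() rejects the
 * request.
 */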
6067 6068 static int 6069 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) 6070 { 6071 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 6072 6073 if (!pf->support_fc_autoneg) { 6074 if (autoneg != 0) { 6075 hns3_err(hw, "unsupported fc auto-negotiation setting."); 6076 return -EOPNOTSUPP; 6077 } 6078 6079 /* 6080 * Flow control auto-negotiation of the NIC is not supported, 6081 * but other auto-negotiation features may be supported. 6082 */ 6083 if (autoneg != hw->mac.link_autoneg) { 6084 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!"); 6085 return -EOPNOTSUPP; 6086 } 6087 6088 return 0; 6089 } 6090 6091 /* 6092 * If flow control auto-negotiation of the NIC is supported, all 6093 * auto-negotiation features are supported. 6094 */ 6095 if (autoneg != hw->mac.link_autoneg) { 6096 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!"); 6097 return -EOPNOTSUPP; 6098 } 6099 6100 return 0; 6101 } 6102 6103 static int 6104 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 6105 { 6106 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6107 int ret; 6108 6109 if (fc_conf->high_water || fc_conf->low_water || 6110 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) { 6111 hns3_err(hw, "Unsupported flow control settings specified, " 6112 "high_water(%u), low_water(%u), send_xon(%u) and " 6113 "mac_ctrl_frame_fwd(%u) must be set to '0'", 6114 fc_conf->high_water, fc_conf->low_water, 6115 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd); 6116 return -EINVAL; 6117 } 6118 6119 ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg); 6120 if (ret) 6121 return ret; 6122 6123 if (!fc_conf->pause_time) { 6124 hns3_err(hw, "Invalid pause time %u setting.", 6125 fc_conf->pause_time); 6126 return -EINVAL; 6127 } 6128 6129 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 6130 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) { 6131 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. " 6132 "current_fc_status = %d", hw->current_fc_status); 6133 return -EOPNOTSUPP; 6134 } 6135 6136 if (hw->num_tc > 1) { 6137 hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported."); 6138 return -EOPNOTSUPP; 6139 } 6140 6141 rte_spinlock_lock(&hw->lock); 6142 ret = hns3_fc_enable(dev, fc_conf); 6143 rte_spinlock_unlock(&hw->lock); 6144 6145 return ret; 6146 }
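/*
 * Illustrative PFC request for the callback below (a sketch; port_id and
 * the priority value are hypothetical):
 *
 *	struct rte_eth_pfc_conf pfc_conf;
 *
 *	memset(&pfc_conf, 0, sizeof(pfc_conf));
 *	pfc_conf.fc.mode = RTE_FC_FULL;
 *	pfc_conf.fc.pause_time = 0xFFFF;
 *	pfc_conf.priority = 3;
 *	(void)rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
 *
 * As with MAC pause, the watermark/xon/forwarding fields and fc.autoneg
 * must be zero and pause_time must be non-zero.
 */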
6147 6148 static int 6149 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, 6150 struct rte_eth_pfc_conf *pfc_conf) 6151 { 6152 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6153 int ret; 6154 6155 if (!hns3_dev_dcb_supported(hw)) { 6156 hns3_err(hw, "This port does not support dcb configurations."); 6157 return -EOPNOTSUPP; 6158 } 6159 6160 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water || 6161 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) { 6162 hns3_err(hw, "Unsupported flow control settings specified, " 6163 "high_water(%u), low_water(%u), send_xon(%u) and " 6164 "mac_ctrl_frame_fwd(%u) must be set to '0'", 6165 pfc_conf->fc.high_water, pfc_conf->fc.low_water, 6166 pfc_conf->fc.send_xon, 6167 pfc_conf->fc.mac_ctrl_frame_fwd); 6168 return -EINVAL; 6169 } 6170 if (pfc_conf->fc.autoneg) { 6171 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 6172 return -EINVAL; 6173 } 6174 if (pfc_conf->fc.pause_time == 0) { 6175 hns3_err(hw, "Invalid pause time %u setting.", 6176 pfc_conf->fc.pause_time); 6177 return -EINVAL; 6178 } 6179 6180 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 6181 hw->current_fc_status == HNS3_FC_STATUS_PFC)) { 6182 hns3_err(hw, "MAC pause is enabled. Cannot set PFC. " 6183 "current_fc_status = %d", hw->current_fc_status); 6184 return -EOPNOTSUPP; 6185 } 6186 6187 rte_spinlock_lock(&hw->lock); 6188 ret = hns3_dcb_pfc_enable(dev, pfc_conf); 6189 rte_spinlock_unlock(&hw->lock); 6190 6191 return ret; 6192 } 6193 6194 static int 6195 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) 6196 { 6197 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6198 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6199 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; 6200 int i; 6201 6202 rte_spinlock_lock(&hw->lock); 6203 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) 6204 dcb_info->nb_tcs = pf->local_max_tc; 6205 else 6206 dcb_info->nb_tcs = 1; 6207 6208 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) 6209 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; 6210 for (i = 0; i < dcb_info->nb_tcs; i++) 6211 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; 6212 6213 for (i = 0; i < hw->num_tc; i++) { 6214 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; 6215 dcb_info->tc_queue.tc_txq[0][i].base = 6216 hw->tc_queue[i].tqp_offset; 6217 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; 6218 dcb_info->tc_queue.tc_txq[0][i].nb_queue = 6219 hw->tc_queue[i].tqp_count; 6220 } 6221 rte_spinlock_unlock(&hw->lock); 6222 6223 return 0; 6224 } 6225 6226 static int 6227 hns3_reinit_dev(struct hns3_adapter *hns) 6228 { 6229 struct hns3_hw *hw = &hns->hw; 6230 int ret; 6231 6232 ret = hns3_cmd_init(hw); 6233 if (ret) { 6234 hns3_err(hw, "Failed to init cmd: %d", ret); 6235 return ret; 6236 } 6237 6238 ret = hns3_reset_all_tqps(hns); 6239 if (ret) { 6240 hns3_err(hw, "Failed to reset all queues: %d", ret); 6241 return ret; 6242 } 6243 6244 ret = hns3_init_hardware(hns); 6245 if (ret) { 6246 hns3_err(hw, "Failed to init hardware: %d", ret); 6247 return ret; 6248 } 6249 6250 ret = hns3_enable_hw_error_intr(hns, true); 6251 if (ret) { 6252 hns3_err(hw, "fail to enable hw error interrupts: %d", 6253 ret); 6254 return ret; 6255 } 6256 hns3_info(hw, "Reset done, driver initialization finished."); 6257 6258 return 0; 6259 } 6260 6261 static bool 6262 is_pf_reset_done(struct hns3_hw *hw) 6263 { 6264 uint32_t val, reg, reg_bit; 6265 6266 switch (hw->reset.level) { 6267 case HNS3_IMP_RESET: 6268 reg = HNS3_GLOBAL_RESET_REG; 6269 reg_bit = HNS3_IMP_RESET_BIT; 6270 break; 6271 case HNS3_GLOBAL_RESET: 6272 reg = HNS3_GLOBAL_RESET_REG; 6273 reg_bit = HNS3_GLOBAL_RESET_BIT; 6274 break; 6275 case HNS3_FUNC_RESET: 6276 reg = HNS3_FUN_RST_ING; 6277 reg_bit = HNS3_FUN_RST_ING_B; 6278 break; 6279 case HNS3_FLR_RESET: 6280 default: 6281 hns3_err(hw, "Wait for unsupported reset level: %d", 6282 hw->reset.level); 6283 return true; 6284 } 6285 val = hns3_read_dev(hw, reg); 6286 if (hns3_get_bit(val, reg_bit)) 6287 return false; 6288 else 6289 return true; 6290 } 6291 6292 bool 6293 hns3_is_reset_pending(struct hns3_adapter *hns) 6294 { 6295 struct hns3_hw *hw = &hns->hw; 6296 enum hns3_reset_level reset; 6297 6298 hns3_check_event_cause(hns, NULL); 6299 reset = hns3_get_reset_level(hns, &hw->reset.pending); 6300 6301 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6302 hw->reset.level < reset) { 6303 hns3_warn(hw, "High level reset %d is pending", reset); 6304 return true; 6305 } 6306 reset = hns3_get_reset_level(hns, &hw->reset.request); 6307 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6308 hw->reset.level < reset) { 6309 hns3_warn(hw, "High level reset %d is
requested", reset); 6310 return true; 6311 } 6312 return false; 6313 } 6314 6315 static int 6316 hns3_wait_hardware_ready(struct hns3_adapter *hns) 6317 { 6318 struct hns3_hw *hw = &hns->hw; 6319 struct hns3_wait_data *wait_data = hw->reset.wait_data; 6320 struct timeval tv; 6321 6322 if (wait_data->result == HNS3_WAIT_SUCCESS) 6323 return 0; 6324 else if (wait_data->result == HNS3_WAIT_TIMEOUT) { 6325 hns3_clock_gettime(&tv); 6326 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", 6327 tv.tv_sec, tv.tv_usec); 6328 return -ETIME; 6329 } else if (wait_data->result == HNS3_WAIT_REQUEST) 6330 return -EAGAIN; 6331 6332 wait_data->hns = hns; 6333 wait_data->check_completion = is_pf_reset_done; 6334 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT * 6335 HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms(); 6336 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC; 6337 wait_data->count = HNS3_RESET_WAIT_CNT; 6338 wait_data->result = HNS3_WAIT_REQUEST; 6339 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); 6340 return -EAGAIN; 6341 } 6342 6343 static int 6344 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) 6345 { 6346 struct hns3_cmd_desc desc; 6347 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data; 6348 6349 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); 6350 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1); 6351 req->fun_reset_vfid = func_id; 6352 6353 return hns3_cmd_send(hw, &desc, 1); 6354 } 6355 6356 static int 6357 hns3_imp_reset_cmd(struct hns3_hw *hw) 6358 { 6359 struct hns3_cmd_desc desc; 6360 6361 hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); 6362 desc.data[0] = 0xeedd; 6363 6364 return hns3_cmd_send(hw, &desc, 1); 6365 } 6366 6367 static void 6368 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) 6369 { 6370 struct hns3_hw *hw = &hns->hw; 6371 struct timeval tv; 6372 uint32_t val; 6373 6374 hns3_clock_gettime(&tv); 6375 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) || 6376 hns3_read_dev(hw, HNS3_FUN_RST_ING)) { 6377 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld", 6378 tv.tv_sec, tv.tv_usec); 6379 return; 6380 } 6381 6382 switch (reset_level) { 6383 case HNS3_IMP_RESET: 6384 hns3_imp_reset_cmd(hw); 6385 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", 6386 tv.tv_sec, tv.tv_usec); 6387 break; 6388 case HNS3_GLOBAL_RESET: 6389 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG); 6390 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1); 6391 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val); 6392 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld", 6393 tv.tv_sec, tv.tv_usec); 6394 break; 6395 case HNS3_FUNC_RESET: 6396 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld", 6397 tv.tv_sec, tv.tv_usec); 6398 /* schedule again to check later */ 6399 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending); 6400 hns3_schedule_reset(hns); 6401 break; 6402 default: 6403 hns3_warn(hw, "Unsupported reset level: %d", reset_level); 6404 return; 6405 } 6406 hns3_atomic_clear_bit(reset_level, &hw->reset.request); 6407 } 6408 6409 static enum hns3_reset_level 6410 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) 6411 { 6412 struct hns3_hw *hw = &hns->hw; 6413 enum hns3_reset_level reset_level = HNS3_NONE_RESET; 6414
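/*
 * Note added for clarity: "levels" is a bitmask (hw->reset.pending or
 * hw->reset.request) indexed by enum hns3_reset_level, so several levels
 * can be latched at once; the chain below reports the most severe one,
 * IMP > GLOBAL > FUNC > FLR.
 */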
6415 /* Return the highest priority reset level amongst all */ 6416 if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels)) 6417 reset_level = HNS3_IMP_RESET; 6418 else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels)) 6419 reset_level = HNS3_GLOBAL_RESET; 6420 else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels)) 6421 reset_level = HNS3_FUNC_RESET; 6422 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels)) 6423 reset_level = HNS3_FLR_RESET; 6424 6425 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level) 6426 return HNS3_NONE_RESET; 6427 6428 return reset_level; 6429 } 6430 6431 static void 6432 hns3_record_imp_error(struct hns3_adapter *hns) 6433 { 6434 struct hns3_hw *hw = &hns->hw; 6435 uint32_t reg_val; 6436 6437 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 6438 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) { 6439 hns3_warn(hw, "Detected IMP RD poison!"); 6440 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0); 6441 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 6442 } 6443 6444 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) { 6445 hns3_warn(hw, "Detected IMP CMDQ error!"); 6446 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0); 6447 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 6448 } 6449 } 6450 6451 static int 6452 hns3_prepare_reset(struct hns3_adapter *hns) 6453 { 6454 struct hns3_hw *hw = &hns->hw; 6455 uint32_t reg_val; 6456 int ret; 6457 6458 switch (hw->reset.level) { 6459 case HNS3_FUNC_RESET: 6460 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID); 6461 if (ret) 6462 return ret; 6463 6464 /* 6465 * After performing PF reset, it is not necessary to do the 6466 * mailbox handling or send any command to firmware, because 6467 * any mailbox handling or command to firmware is only valid 6468 * after hns3_cmd_init is called. 6469 */ 6470 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 6471 hw->reset.stats.request_cnt++; 6472 break; 6473 case HNS3_IMP_RESET: 6474 hns3_record_imp_error(hns); 6475 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 6476 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val | 6477 BIT(HNS3_VECTOR0_IMP_RESET_INT_B)); 6478 break; 6479 default: 6480 break; 6481 } 6482 return 0; 6483 } 6484 6485 static int 6486 hns3_set_rst_done(struct hns3_hw *hw) 6487 { 6488 struct hns3_pf_rst_done_cmd *req; 6489 struct hns3_cmd_desc desc; 6490 6491 req = (struct hns3_pf_rst_done_cmd *)desc.data; 6492 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false); 6493 req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT; 6494 return hns3_cmd_send(hw, &desc, 1); 6495 } 6496 6497 static int 6498 hns3_stop_service(struct hns3_adapter *hns) 6499 { 6500 struct hns3_hw *hw = &hns->hw; 6501 struct rte_eth_dev *eth_dev; 6502 6503 eth_dev = &rte_eth_devices[hw->data->port_id]; 6504 hw->mac.link_status = ETH_LINK_DOWN; 6505 if (hw->adapter_state == HNS3_NIC_STARTED) { 6506 rte_eal_alarm_cancel(hns3_service_handler, eth_dev); 6507 hns3_update_linkstatus_and_event(hw, false); 6508 } 6509 6510 hns3_set_rxtx_function(eth_dev); 6511 rte_wmb(); 6512 /* Disable datapath on secondary process. */ 6513 hns3_mp_req_stop_rxtx(eth_dev); 6514 rte_delay_ms(hw->tqps_num); 6515 6516 rte_spinlock_lock(&hw->lock); 6517 if (hns->hw.adapter_state == HNS3_NIC_STARTED || 6518 hw->adapter_state == HNS3_NIC_STOPPING) { 6519 hns3_enable_all_queues(hw, false); 6520 hns3_do_stop(hns); 6521 hw->reset.mbuf_deferred_free = true; 6522 } else 6523 hw->reset.mbuf_deferred_free = false; 6524 6525 /* 6526 * It is cumbersome for hardware to pick-and-choose entries for deletion 6527 * from table space.
Hence, for function reset, software intervention is 6528 * required to delete the entries. 6529 */ 6530 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) 6531 hns3_configure_all_mc_mac_addr(hns, true); 6532 rte_spinlock_unlock(&hw->lock); 6533 6534 return 0; 6535 } 6536 6537 static int 6538 hns3_start_service(struct hns3_adapter *hns) 6539 { 6540 struct hns3_hw *hw = &hns->hw; 6541 struct rte_eth_dev *eth_dev; 6542 6543 if (hw->reset.level == HNS3_IMP_RESET || 6544 hw->reset.level == HNS3_GLOBAL_RESET) 6545 hns3_set_rst_done(hw); 6546 eth_dev = &rte_eth_devices[hw->data->port_id]; 6547 hns3_set_rxtx_function(eth_dev); 6548 hns3_mp_req_start_rxtx(eth_dev); 6549 if (hw->adapter_state == HNS3_NIC_STARTED) { 6550 /* 6551 * This function's caller already holds the hns3_hw.lock, and 6552 * hns3_service_handler may report LSE; in a bonding application 6553 * this calls the driver's ops, which may acquire the 6554 * hns3_hw.lock again and thus lead to deadlock. 6555 * We defer the call to hns3_service_handler to avoid the deadlock. 6556 */ 6557 rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL, 6558 hns3_service_handler, eth_dev); 6559 6560 /* Enable interrupt of all rx queues before enabling queues */ 6561 hns3_dev_all_rx_queue_intr_enable(hw, true); 6562 /* 6563 * The enable state of each rxq and txq will be recovered after 6564 * reset, so we need to restore them before enabling all TQPs. 6565 */ 6566 hns3_restore_tqp_enable_state(hw); 6567 /* 6568 * When initialization is finished, enable queues to receive 6569 * and transmit packets. 6570 */ 6571 hns3_enable_all_queues(hw, true); 6572 } 6573 6574 return 0; 6575 } 6576 6577 static int 6578 hns3_restore_conf(struct hns3_adapter *hns) 6579 { 6580 struct hns3_hw *hw = &hns->hw; 6581 int ret; 6582 6583 ret = hns3_configure_all_mac_addr(hns, false); 6584 if (ret) 6585 return ret; 6586 6587 ret = hns3_configure_all_mc_mac_addr(hns, false); 6588 if (ret) 6589 goto err_mc_mac; 6590 6591 ret = hns3_dev_promisc_restore(hns); 6592 if (ret) 6593 goto err_promisc; 6594 6595 ret = hns3_restore_vlan_table(hns); 6596 if (ret) 6597 goto err_promisc; 6598 6599 ret = hns3_restore_vlan_conf(hns); 6600 if (ret) 6601 goto err_promisc; 6602 6603 ret = hns3_restore_all_fdir_filter(hns); 6604 if (ret) 6605 goto err_promisc; 6606 6607 ret = hns3_restore_ptp(hns); 6608 if (ret) 6609 goto err_promisc; 6610 6611 ret = hns3_restore_rx_interrupt(hw); 6612 if (ret) 6613 goto err_promisc; 6614 6615 ret = hns3_restore_gro_conf(hw); 6616 if (ret) 6617 goto err_promisc; 6618 6619 ret = hns3_restore_fec(hw); 6620 if (ret) 6621 goto err_promisc; 6622 6623 if (hns->hw.adapter_state == HNS3_NIC_STARTED) { 6624 ret = hns3_do_start(hns, false); 6625 if (ret) 6626 goto err_promisc; 6627 hns3_info(hw, "hns3 dev restart successful!"); 6628 } else if (hw->adapter_state == HNS3_NIC_STOPPING) 6629 hw->adapter_state = HNS3_NIC_CONFIGURED; 6630 return 0; 6631 6632 err_promisc: 6633 hns3_configure_all_mc_mac_addr(hns, true); 6634 err_mc_mac: 6635 hns3_configure_all_mac_addr(hns, true); 6636 return ret; 6637 } 6638 6639 static void 6640 hns3_reset_service(void *param) 6641 { 6642 struct hns3_adapter *hns = (struct hns3_adapter *)param; 6643 struct hns3_hw *hw = &hns->hw; 6644 enum hns3_reset_level reset_level; 6645 struct timeval tv_delta; 6646 struct timeval tv_start; 6647 struct timeval tv; 6648 uint64_t msec; 6649 int ret; 6650
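/*
 * Reading aid (an interpretation of the scheduling states used below):
 * hw->reset.schedule normally moves SCHEDULE_NONE -> SCHEDULE_REQUESTED
 * when a reset is scheduled, or to SCHEDULE_DEFERRED when the interrupt
 * handling itself must be retried from this task, and returns to
 * SCHEDULE_NONE once the service has run.
 */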
6651 /* 6652 * The interrupt is not triggered within the delay time. 6653 * The interrupt may have been lost. It is necessary to handle 6654 * the interrupt to recover from the error. 6655 */ 6656 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 6657 SCHEDULE_DEFERRED) { 6658 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, 6659 __ATOMIC_RELAXED); 6660 hns3_err(hw, "Handling interrupts in delayed tasks"); 6661 hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]); 6662 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 6663 if (reset_level == HNS3_NONE_RESET) { 6664 hns3_err(hw, "No reset level is set, try IMP reset"); 6665 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); 6666 } 6667 } 6668 __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED); 6669 6670 /* 6671 * Check if there is any ongoing reset in the hardware. This status can 6672 * be checked from reset_pending. If there is one, we need to wait for 6673 * the hardware to complete the reset. 6674 * a. If we are able to figure out in reasonable time that the hardware 6675 * has fully reset, then we can proceed with the driver/client 6676 * reset. 6677 * b. Else, we can come back later to check this status, so reschedule 6678 * now. 6679 */ 6680 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 6681 if (reset_level != HNS3_NONE_RESET) { 6682 hns3_clock_gettime(&tv_start); 6683 ret = hns3_reset_process(hns, reset_level); 6684 hns3_clock_gettime(&tv); 6685 timersub(&tv, &tv_start, &tv_delta); 6686 msec = hns3_clock_calctime_ms(&tv_delta); 6687 if (msec > HNS3_RESET_PROCESS_MS) 6688 hns3_err(hw, "%d handle long time delta %" PRIu64 6689 " ms time=%ld.%.6ld", 6690 hw->reset.level, msec, 6691 tv.tv_sec, tv.tv_usec); 6692 if (ret == -EAGAIN) 6693 return; 6694 } 6695 6696 /* Check if we got any *new* reset requests to be honored */ 6697 reset_level = hns3_get_reset_level(hns, &hw->reset.request); 6698 if (reset_level != HNS3_NONE_RESET) 6699 hns3_msix_process(hns, reset_level); 6700 } 6701 6702 static unsigned int 6703 hns3_get_speed_capa_num(uint16_t device_id) 6704 { 6705 unsigned int num; 6706 6707 switch (device_id) { 6708 case HNS3_DEV_ID_25GE: 6709 case HNS3_DEV_ID_25GE_RDMA: 6710 num = 2; 6711 break; 6712 case HNS3_DEV_ID_100G_RDMA_MACSEC: 6713 case HNS3_DEV_ID_200G_RDMA: 6714 num = 1; 6715 break; 6716 default: 6717 num = 0; 6718 break; 6719 } 6720 6721 return num; 6722 } 6723 6724 static int 6725 hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa, 6726 uint16_t device_id) 6727 { 6728 switch (device_id) { 6729 case HNS3_DEV_ID_25GE: 6730 /* fallthrough */ 6731 case HNS3_DEV_ID_25GE_RDMA: 6732 speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed; 6733 speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa; 6734 6735 /* In HNS3 devices, the 25G NIC is also compatible with the 10G rate */ 6736 speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed; 6737 speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa; 6738 break; 6739 case HNS3_DEV_ID_100G_RDMA_MACSEC: 6740 speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed; 6741 speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa; 6742 break; 6743 case HNS3_DEV_ID_200G_RDMA: 6744 speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed; 6745 speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa; 6746 break; 6747 default: 6748 return -ENOTSUP; 6749 } 6750 6751 return 0; 6752 } 6753
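/*
 * Illustrative two-step capability query through the ethdev API for the
 * callback below (a sketch; port_id is hypothetical):
 *
 *	struct rte_eth_fec_capa capa[2];
 *	int n = rte_eth_fec_get_capability(port_id, NULL, 0);
 *
 *	if (n > 0 && n <= 2)
 *		n = rte_eth_fec_get_capability(port_id, capa, n);
 *
 * With a NULL array the driver only reports how many entries are needed;
 * at most two here, since the 25G devices also report a 10G entry.
 */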
6754 static int 6755 hns3_fec_get_capability(struct rte_eth_dev *dev, 6756 struct rte_eth_fec_capa *speed_fec_capa, 6757 unsigned int num) 6758 { 6759 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6760 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6761 uint16_t device_id = pci_dev->id.device_id; 6762 unsigned int capa_num; 6763 int ret; 6764 6765 capa_num = hns3_get_speed_capa_num(device_id); 6766 if (capa_num == 0) { 6767 hns3_err(hw, "device(0x%x) is not supported by hns3 PMD", 6768 device_id); 6769 return -ENOTSUP; 6770 } 6771 6772 if (speed_fec_capa == NULL || num < capa_num) 6773 return capa_num; 6774 6775 ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id); 6776 if (ret) 6777 return -ENOTSUP; 6778 6779 return capa_num; 6780 } 6781 6782 static int 6783 get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) 6784 { 6785 struct hns3_config_fec_cmd *req; 6786 struct hns3_cmd_desc desc; 6787 int ret; 6788 6789 /* 6790 * CMD(HNS3_OPC_CONFIG_FEC_MODE) read is not supported 6791 * on devices with link speed 6792 * below 10 Gbps. 6793 */ 6794 if (hw->mac.link_speed < ETH_SPEED_NUM_10G) { 6795 *state = 0; 6796 return 0; 6797 } 6798 6799 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true); 6800 req = (struct hns3_config_fec_cmd *)desc.data; 6801 ret = hns3_cmd_send(hw, &desc, 1); 6802 if (ret) { 6803 hns3_err(hw, "get current fec auto state failed, ret = %d", 6804 ret); 6805 return ret; 6806 } 6807 6808 *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B); 6809 return 0; 6810 } 6811 6812 static int 6813 hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) 6814 { 6815 struct hns3_sfp_info_cmd *resp; 6816 uint32_t tmp_fec_capa; 6817 uint8_t auto_state; 6818 struct hns3_cmd_desc desc; 6819 int ret; 6820 6821 /* 6822 * If link is down and AUTO is enabled, AUTO is returned; otherwise, 6823 * the configured FEC mode is returned. 6824 * If link is up, the current FEC mode is returned. 6825 */ 6826 if (hw->mac.link_status == ETH_LINK_DOWN) { 6827 ret = get_current_fec_auto_state(hw, &auto_state); 6828 if (ret) 6829 return ret; 6830 6831 if (auto_state == 0x1) { 6832 *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO); 6833 return 0; 6834 } 6835 } 6836 6837 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true); 6838 resp = (struct hns3_sfp_info_cmd *)desc.data; 6839 resp->query_type = HNS3_ACTIVE_QUERY; 6840 6841 ret = hns3_cmd_send(hw, &desc, 1); 6842 if (ret == -EOPNOTSUPP) { 6843 hns3_err(hw, "IMP does not support getting FEC, ret = %d", ret); 6844 return ret; 6845 } else if (ret) { 6846 hns3_err(hw, "get FEC failed, ret = %d", ret); 6847 return ret; 6848 } 6849 6850 /* 6851 * FEC mode order defined in hns3 hardware is inconsistent with 6852 * that defined in the ethdev library. So the sequence needs 6853 * to be converted.
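 * The mapping used below is: HNS3_HW_FEC_MODE_NOFEC ->
 * RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC), HNS3_HW_FEC_MODE_BASER -> BASER and
 * HNS3_HW_FEC_MODE_RS -> RS; any unknown value falls back to NOFEC.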
6854 */ 6855 switch (resp->active_fec) { 6856 case HNS3_HW_FEC_MODE_NOFEC: 6857 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 6858 break; 6859 case HNS3_HW_FEC_MODE_BASER: 6860 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER); 6861 break; 6862 case HNS3_HW_FEC_MODE_RS: 6863 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS); 6864 break; 6865 default: 6866 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 6867 break; 6868 } 6869 6870 *fec_capa = tmp_fec_capa; 6871 return 0; 6872 } 6873 6874 static int 6875 hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa) 6876 { 6877 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6878 6879 return hns3_fec_get_internal(hw, fec_capa); 6880 } 6881 6882 static int 6883 hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) 6884 { 6885 struct hns3_config_fec_cmd *req; 6886 struct hns3_cmd_desc desc; 6887 int ret; 6888 6889 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false); 6890 6891 req = (struct hns3_config_fec_cmd *)desc.data; 6892 switch (mode) { 6893 case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC): 6894 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6895 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF); 6896 break; 6897 case RTE_ETH_FEC_MODE_CAPA_MASK(BASER): 6898 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6899 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER); 6900 break; 6901 case RTE_ETH_FEC_MODE_CAPA_MASK(RS): 6902 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6903 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS); 6904 break; 6905 case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO): 6906 hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1); 6907 break; 6908 default: 6909 return 0; 6910 } 6911 ret = hns3_cmd_send(hw, &desc, 1); 6912 if (ret) 6913 hns3_err(hw, "set fec mode failed, ret = %d", ret); 6914 6915 return ret; 6916 } 6917 6918 static uint32_t 6919 get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa) 6920 { 6921 struct hns3_mac *mac = &hw->mac; 6922 uint32_t cur_capa; 6923 6924 switch (mac->link_speed) { 6925 case ETH_SPEED_NUM_10G: 6926 cur_capa = fec_capa[1].capa; 6927 break; 6928 case ETH_SPEED_NUM_25G: 6929 case ETH_SPEED_NUM_100G: 6930 case ETH_SPEED_NUM_200G: 6931 cur_capa = fec_capa[0].capa; 6932 break; 6933 default: 6934 cur_capa = 0; 6935 break; 6936 } 6937 6938 return cur_capa; 6939 } 6940 6941 static bool 6942 is_fec_mode_one_bit_set(uint32_t mode) 6943 { 6944 int cnt = 0; 6945 uint8_t i; 6946 6947 for (i = 0; i < sizeof(mode) * 8; i++) 6948 if (mode >> i & 0x1) 6949 cnt++; 6950 6951 return cnt == 1 ? true : false; 6952 } 6953 6954 static int 6955 hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) 6956 { 6957 #define FEC_CAPA_NUM 2 6958 struct hns3_adapter *hns = dev->data->dev_private; 6959 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 6960 struct hns3_pf *pf = &hns->pf; 6961 6962 struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM]; 6963 uint32_t cur_capa; 6964 uint32_t num = FEC_CAPA_NUM; 6965 int ret; 6966 6967 ret = hns3_fec_get_capability(dev, fec_capa, num); 6968 if (ret < 0) 6969 return ret; 6970 6971 /* The HNS3 PMD only supports modes with a single bit set, e.g. 0x1, 0x4 */ 6972 if (!is_fec_mode_one_bit_set(mode)) { 6973 hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, " 6974 "exactly one bit should be set", mode); 6975 return -EINVAL; 6976 } 6977 6978 /* 6979 * Check whether the configured mode is within the FEC capability. 6980 * If not, the configured mode will not be supported.
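 * From the application side this path is reached via rte_eth_fec_set(),
 * e.g. (port_id is hypothetical):
 *
 *	(void)rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));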
6981 */ 6982 cur_capa = get_current_speed_fec_cap(hw, fec_capa); 6983 if (!(cur_capa & mode)) { 6984 hns3_err(hw, "unsupported FEC mode = 0x%x", mode); 6985 return -EINVAL; 6986 } 6987 6988 rte_spinlock_lock(&hw->lock); 6989 ret = hns3_set_fec_hw(hw, mode); 6990 if (ret) { 6991 rte_spinlock_unlock(&hw->lock); 6992 return ret; 6993 } 6994 6995 pf->fec_mode = mode; 6996 rte_spinlock_unlock(&hw->lock); 6997 6998 return 0; 6999 } 7000 7001 static int 7002 hns3_restore_fec(struct hns3_hw *hw) 7003 { 7004 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 7005 struct hns3_pf *pf = &hns->pf; 7006 uint32_t mode = pf->fec_mode; 7007 int ret; 7008 7009 ret = hns3_set_fec_hw(hw, mode); 7010 if (ret) 7011 hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d", 7012 mode, ret); 7013 7014 return ret; 7015 } 7016 7017 static int 7018 hns3_query_dev_fec_info(struct hns3_hw *hw) 7019 { 7020 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 7021 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns); 7022 int ret; 7023 7024 ret = hns3_fec_get_internal(hw, &pf->fec_mode); 7025 if (ret) 7026 hns3_err(hw, "query device FEC info failed, ret = %d", ret); 7027 7028 return ret; 7029 } 7030 7031 static bool 7032 hns3_optical_module_existed(struct hns3_hw *hw) 7033 { 7034 struct hns3_cmd_desc desc; 7035 bool existed; 7036 int ret; 7037 7038 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true); 7039 ret = hns3_cmd_send(hw, &desc, 1); 7040 if (ret) { 7041 hns3_err(hw, 7042 "fail to get optical module exist state, ret = %d.\n", 7043 ret); 7044 return false; 7045 } 7046 existed = !!desc.data[0]; 7047 7048 return existed; 7049 } 7050 7051 static int 7052 hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, 7053 uint32_t len, uint8_t *data) 7054 { 7055 #define HNS3_SFP_INFO_CMD_NUM 6 7056 #define HNS3_SFP_INFO_MAX_LEN \ 7057 (HNS3_SFP_INFO_BD0_LEN + \ 7058 (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN) 7059 struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM]; 7060 struct hns3_sfp_info_bd0_cmd *sfp_info_bd0; 7061 uint16_t read_len; 7062 uint16_t copy_len; 7063 int ret; 7064 int i; 7065 7066 for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) { 7067 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM, 7068 true); 7069 if (i < HNS3_SFP_INFO_CMD_NUM - 1) 7070 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 7071 } 7072 7073 sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data; 7074 sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset); 7075 read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN); 7076 sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len); 7077 7078 ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); 7079 if (ret) { 7080 hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n", 7081 ret); 7082 return ret; 7083 } 7084 7085 /* The data format in BD0 is different from the others.
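 * Layout, as used in the length math above: BD0 carries
 * HNS3_SFP_INFO_BD0_LEN bytes of EEPROM data and every following BD
 * carries HNS3_SFP_INFO_BDX_LEN bytes, so one batch of
 * HNS3_SFP_INFO_CMD_NUM descriptors returns at most
 * HNS3_SFP_INFO_MAX_LEN bytes.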
*/ 7086 copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN); 7087 memcpy(data, sfp_info_bd0->data, copy_len); 7088 read_len = copy_len; 7089 7090 for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) { 7091 if (read_len >= len) 7092 break; 7093 7094 copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN); 7095 memcpy(data + read_len, desc[i].data, copy_len); 7096 read_len += copy_len; 7097 } 7098 7099 return (int)read_len; 7100 } 7101 7102 static int 7103 hns3_get_module_eeprom(struct rte_eth_dev *dev, 7104 struct rte_dev_eeprom_info *info) 7105 { 7106 struct hns3_adapter *hns = dev->data->dev_private; 7107 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7108 uint32_t offset = info->offset; 7109 uint32_t len = info->length; 7110 uint8_t *data = info->data; 7111 uint32_t read_len = 0; 7112 7113 if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER) 7114 return -ENOTSUP; 7115 7116 if (!hns3_optical_module_existed(hw)) { 7117 hns3_err(hw, "fail to read module EEPROM: no module is connected.\n"); 7118 return -EIO; 7119 } 7120 7121 while (read_len < len) { 7122 int ret; 7123 ret = hns3_get_module_eeprom_data(hw, offset + read_len, 7124 len - read_len, 7125 data + read_len); 7126 if (ret < 0) 7127 return -EIO; 7128 read_len += ret; 7129 } 7130 7131 return 0; 7132 } 7133 7134 static int 7135 hns3_get_module_info(struct rte_eth_dev *dev, 7136 struct rte_eth_dev_module_info *modinfo) 7137 { 7138 #define HNS3_SFF8024_ID_SFP 0x03 7139 #define HNS3_SFF8024_ID_QSFP_8438 0x0c 7140 #define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d 7141 #define HNS3_SFF8024_ID_QSFP28_8636 0x11 7142 #define HNS3_SFF_8636_V1_3 0x03 7143 struct hns3_adapter *hns = dev->data->dev_private; 7144 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7145 struct rte_dev_eeprom_info info; 7146 struct hns3_sfp_type sfp_type; 7147 int ret; 7148 7149 memset(&sfp_type, 0, sizeof(sfp_type)); 7150 memset(&info, 0, sizeof(info)); 7151 info.data = (uint8_t *)&sfp_type; 7152 info.length = sizeof(sfp_type); 7153 ret = hns3_get_module_eeprom(dev, &info); 7154 if (ret) 7155 return ret; 7156 7157 switch (sfp_type.type) { 7158 case HNS3_SFF8024_ID_SFP: 7159 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7160 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7161 break; 7162 case HNS3_SFF8024_ID_QSFP_8438: 7163 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7164 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7165 break; 7166 case HNS3_SFF8024_ID_QSFP_8436_8636: 7167 if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) { 7168 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7169 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7170 } else { 7171 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7172 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7173 } 7174 break; 7175 case HNS3_SFF8024_ID_QSFP28_8636: 7176 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7177 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7178 break; 7179 default: 7180 hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n", 7181 sfp_type.type, sfp_type.ext_type); 7182 return -EINVAL; 7183 } 7184 7185 return 0; 7186 } 7187 7188 void 7189 hns3_clock_gettime(struct timeval *tv) 7190 { 7191 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 7192 #define CLOCK_TYPE CLOCK_MONOTONIC_RAW 7193 #else 7194 #define CLOCK_TYPE CLOCK_MONOTONIC 7195 #endif 7196 #define NSEC_TO_USEC_DIV 1000 7197 7198 struct timespec spec; 7199 (void)clock_gettime(CLOCK_TYPE, &spec); 7200 7201 tv->tv_sec = spec.tv_sec; 7202 tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV; 7203 } 7204 7205 uint64_t 7206 hns3_clock_calctime_ms(struct 
timeval *tv) 7207 { 7208 return (uint64_t)tv->tv_sec * MSEC_PER_SEC + 7209 tv->tv_usec / USEC_PER_MSEC; 7210 } 7211 7212 uint64_t 7213 hns3_clock_gettime_ms(void) 7214 { 7215 struct timeval tv; 7216 7217 hns3_clock_gettime(&tv); 7218 return hns3_clock_calctime_ms(&tv); 7219 } 7220 7221 static int 7222 hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) 7223 { 7224 uint32_t hint = HNS3_IO_FUNC_HINT_NONE; 7225 7226 RTE_SET_USED(key); 7227 7228 if (strcmp(value, "vec") == 0) 7229 hint = HNS3_IO_FUNC_HINT_VEC; 7230 else if (strcmp(value, "sve") == 0) 7231 hint = HNS3_IO_FUNC_HINT_SVE; 7232 else if (strcmp(value, "simple") == 0) 7233 hint = HNS3_IO_FUNC_HINT_SIMPLE; 7234 else if (strcmp(value, "common") == 0) 7235 hint = HNS3_IO_FUNC_HINT_COMMON; 7236 7237 /* If the hint is valid then update output parameters */ 7238 if (hint != HNS3_IO_FUNC_HINT_NONE) 7239 *(uint32_t *)extra_args = hint; 7240 7241 return 0; 7242 } 7243 7244 static const char * 7245 hns3_get_io_hint_func_name(uint32_t hint) 7246 { 7247 switch (hint) { 7248 case HNS3_IO_FUNC_HINT_VEC: 7249 return "vec"; 7250 case HNS3_IO_FUNC_HINT_SVE: 7251 return "sve"; 7252 case HNS3_IO_FUNC_HINT_SIMPLE: 7253 return "simple"; 7254 case HNS3_IO_FUNC_HINT_COMMON: 7255 return "common"; 7256 default: 7257 return "none"; 7258 } 7259 } 7260 7261 static int 7262 hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) 7263 { 7264 uint64_t val; 7265 7266 RTE_SET_USED(key); 7267 7268 val = strtoull(value, NULL, 16); 7269 *(uint64_t *)extra_args = val; 7270 7271 return 0; 7272 } 7273 7274 void 7275 hns3_parse_devargs(struct rte_eth_dev *dev) 7276 { 7277 struct hns3_adapter *hns = dev->data->dev_private; 7278 uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7279 uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7280 struct hns3_hw *hw = &hns->hw; 7281 uint64_t dev_caps_mask = 0; 7282 struct rte_kvargs *kvlist; 7283 7284 if (dev->device->devargs == NULL) 7285 return; 7286 7287 kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL); 7288 if (!kvlist) 7289 return; 7290 7291 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT, 7292 &hns3_parse_io_hint_func, &rx_func_hint); 7293 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT, 7294 &hns3_parse_io_hint_func, &tx_func_hint); 7295 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK, 7296 &hns3_parse_dev_caps_mask, &dev_caps_mask); 7297 rte_kvargs_free(kvlist); 7298 7299 if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7300 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT, 7301 hns3_get_io_hint_func_name(rx_func_hint)); 7302 hns->rx_func_hint = rx_func_hint; 7303 if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7304 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT, 7305 hns3_get_io_hint_func_name(tx_func_hint)); 7306 hns->tx_func_hint = tx_func_hint; 7307 7308 if (dev_caps_mask != 0) 7309 hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".", 7310 HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask); 7311 hns->dev_caps_mask = dev_caps_mask; 7312 } 7313 7314 static const struct eth_dev_ops hns3_eth_dev_ops = { 7315 .dev_configure = hns3_dev_configure, 7316 .dev_start = hns3_dev_start, 7317 .dev_stop = hns3_dev_stop, 7318 .dev_close = hns3_dev_close, 7319 .promiscuous_enable = hns3_dev_promiscuous_enable, 7320 .promiscuous_disable = hns3_dev_promiscuous_disable, 7321 .allmulticast_enable = hns3_dev_allmulticast_enable, 7322 .allmulticast_disable = hns3_dev_allmulticast_disable, 7323 .mtu_set = hns3_dev_mtu_set, 7324 .stats_get = hns3_stats_get, 7325 
.stats_reset = hns3_stats_reset, 7326 .xstats_get = hns3_dev_xstats_get, 7327 .xstats_get_names = hns3_dev_xstats_get_names, 7328 .xstats_reset = hns3_dev_xstats_reset, 7329 .xstats_get_by_id = hns3_dev_xstats_get_by_id, 7330 .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, 7331 .dev_infos_get = hns3_dev_infos_get, 7332 .fw_version_get = hns3_fw_version_get, 7333 .rx_queue_setup = hns3_rx_queue_setup, 7334 .tx_queue_setup = hns3_tx_queue_setup, 7335 .rx_queue_release = hns3_dev_rx_queue_release, 7336 .tx_queue_release = hns3_dev_tx_queue_release, 7337 .rx_queue_start = hns3_dev_rx_queue_start, 7338 .rx_queue_stop = hns3_dev_rx_queue_stop, 7339 .tx_queue_start = hns3_dev_tx_queue_start, 7340 .tx_queue_stop = hns3_dev_tx_queue_stop, 7341 .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, 7342 .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, 7343 .rxq_info_get = hns3_rxq_info_get, 7344 .txq_info_get = hns3_txq_info_get, 7345 .rx_burst_mode_get = hns3_rx_burst_mode_get, 7346 .tx_burst_mode_get = hns3_tx_burst_mode_get, 7347 .flow_ctrl_get = hns3_flow_ctrl_get, 7348 .flow_ctrl_set = hns3_flow_ctrl_set, 7349 .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set, 7350 .mac_addr_add = hns3_add_mac_addr, 7351 .mac_addr_remove = hns3_remove_mac_addr, 7352 .mac_addr_set = hns3_set_default_mac_addr, 7353 .set_mc_addr_list = hns3_set_mc_mac_addr_list, 7354 .link_update = hns3_dev_link_update, 7355 .rss_hash_update = hns3_dev_rss_hash_update, 7356 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, 7357 .reta_update = hns3_dev_rss_reta_update, 7358 .reta_query = hns3_dev_rss_reta_query, 7359 .flow_ops_get = hns3_dev_flow_ops_get, 7360 .vlan_filter_set = hns3_vlan_filter_set, 7361 .vlan_tpid_set = hns3_vlan_tpid_set, 7362 .vlan_offload_set = hns3_vlan_offload_set, 7363 .vlan_pvid_set = hns3_vlan_pvid_set, 7364 .get_reg = hns3_get_regs, 7365 .get_module_info = hns3_get_module_info, 7366 .get_module_eeprom = hns3_get_module_eeprom, 7367 .get_dcb_info = hns3_get_dcb_info, 7368 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, 7369 .fec_get_capability = hns3_fec_get_capability, 7370 .fec_get = hns3_fec_get, 7371 .fec_set = hns3_fec_set, 7372 .tm_ops_get = hns3_tm_ops_get, 7373 .tx_done_cleanup = hns3_tx_done_cleanup, 7374 .timesync_enable = hns3_timesync_enable, 7375 .timesync_disable = hns3_timesync_disable, 7376 .timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp, 7377 .timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp, 7378 .timesync_adjust_time = hns3_timesync_adjust_time, 7379 .timesync_read_time = hns3_timesync_read_time, 7380 .timesync_write_time = hns3_timesync_write_time, 7381 }; 7382 7383 static const struct hns3_reset_ops hns3_reset_ops = { 7384 .reset_service = hns3_reset_service, 7385 .stop_service = hns3_stop_service, 7386 .prepare_reset = hns3_prepare_reset, 7387 .wait_hardware_ready = hns3_wait_hardware_ready, 7388 .reinit_dev = hns3_reinit_dev, 7389 .restore_conf = hns3_restore_conf, 7390 .start_service = hns3_start_service, 7391 }; 7392 7393 static int 7394 hns3_dev_init(struct rte_eth_dev *eth_dev) 7395 { 7396 struct hns3_adapter *hns = eth_dev->data->dev_private; 7397 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 7398 struct rte_ether_addr *eth_addr; 7399 struct hns3_hw *hw = &hns->hw; 7400 int ret; 7401 7402 PMD_INIT_FUNC_TRACE(); 7403 7404 eth_dev->process_private = (struct hns3_process_private *) 7405 rte_zmalloc_socket("hns3_filter_list", 7406 sizeof(struct hns3_process_private), 7407 RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node); 7408 if 
(eth_dev->process_private == NULL) { 7409 PMD_INIT_LOG(ERR, "Failed to alloc memory for process private"); 7410 return -ENOMEM; 7411 } 7412 7413 hns3_flow_init(eth_dev); 7414 7415 hns3_set_rxtx_function(eth_dev); 7416 eth_dev->dev_ops = &hns3_eth_dev_ops; 7417 eth_dev->rx_queue_count = hns3_rx_queue_count; 7418 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 7419 ret = hns3_mp_init_secondary(); 7420 if (ret) { 7421 PMD_INIT_LOG(ERR, "Failed to init for secondary " 7422 "process, ret = %d", ret); 7423 goto err_mp_init_secondary; 7424 } 7425 7426 hw->secondary_cnt++; 7427 return 0; 7428 } 7429 7430 ret = hns3_mp_init_primary(); 7431 if (ret) { 7432 PMD_INIT_LOG(ERR, 7433 "Failed to init for primary process, ret = %d", 7434 ret); 7435 goto err_mp_init_primary; 7436 } 7437 7438 hw->adapter_state = HNS3_NIC_UNINITIALIZED; 7439 hns->is_vf = false; 7440 hw->data = eth_dev->data; 7441 hns3_parse_devargs(eth_dev); 7442 7443 /* 7444 * Set the default max packet size according to the default MTU 7445 * value in the DPDK framework. 7446 */ 7447 hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD; 7448 7449 ret = hns3_reset_init(hw); 7450 if (ret) 7451 goto err_init_reset; 7452 hw->reset.ops = &hns3_reset_ops; 7453 7454 ret = hns3_init_pf(eth_dev); 7455 if (ret) { 7456 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret); 7457 goto err_init_pf; 7458 } 7459 7460 /* Allocate memory for storing MAC addresses */ 7461 eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac", 7462 sizeof(struct rte_ether_addr) * 7463 HNS3_UC_MACADDR_NUM, 0); 7464 if (eth_dev->data->mac_addrs == NULL) { 7465 PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed " 7466 "to store MAC addresses", 7467 sizeof(struct rte_ether_addr) * 7468 HNS3_UC_MACADDR_NUM); 7469 ret = -ENOMEM; 7470 goto err_rte_zmalloc; 7471 } 7472 7473 eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr; 7474 if (!rte_is_valid_assigned_ether_addr(eth_addr)) { 7475 rte_eth_random_addr(hw->mac.mac_addr); 7476 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 7477 (struct rte_ether_addr *)hw->mac.mac_addr); 7478 hns3_warn(hw, "default mac_addr from firmware is an invalid " 7479 "unicast address, using random MAC address %s", 7480 mac_str); 7481 } 7482 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, 7483 &eth_dev->data->mac_addrs[0]); 7484 7485 hw->adapter_state = HNS3_NIC_INITIALIZED; 7486 7487 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 7488 SCHEDULE_PENDING) { 7489 hns3_err(hw, "Reschedule reset service after dev_init"); 7490 hns3_schedule_reset(hns); 7491 } else { 7492 /* IMP will wait ready flag before reset */ 7493 hns3_notify_reset_ready(hw, false); 7494 } 7495 7496 hns3_info(hw, "hns3 dev initialization successful!"); 7497 return 0; 7498 7499 err_rte_zmalloc: 7500 hns3_uninit_pf(eth_dev); 7501 7502 err_init_pf: 7503 rte_free(hw->reset.wait_data); 7504 7505 err_init_reset: 7506 hns3_mp_uninit_primary(); 7507 7508 err_mp_init_primary: 7509 err_mp_init_secondary: 7510 eth_dev->dev_ops = NULL; 7511 eth_dev->rx_pkt_burst = NULL; 7512 eth_dev->rx_descriptor_status = NULL; 7513 eth_dev->tx_pkt_burst = NULL; 7514 eth_dev->tx_pkt_prepare = NULL; 7515 eth_dev->tx_descriptor_status = NULL; 7516 rte_free(eth_dev->process_private); 7517 eth_dev->process_private = NULL; 7518 return ret; 7519 } 7520 7521 static int 7522 hns3_dev_uninit(struct rte_eth_dev *eth_dev) 7523 { 7524 struct hns3_adapter *hns = eth_dev->data->dev_private; 7525 struct hns3_hw *hw = &hns->hw; 7526 7527 PMD_INIT_FUNC_TRACE(); 7528 7529 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
7530 rte_free(eth_dev->process_private); 7531 eth_dev->process_private = NULL; 7532 return 0; 7533 } 7534 7535 if (hw->adapter_state < HNS3_NIC_CLOSING) 7536 hns3_dev_close(eth_dev); 7537 7538 hw->adapter_state = HNS3_NIC_REMOVED; 7539 return 0; 7540 } 7541 7542 static int 7543 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 7544 struct rte_pci_device *pci_dev) 7545 { 7546 return rte_eth_dev_pci_generic_probe(pci_dev, 7547 sizeof(struct hns3_adapter), 7548 hns3_dev_init); 7549 } 7550 7551 static int 7552 eth_hns3_pci_remove(struct rte_pci_device *pci_dev) 7553 { 7554 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit); 7555 } 7556 7557 static const struct rte_pci_id pci_id_hns3_map[] = { 7558 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) }, 7559 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) }, 7560 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) }, 7561 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, 7562 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, 7563 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, 7564 { .vendor_id = 0, }, /* sentinel */ 7565 }; 7566 7567 static struct rte_pci_driver rte_hns3_pmd = { 7568 .id_table = pci_id_hns3_map, 7569 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 7570 .probe = eth_hns3_pci_probe, 7571 .remove = eth_hns3_pci_remove, 7572 }; 7573 7574 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd); 7575 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map); 7576 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); 7577 RTE_PMD_REGISTER_PARAM_STRING(net_hns3, 7578 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " 7579 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common " 7580 HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "); 7581 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE); 7582 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE); 7583
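/*
 * Example devargs for the parameters registered above (a sketch; the PCI
 * address is hypothetical):
 *
 *	dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common,dev_caps_mask=0x1 -- -i
 *
 * dev_caps_mask is parsed as a hexadecimal bitmask (strtoull with base 16
 * in hns3_parse_dev_caps_mask) and is mainly a debug aid for masking out
 * reported device capabilities.
 */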