/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_kvargs.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"

#define HNS3_DEFAULT_PORT_CONF_BURST_SIZE	32
#define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM	1

#define HNS3_SERVICE_INTERVAL		1000000 /* us */
#define HNS3_SERVICE_QUICK_INTERVAL	10
#define HNS3_INVALID_PVID		0xFFFF

#define HNS3_FILTER_TYPE_VF		0
#define HNS3_FILTER_TYPE_PORT		1
#define HNS3_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HNS3_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HNS3_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HNS3_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HNS3_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HNS3_FILTER_FE_EGRESS		(HNS3_FILTER_FE_NIC_EGRESS_B \
					| HNS3_FILTER_FE_ROCE_EGRESS_B)
#define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
					| HNS3_FILTER_FE_ROCE_INGRESS_B)

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

#define HNS3_VECTOR0_IMP_RESET_INT_B	1
#define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
#define HNS3_VECTOR0_IMP_RD_POISON_B	5U
#define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U

#define HNS3_RESET_WAIT_MS	100
#define HNS3_RESET_WAIT_CNT	200

/* FEC mode order defined in HNS3 hardware */
#define HNS3_HW_FEC_MODE_NOFEC	0
#define HNS3_HW_FEC_MODE_BASER	1
#define HNS3_HW_FEC_MODE_RS	2

enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,
	HNS3_VECTOR0_EVENT_MBX,
	HNS3_VECTOR0_EVENT_ERR,
	HNS3_VECTOR0_EVENT_PTP,
	HNS3_VECTOR0_EVENT_OTHER,
};

static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
	{ ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};

static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
						  uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
				    int on);
static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
static bool hns3_update_link_status(struct hns3_hw *hw);

static int hns3_add_mc_addr(struct hns3_hw *hw,
			    struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_addr(struct hns3_hw *hw,
			       struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct hns3_hw *hw);
static int hns3_do_stop(struct hns3_adapter *hns);

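/*
 * Note: unlike rte_ether_format_addr(), the helper below deliberately
 * masks bytes 1-3 of the address ("%02X:**:**:**:%02X:%02X"), so only
 * the first and the last two bytes ever reach the logs. This looks
 * intentional (avoid leaking complete MAC addresses in log output), so
 * callers should not expect a fully parseable address string from it.
 */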
void hns3_ether_format_addr(char *buf, uint16_t size,
			    const struct rte_ether_addr *ether_addr)
{
	snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
		 ether_addr->addr_bytes[0],
		 ether_addr->addr_bytes[4],
		 ether_addr->addr_bytes[5]);
}

static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3_evt_cause
hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
			  uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.imp_cnt++;
		hns3_warn(hw, "IMP reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw, "IMP reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
			     uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.global_cnt++;
		hns3_warn(hw, "Global reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw,
			  "Global reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t vector0_int_stats;
	uint32_t cmdq_src_val;
	uint32_t hw_err_src_reg;
	uint32_t val;
	enum hns3_evt_cause ret;
	bool is_delay;

	/* fetch the events from their corresponding regs */
	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);

	is_delay = clearval == NULL ? true : false;
	/*
	 * Assumption: if by any chance reset and mailbox events are reported
	 * together, we only process the reset event and defer the processing
	 * of the mailbox events. Since we have not cleared the RX CMDQ event
	 * this time, we will receive another interrupt from H/W just for the
	 * mailbox.
	 */
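	/*
	 * Events are classified in strict priority order: IMP reset first,
	 * then global reset, then 1588 (PTP), then MSI-X/RAS errors, and
	 * only then the mailbox; anything left over is "other".
	 */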
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
		ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Global reset */
	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
		ret = hns3_proc_global_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Check for vector0 1588 event source */
	if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
		val = BIT(HNS3_VECTOR0_1588_INT_B);
		ret = HNS3_VECTOR0_EVENT_PTP;
		goto out;
	}

	/* check for vector0 msix event source */
	if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
		val = vector0_int_stats | hw_err_src_reg;
		ret = HNS3_VECTOR0_EVENT_ERR;
		goto out;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
		cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		val = cmdq_src_val;
		ret = HNS3_VECTOR0_EVENT_MBX;
		goto out;
	}

	val = vector0_int_stats;
	ret = HNS3_VECTOR0_EVENT_OTHER;
out:

	if (clearval)
		*clearval = val;
	return ret;
}

static bool
hns3_is_1588_event_type(uint32_t event_type)
{
	return (event_type == HNS3_VECTOR0_EVENT_PTP);
}

static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
	if (event_type == HNS3_VECTOR0_EVENT_RST ||
	    hns3_is_1588_event_type(event_type))
		hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
	else if (event_type == HNS3_VECTOR0_EVENT_MBX)
		hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
	uint32_t vector0_int_stats;

	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);

	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during IMP reset interrupt");

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during Global reset interrupt");

	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
			       BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
			       BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
			       BIT(HNS3_VECTOR0_CORERESET_INT_B));
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
			       BIT(HNS3_VECTOR0_1588_INT_B));
}

static void
hns3_handle_mac_tnl(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	uint32_t status;
	int ret;

	/* query and clear mac tnl interrupts */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
		return;
	}

	status = rte_le_to_cpu_32(desc.data[0]);
	if (status) {
		hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
					  false);
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret)
			hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
				 ret);
	}
}

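/*
 * Top half of the misc (vector 0) interrupt. The flow is: mask vector 0,
 * classify the cause, dispatch it (RAS/MSI-X error, reset scheduling, or
 * mailbox processing), acknowledge the handled source bits, and unmask
 * vector 0 again. The reads of the three status registers below are only
 * for diagnostics in the log messages.
 */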
static void
hns3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_evt_cause event_cause;
	uint32_t clearval = 0;
	uint32_t vector0_int;
	uint32_t ras_int;
	uint32_t cmdq_int;

	/* Disable interrupt */
	hns3_pf_disable_irq0(hw);

	event_cause = hns3_check_event_cause(hns, &clearval);
	vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	/* vector 0 interrupt is shared with reset and mailbox source events. */
	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
		hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
		hns3_handle_mac_tnl(hw);
		hns3_handle_error(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
		hns3_warn(hw, "received reset interrupt");
		hns3_schedule_reset(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
		hns3_dev_handle_mbx_msg(hw);
	} else {
		hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
	}

	hns3_clear_event_cause(hw, event_cause, clearval);
	/* Enable interrupt if it is not caused by reset */
	hns3_pf_enable_irq0(hw);
}

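/*
 * The PF VLAN filter command addresses the 4096-entry VLAN bitmap in
 * slices of HNS3_VLAN_ID_OFFSET_STEP (160) VLAN IDs: vlan_offset selects
 * the slice, and one bit inside vlan_offset_bitmap (160 / 8 = 20 bytes)
 * selects the VLAN within it. Worked example for vlan_id = 1000:
 *   vlan_offset_base     = 1000 / 160       = 6
 *   vlan_offset_byte     = (1000 % 160) / 8 = 5
 *   vlan_offset_byte_val = 1 << (1000 % 8)  = 0x01
 */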
static int
hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3_VLAN_ID_OFFSET_STEP	160
#define HNS3_VLAN_BYTE_SIZE		8
	struct hns3_vlan_filter_pf_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	uint8_t vlan_offset_byte_val;
	struct hns3_cmd_desc desc;
	uint8_t vlan_offset_byte;
	uint8_t vlan_offset_base;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
			   HNS3_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);

	req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_base;
	req->vlan_cfg = on ? 0 : 1;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
			 vlan_id, ret);

	return ret;
}

static void
hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id) {
			if (vlan_entry->hd_tbl_status)
				hns3_set_port_vlan_filter(hns, vlan_id, 0);
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			break;
		}
	}
}

static void
hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
			bool writen_to_tbl)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id)
			return;
	}

	vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
	if (vlan_entry == NULL) {
		hns3_err(hw, "Failed to malloc hns3 vlan table");
		return;
	}

	vlan_entry->hd_tbl_status = writen_to_tbl;
	vlan_entry->vlan_id = vlan_id;

	LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
}

static int
hns3_restore_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;
	uint16_t vlan_id;
	int ret = 0;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
		return hns3_vlan_pvid_configure(hns,
						hw->port_base_vlan_cfg.pvid, 1);

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			vlan_id = vlan_entry->vlan_id;
			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
			if (ret)
				break;
		}
	}

	return ret;
}

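/*
 * pf->vlan_list shadows the hardware VLAN filter table: each entry's
 * hd_tbl_status records whether that VLAN is currently programmed into
 * hardware. While a port-based VLAN (PVID) is active, user VLANs are
 * kept in the list with hd_tbl_status == false and are only written to
 * hardware again once the PVID is disabled (see the comment in
 * hns3_vlan_filter_configure() below).
 */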
static int
hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
	struct hns3_hw *hw = &hns->hw;
	bool writen_to_tbl = false;
	int ret = 0;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. So, to receive packets without vlan, vlan id
	 * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
	 */
	if (on == 0 && vlan_id == 0)
		return 0;

	/*
	 * When port base vlan is enabled, we use the port base vlan as the
	 * vlan filter condition. In this case, we don't update the vlan
	 * filter table when the user adds a new vlan or removes an existing
	 * vlan, we just update the vlan list. The vlan ids in the vlan list
	 * will be written to the vlan filter table once port base vlan is
	 * disabled.
	 */
	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
		writen_to_tbl = true;
	}

	if (ret == 0) {
		if (on)
			hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
		else
			hns3_rm_dev_vlan_table(hns, vlan_id);
	}
	return ret;
}

static int
hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
			 uint16_t tpid)
{
	struct hns3_rx_vlan_type_cfg_cmd *rx_req;
	struct hns3_tx_vlan_type_cfg_cmd *tx_req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
	     vlan_type != ETH_VLAN_TYPE_OUTER)) {
		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_VLAN) {
		hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
			 ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
	tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
			 ret);
	return ret;
}

static int
hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

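/*
 * Both the Rx and Tx vtag commands below target a set of vports through a
 * (vf_offset, vf_bitmap) pair: vf_offset = vport_id / HNS3_VF_NUM_PER_CMD
 * picks the bitmap byte group, and bit (vport_id % HNS3_VF_NUM_PER_BYTE)
 * selects the vport within it. Since only the PF vport is configured here
 * (vport_id == HNS3_PF_FUNC_ID, i.e. 0), this reduces to vf_offset = 0
 * and bitmap = 0x1.
 */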
static int
hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_rx_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
		     vcfg->strip_tag1_discard_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
		     vcfg->strip_tag2_discard_en ? 1 : 0);
	/*
	 * In current version VF is not supported when PF is driven by DPDK
	 * driver, just need to configure parameters for PF vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
	return ret;
}

static void
hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;

	memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
}

static void
hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;

	memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
}

static int
hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
{
	struct hns3_rx_vtag_cfg rxvlan_cfg;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		rxvlan_cfg.strip_tag1_en = false;
		rxvlan_cfg.strip_tag2_en = enable;
		rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		rxvlan_cfg.strip_tag1_en = enable;
		rxvlan_cfg.strip_tag2_en = true;
		rxvlan_cfg.strip_tag2_discard_en = true;
	}

	rxvlan_cfg.strip_tag1_discard_en = false;
	rxvlan_cfg.vlan1_vlan_prionly = false;
	rxvlan_cfg.vlan2_vlan_prionly = false;
	rxvlan_cfg.rx_vlan_offload_en = enable;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
	if (ret) {
		hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret);
		return ret;
	}

	hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);

	return ret;
}

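/*
 * The filter-control command enables or disables whole filter stages:
 * vlan_type selects the VF (HNS3_FILTER_TYPE_VF) or port
 * (HNS3_FILTER_TYPE_PORT) table, and fe_type is a bitmask over the
 * NIC/RoCE ingress and egress engines (see the HNS3_FILTER_FE_* macros
 * at the top of this file). Passing filter_en == false clears all the
 * requested stage bits.
 */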
static int
hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
			  uint8_t fe_type, bool filter_en, uint8_t vf_id)
{
	struct hns3_vlan_filter_ctrl_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set vlan filter fail, ret =%d", ret);

	return ret;
}

static int
hns3_vlan_filter_init(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
					HNS3_FILTER_FE_EGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret) {
		hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);

	return ret;
}

static int
hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, enable,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to %s port vlan filter, ret = %d",
			 enable ? "enable" : "disable", ret);

	return ret;
}

static int
hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_rxmode *rxmode;
	unsigned int tmp_mask;
	bool enable;
	int ret = 0;

	rte_spinlock_lock(&hw->lock);
	rxmode = &dev->data->dev_conf.rxmode;
	tmp_mask = (unsigned int)mask;
	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
		/* ignore vlan filter configuration during promiscuous mode */
		if (!dev->data->promiscuous) {
			/* Enable or disable VLAN filter */
			enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
				 true : false;

			ret = hns3_enable_vlan_filter(hns, enable);
			if (ret) {
				rte_spinlock_unlock(&hw->lock);
				hns3_err(hw, "failed to %s rx filter, ret = %d",
					 enable ? "enable" : "disable", ret);
				return ret;
			}
		}
	}

	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
			 true : false;

		ret = hns3_en_hw_strip_rxvtag(hns, enable);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			hns3_err(hw, "failed to %s rx strip, ret = %d",
				 enable ? "enable" : "disable", ret);
			return ret;
		}
	}

	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_tx_cfg_cmd *req;
	struct hns3_cmd_desc desc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = vcfg->default_tag1;
	req->def_vlan_tag2 = vcfg->default_tag2;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
		     vcfg->accept_tag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
		     vcfg->accept_untag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
		     vcfg->accept_tag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
		     vcfg->accept_untag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
		     vcfg->insert_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
		     vcfg->insert_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
		     vcfg->tag_shift_mode_en ? 1 : 0);

	/*
	 * In current version VF is not supported when PF is driven by DPDK
	 * driver, just need to configure parameters for PF vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);

	return ret;
}

static int
hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
		     uint16_t pvid)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_vtag_cfg txvlan_cfg;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
		txvlan_cfg.accept_tag1 = true;
		txvlan_cfg.insert_tag1_en = false;
		txvlan_cfg.default_tag1 = 0;
	} else {
		txvlan_cfg.accept_tag1 =
			hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
		txvlan_cfg.insert_tag1_en = true;
		txvlan_cfg.default_tag1 = pvid;
	}

	txvlan_cfg.accept_untag1 = true;
	txvlan_cfg.accept_tag2 = true;
	txvlan_cfg.accept_untag2 = true;
	txvlan_cfg.insert_tag2_en = false;
	txvlan_cfg.default_tag2 = 0;
	txvlan_cfg.tag_shift_mode_en = true;

	ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
	if (ret) {
		hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
			 ret);
		return ret;
	}

	hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
	return ret;
}

static void
hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
			vlan_entry->hd_tbl_status = false;
		}
	}

	if (is_del_list) {
		vlan_entry = LIST_FIRST(&pf->vlan_list);
		while (vlan_entry) {
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			vlan_entry = LIST_FIRST(&pf->vlan_list);
		}
	}
}

static void
hns3_add_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (!vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
			vlan_entry->hd_tbl_status = true;
		}
	}
}

static void
hns3_remove_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	hns3_rm_all_vlan_table(hns, true);
	if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
		ret = hns3_set_port_vlan_filter(hns,
						hw->port_base_vlan_cfg.pvid, 0);
		if (ret) {
			hns3_err(hw, "Failed to remove all vlan table, ret =%d",
				 ret);
			return;
		}
	}
}

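/*
 * PVID transitions and the VLAN filter table:
 *  - enable (or change) PVID: drop the old PVID filter entry if any,
 *    un-program all user VLANs (they stay cached in pf->vlan_list), and
 *    program the new PVID as the only filter entry;
 *  - disable PVID: drop the PVID filter entry and re-program every VLAN
 *    cached in the list.
 */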
static int
hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
				uint16_t port_base_vlan_state,
				uint16_t new_pvid)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t old_pvid;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
		old_pvid = hw->port_base_vlan_cfg.pvid;
		if (old_pvid != HNS3_INVALID_PVID) {
			ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
			if (ret) {
				hns3_err(hw, "failed to remove old pvid %u, "
					 "ret = %d", old_pvid, ret);
				return ret;
			}
		}

		hns3_rm_all_vlan_table(hns, false);
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
		if (ret) {
			hns3_err(hw, "failed to add new pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}
	} else {
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
		if (ret) {
			hns3_err(hw, "failed to remove pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}

		hns3_add_all_vlan_table(hns);
	}
	return 0;
}

static int
hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
{
	struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
	struct hns3_rx_vtag_cfg rx_vlan_cfg;
	bool rx_strip_en;
	int ret;

	rx_strip_en = old_cfg->rx_vlan_offload_en;
	if (on) {
		rx_vlan_cfg.strip_tag1_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_en = true;
		rx_vlan_cfg.strip_tag2_discard_en = true;
	} else {
		rx_vlan_cfg.strip_tag1_en = false;
		rx_vlan_cfg.strip_tag2_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_discard_en = false;
	}
	rx_vlan_cfg.strip_tag1_discard_en = false;
	rx_vlan_cfg.vlan1_vlan_prionly = false;
	rx_vlan_cfg.vlan2_vlan_prionly = false;
	rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
	if (ret)
		return ret;

	hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
	return ret;
}

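/*
 * hns3_vlan_pvid_configure() applies the PVID in three steps - Tx vtag
 * config, Rx strip config, then the filter-table update - and unwinds in
 * reverse on failure: a filter-table error rolls back the strip setting
 * (vlan_filter_set_fail) before restoring the previous Tx config
 * (pvid_vlan_strip_fail), so hardware is left matching the cached
 * port_base_vlan_cfg state.
 */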
static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t port_base_vlan_state;
	int ret, err;

	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
		if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
			hns3_warn(hw, "Invalid operation! As current pvid set "
				  "is %u, disable pvid %u is invalid",
				  hw->port_base_vlan_cfg.pvid, pvid);
		return 0;
	}

	port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
				    HNS3_PORT_BASE_VLAN_DISABLE;
	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
			 ret);
		return ret;
	}

	ret = hns3_en_pvid_strip(hns, on);
	if (ret) {
		hns3_err(hw, "failed to config rx vlan strip for pvid, "
			 "ret = %d", ret);
		goto pvid_vlan_strip_fail;
	}

	if (pvid == HNS3_INVALID_PVID)
		goto out;
	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to update vlan filter entries, ret = %d",
			 ret);
		goto vlan_filter_set_fail;
	}

out:
	hw->port_base_vlan_cfg.state = port_base_vlan_state;
	hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
	return ret;

vlan_filter_set_fail:
	err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
				 HNS3_PORT_BASE_VLAN_ENABLE);
	if (err)
		hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
	err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
				   hw->port_base_vlan_cfg.pvid);
	if (err)
		hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

	return ret;
}

static int
hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool pvid_en_state_change;
	uint16_t pvid_state;
	int ret;

	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
		hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
			 RTE_ETHER_MAX_VLAN_ID);
		return -EINVAL;
	}

	/*
	 * If the PVID configuration state changes, the PVID state cached in
	 * struct hns3_tx_queue/hns3_rx_queue must be refreshed as well.
	 */
	pvid_state = hw->port_base_vlan_cfg.state;
	if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
	    (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
		pvid_en_state_change = false;
	else
		pvid_en_state_change = true;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_pvid_configure(hns, pvid, on);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		return ret;
	/*
	 * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE do the PVID related
	 * operations in Tx/Rx need to be processed by the PMD.
	 */
	if (pvid_en_state_change &&
	    hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
		hns3_update_all_queues_pvid_proc_en(hw);

	return 0;
}

static int
hns3_default_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
	 * table, packets without vlan won't be received. So, add vlan 0 as
	 * the default vlan.
	 */
	ret = hns3_vlan_filter_configure(hns, 0, 1);
	if (ret)
		hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
	return ret;
}

static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * This function can be called in the initialization and reset process.
	 * When called in the reset process, it means that the hardware has
	 * been reset successfully and we need to restore the hardware
	 * configuration to ensure that the hardware configuration remains
	 * unchanged before and after the reset.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
	}

	ret = hns3_vlan_filter_init(hns);
	if (ret) {
		hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
		return ret;
	}

	ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
				       RTE_ETHER_TYPE_VLAN);
	if (ret) {
		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
		return ret;
	}

	/*
	 * When in the reinit dev stage of the reset process, the following
	 * vlan-related configurations may differ from those at initialization,
	 * we will restore configurations to hardware in hns3_restore_vlan_table
	 * and hns3_restore_vlan_conf later.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
		if (ret) {
			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
			return ret;
		}

		ret = hns3_en_hw_strip_rxvtag(hns, false);
		if (ret) {
			hns3_err(hw, "rx strip configure fail in pf, ret =%d",
				 ret);
			return ret;
		}
	}

	return hns3_default_vlan_config(hns);
}

static int
hns3_restore_vlan_conf(struct hns3_adapter *hns)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	bool enable;
	int ret;

	if (!hw->data->promiscuous) {
		/* restore vlan filter states */
		offloads = hw->data->dev_conf.rxmode.offloads;
		enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
		ret = hns3_enable_vlan_filter(hns, enable);
		if (ret) {
			hns3_err(hw, "failed to restore vlan rx filter conf, "
				 "ret = %d", ret);
			return ret;
		}
	}

	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
	if (ret) {
		hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
	if (ret)
		hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);

	return ret;
}

static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_txmode *txmode;
	struct hns3_hw *hw = &hns->hw;
	int mask;
	int ret;

	txmode = &data->dev_conf.txmode;
	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
		hns3_warn(hw,
			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
			  "configuration is not supported! Ignore these two "
			  "parameters: hw_vlan_reject_tagged(%u), "
			  "hw_vlan_reject_untagged(%u)",
			  txmode->hw_vlan_reject_tagged,
			  txmode->hw_vlan_reject_untagged);

	/* Apply vlan offload setting */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
	ret = hns3_vlan_offload_set(dev, mask);
	if (ret) {
		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * If pvid config is not set in rte_eth_conf, the driver need not
	 * write any pvid related configuration to hardware.
	 */
	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
		return 0;

	/* Apply pvid setting */
	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
				 txmode->hw_vlan_insert_pvid);
	if (ret)
		hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
			 txmode->pvid, ret);

	return ret;
}

static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
		unsigned int tso_mss_max)
{
	struct hns3_cfg_tso_status_cmd *req;
	struct hns3_cmd_desc desc;
	uint16_t tso_mss;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hns3_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_min);
	req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_max);
	req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

	return hns3_cmd_send(hw, &desc, 1);
}

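/*
 * UMV ("unicast MAC VLAN") space is the pool of unicast MAC table entries
 * the firmware reserves for this function. Note the inverted encoding in
 * the command below: the HNS3_UMV_SPC_ALC_B bit is written as 0 to request
 * an allocation and 1 to free the space, and on allocation the firmware
 * returns the actually granted size in desc.data[1].
 */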
static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
		   uint16_t *allocated_size, bool is_alloc)
{
	struct hns3_umv_spc_alc_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_umv_spc_alc_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
	hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
	req->space_size = rte_cpu_to_le_32(space_size);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
			     is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = rte_le_to_cpu_32(desc.data[1]);

	return 0;
}

static int
hns3_init_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t allocated_size = 0;
	int ret;

	ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
				 true);
	if (ret)
		return ret;

	if (allocated_size < pf->wanted_umv_size)
		PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
			     pf->wanted_umv_size, allocated_size);

	pf->max_umv_size = (!!allocated_size) ? allocated_size :
			   pf->wanted_umv_size;
	pf->used_umv_size = 0;
	return 0;
}

static int
hns3_uninit_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	if (pf->max_umv_size == 0)
		return 0;

	ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
	if (ret)
		return ret;

	pf->max_umv_size = 0;

	return 0;
}

static bool
hns3_is_umv_space_full(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	bool is_full;

	is_full = (pf->used_umv_size >= pf->max_umv_size);

	return is_full;
}

static void
hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (is_free) {
		if (pf->used_umv_size > 0)
			pf->used_umv_size--;
	} else
		pf->used_umv_size++;
}

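/*
 * The MAC/VLAN table entry stores the station address split across two
 * little-endian fields: bytes 0-3 in mac_addr_hi32 and bytes 4-5 in
 * mac_addr_lo16. Worked example for 00:11:22:33:44:55:
 *   high_val = 0x33 << 24 | 0x22 << 16 | 0x11 << 8 | 0x00 = 0x33221100
 *   low_val  = 0x55 << 8  | 0x44                          = 0x5544
 */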
static void
hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
		      const uint8_t *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
			    ((uint32_t)mac_addr[2] << 16) |
			    ((uint32_t)mac_addr[1] << 8) |
			    (uint32_t)mac_addr[0];
	uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];

	hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
		hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
	new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
}

static int
hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
			     uint8_t resp_code,
			     enum hns3_mac_vlan_tbl_opcode op)
{
	if (cmdq_resp) {
		hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
			 cmdq_resp);
		return -EIO;
	}

	if (op == HNS3_MAC_VLAN_ADD) {
		if (resp_code == 0 || resp_code == 1) {
			return 0;
		} else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for uc_overflow");
			return -ENOSPC;
		} else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for mc_overflow");
			return -ENOSPC;
		}

		hns3_err(hw, "add mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_REMOVE) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "remove mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "remove mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_LKUP) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "lookup mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	}

	hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
		 op);

	return -EINVAL;
}

static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req,
			 struct hns3_cmd_desc *desc, bool is_mc)
{
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
	} else {
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, desc, 1);
	}
	if (ret) {
		hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
			 ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc[0].retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_LKUP);
}

static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
		      struct hns3_mac_vlan_tbl_entry_cmd *req,
		      struct hns3_cmd_desc *mc_desc)
{
	uint8_t resp_code;
	uint16_t retval;
	int cfg_status;
	int ret;

	if (mc_desc == NULL) {
		struct hns3_cmd_desc desc;

		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false);
		memcpy(desc.data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, &desc, 1);
		resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc.retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	} else {
		hns3_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		mc_desc[0].retval = 0;
		ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
		resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(mc_desc[0].retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	}

	if (ret) {
		hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}

	return cfg_status;
}

static int
hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req)
{
	struct hns3_cmd_desc desc;
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc.retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_REMOVE);
}

static int
hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cmd_desc desc[3];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint16_t egress_port = 0;
	uint8_t vf_id;
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	/*
	 * In current version VF is not supported when PF is driven by DPDK
	 * driver, just need to configure parameters for PF vport.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
		       HNS3_MAC_EPORT_VFID_S, vf_id);

	req.egress_port = rte_cpu_to_le_16(egress_port);

	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

	/*
	 * Look up the mac address in the mac_vlan table, and add it if the
	 * entry does not exist. Repeated unicast entries are not allowed in
	 * the mac vlan table.
	 */
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false);
	if (ret == -ENOENT) {
		if (!hns3_is_umv_space_full(hw)) {
			ret = hns3_add_mac_vlan_tbl(hw, &req, NULL);
			if (!ret)
				hns3_update_umv_space(hw, false);
			return ret;
		}

		hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

		return -ENOSPC;
	}

	hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

	/* check if we just hit the duplicate */
	if (ret == 0) {
		hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
		return 0;
	}

	hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
		 mac_str);

	return ret;
}

static int
hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int ret;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses */
		if (rte_is_same_ether_addr(addr, mac_addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to add mc mac addr, same addrs"
				 "(%s) is added by the set_mc_mac_addr_list "
				 "API", mac_str);
			return -EINVAL;
		}
	}

	ret = hns3_add_mc_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
			 mac_str, ret);
	}
	return ret;
}

static int
hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	ret = hns3_remove_mc_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d",
			 mac_str, ret);
	}
	return ret;
}

static int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * In the hns3 network engine, UC and MC mac addresses are added with
	 * different firmware commands, so we must determine whether the input
	 * address is a UC or a MC address before choosing the command. Note
	 * that it is recommended to set MC mac addresses through the API
	 * function rte_eth_dev_set_mc_addr_list: adding a MC address through
	 * rte_eth_dev_mac_addr_add may affect the usable number of UC mac
	 * addresses.
	 */
	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hns3_add_mc_addr_common(hw, mac_addr);
	else
		ret = hns3_add_uc_addr_common(hw, mac_addr);

	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
		return ret;
	}

	if (idx == 0)
		hw->mac.default_addr_setted = true;
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
	ret = hns3_remove_mac_vlan_tbl(hw, &req);
	if (ret == -ENOENT) /* mac addr doesn't exist in the mac vlan table. */
		return 0;
	else if (ret == 0)
		hns3_update_umv_space(hw, true);

	return ret;
}

static void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hns3_remove_mc_addr_common(hw, mac_addr);
	else
		ret = hns3_remove_uc_addr_common(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}
}

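/*
 * Replacing the default MAC is a three-step sequence under hw->lock:
 * remove the old address, add the new one, then update the pause-frame
 * address. Each later failure rolls the earlier steps back (see the
 * err_pause_addr_cfg / err_add_uc_addr paths), so a failed call leaves
 * the previously configured address in place whenever the removal itself
 * succeeded.
 */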
static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mac_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *oaddr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	bool default_addr_setted;
	bool rm_succes = false;
	int ret, ret_val;

	/*
	 * The rte layer of the DPDK framework guarantees that the input
	 * parameter mac_addr is a valid address.
	 */
	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
	default_addr_setted = hw->mac.default_addr_setted;
	if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr))
		return 0;

	rte_spinlock_lock(&hw->lock);
	if (default_addr_setted) {
		ret = hns3_remove_uc_addr_common(hw, oaddr);
		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       oaddr);
			hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
				  mac_str, ret);
			rm_succes = false;
		} else
			rm_succes = true;
	}

	ret = hns3_add_uc_addr_common(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
		goto err_add_uc_addr;
	}

	ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
	if (ret) {
		hns3_err(hw, "Failed to configure mac pause address: %d", ret);
		goto err_pause_addr_cfg;
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	hw->mac.default_addr_setted = true;
	rte_spinlock_unlock(&hw->lock);

	return 0;

err_pause_addr_cfg:
	ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_warn(hw,
			  "Failed to roll back to del setted mac addr(%s): %d",
			  mac_str, ret_val);
	}

err_add_uc_addr:
	if (rm_succes) {
		ret_val = hns3_add_uc_addr_common(hw, oaddr);
		if (ret_val) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       oaddr);
			hns3_warn(hw,
				  "Failed to restore old uc mac addr(%s): %d",
				  mac_str, ret_val);
			hw->mac.default_addr_setted = false;
		}
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

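/*
 * Walks the whole UC address array kept in dev data: with del == true it
 * removes every configured address from hardware (e.g. on stop/uninit),
 * with del == false it re-adds them (e.g. when restoring after a reset).
 * The walk continues past individual failures and reports the last error.
 */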
"remove" : "restore", 1852 mac_str, i, ret); 1853 } 1854 } 1855 return err; 1856 } 1857 1858 static void 1859 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr) 1860 { 1861 #define HNS3_VF_NUM_IN_FIRST_DESC 192 1862 uint8_t word_num; 1863 uint8_t bit_num; 1864 1865 if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) { 1866 word_num = vfid / 32; 1867 bit_num = vfid % 32; 1868 if (clr) 1869 desc[1].data[word_num] &= 1870 rte_cpu_to_le_32(~(1UL << bit_num)); 1871 else 1872 desc[1].data[word_num] |= 1873 rte_cpu_to_le_32(1UL << bit_num); 1874 } else { 1875 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32; 1876 bit_num = vfid % 32; 1877 if (clr) 1878 desc[2].data[word_num] &= 1879 rte_cpu_to_le_32(~(1UL << bit_num)); 1880 else 1881 desc[2].data[word_num] |= 1882 rte_cpu_to_le_32(1UL << bit_num); 1883 } 1884 } 1885 1886 static int 1887 hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1888 { 1889 struct hns3_mac_vlan_tbl_entry_cmd req; 1890 struct hns3_cmd_desc desc[3]; 1891 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1892 uint8_t vf_id; 1893 int ret; 1894 1895 /* Check if mac addr is valid */ 1896 if (!rte_is_multicast_ether_addr(mac_addr)) { 1897 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1898 mac_addr); 1899 hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid", 1900 mac_str); 1901 return -EINVAL; 1902 } 1903 1904 memset(&req, 0, sizeof(req)); 1905 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1906 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1907 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1908 if (ret) { 1909 /* This mac addr do not exist, add new entry for it */ 1910 memset(desc[0].data, 0, sizeof(desc[0].data)); 1911 memset(desc[1].data, 0, sizeof(desc[0].data)); 1912 memset(desc[2].data, 0, sizeof(desc[0].data)); 1913 } 1914 1915 /* 1916 * In current version VF is not supported when PF is driven by DPDK 1917 * driver, just need to configure parameters for PF vport. 1918 */ 1919 vf_id = HNS3_PF_FUNC_ID; 1920 hns3_update_desc_vfid(desc, vf_id, false); 1921 ret = hns3_add_mac_vlan_tbl(hw, &req, desc); 1922 if (ret) { 1923 if (ret == -ENOSPC) 1924 hns3_err(hw, "mc mac vlan table is full"); 1925 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1926 mac_addr); 1927 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret); 1928 } 1929 1930 return ret; 1931 } 1932 1933 static int 1934 hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1935 { 1936 struct hns3_mac_vlan_tbl_entry_cmd req; 1937 struct hns3_cmd_desc desc[3]; 1938 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1939 uint8_t vf_id; 1940 int ret; 1941 1942 /* Check if mac addr is valid */ 1943 if (!rte_is_multicast_ether_addr(mac_addr)) { 1944 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1945 mac_addr); 1946 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid", 1947 mac_str); 1948 return -EINVAL; 1949 } 1950 1951 memset(&req, 0, sizeof(req)); 1952 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1953 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1954 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1955 if (ret == 0) { 1956 /* 1957 * This mac addr exist, remove this handle's VFID for it. 1958 * In current version VF is not supported when PF is driven by 1959 * DPDK driver, just need to configure parameters for PF vport. 
1960 */ 1961 vf_id = HNS3_PF_FUNC_ID; 1962 hns3_update_desc_vfid(desc, vf_id, true); 1963 1964 /* All the vfid bits are zero now, so this entry can be deleted */ 1965 ret = hns3_remove_mac_vlan_tbl(hw, &req); 1966 } else if (ret == -ENOENT) { 1967 /* This mac addr doesn't exist. */ 1968 return 0; 1969 } 1970 1971 if (ret) { 1972 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1973 mac_addr); 1974 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret); 1975 } 1976 1977 return ret; 1978 } 1979 1980 static int 1981 hns3_set_mc_addr_chk_param(struct hns3_hw *hw, 1982 struct rte_ether_addr *mc_addr_set, 1983 uint32_t nb_mc_addr) 1984 { 1985 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1986 struct rte_ether_addr *addr; 1987 uint32_t i; 1988 uint32_t j; 1989 1990 if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { 1991 hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " 1992 "invalid. valid range: 0~%d", 1993 nb_mc_addr, HNS3_MC_MACADDR_NUM); 1994 return -EINVAL; 1995 } 1996 1997 /* Check if input mac addresses are valid */ 1998 for (i = 0; i < nb_mc_addr; i++) { 1999 addr = &mc_addr_set[i]; 2000 if (!rte_is_multicast_ether_addr(addr)) { 2001 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2002 addr); 2003 hns3_err(hw, 2004 "failed to set mc mac addr, addr(%s) invalid.", 2005 mac_str); 2006 return -EINVAL; 2007 } 2008 2009 /* Check if there are duplicate addresses */ 2010 for (j = i + 1; j < nb_mc_addr; j++) { 2011 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 2012 hns3_ether_format_addr(mac_str, 2013 RTE_ETHER_ADDR_FMT_SIZE, 2014 addr); 2015 hns3_err(hw, "failed to set mc mac addr, " 2016 "addrs invalid. duplicate addr(%s).", 2017 mac_str); 2018 return -EINVAL; 2019 } 2020 } 2021 2022 /* 2023 * Check if there are duplicate addresses between mac_addrs 2024 * and mc_addr_set 2025 */ 2026 for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) { 2027 if (rte_is_same_ether_addr(addr, 2028 &hw->data->mac_addrs[j])) { 2029 hns3_ether_format_addr(mac_str, 2030 RTE_ETHER_ADDR_FMT_SIZE, 2031 addr); 2032 hns3_err(hw, "failed to set mc mac addr, " 2033 "addrs invalid.
addrs(%s) has already been " 2034 "configured by the mac_addr add API", 2035 mac_str); 2036 return -EINVAL; 2037 } 2038 } 2039 } 2040 2041 return 0; 2042 } 2043 2044 static void 2045 hns3_set_mc_addr_calc_addr(struct hns3_hw *hw, 2046 struct rte_ether_addr *mc_addr_set, 2047 int mc_addr_num, 2048 struct rte_ether_addr *reserved_addr_list, 2049 int *reserved_addr_num, 2050 struct rte_ether_addr *add_addr_list, 2051 int *add_addr_num, 2052 struct rte_ether_addr *rm_addr_list, 2053 int *rm_addr_num) 2054 { 2055 struct rte_ether_addr *addr; 2056 int current_addr_num; 2057 int reserved_num = 0; 2058 int add_num = 0; 2059 int rm_num = 0; 2060 int num; 2061 int i; 2062 int j; 2063 bool same_addr; 2064 2065 /* Calculate the mc mac address list that should be removed */ 2066 current_addr_num = hw->mc_addrs_num; 2067 for (i = 0; i < current_addr_num; i++) { 2068 addr = &hw->mc_addrs[i]; 2069 same_addr = false; 2070 for (j = 0; j < mc_addr_num; j++) { 2071 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 2072 same_addr = true; 2073 break; 2074 } 2075 } 2076 2077 if (!same_addr) { 2078 rte_ether_addr_copy(addr, &rm_addr_list[rm_num]); 2079 rm_num++; 2080 } else { 2081 rte_ether_addr_copy(addr, 2082 &reserved_addr_list[reserved_num]); 2083 reserved_num++; 2084 } 2085 } 2086 2087 /* Calculate the mc mac address list that should be added */ 2088 for (i = 0; i < mc_addr_num; i++) { 2089 addr = &mc_addr_set[i]; 2090 same_addr = false; 2091 for (j = 0; j < current_addr_num; j++) { 2092 if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) { 2093 same_addr = true; 2094 break; 2095 } 2096 } 2097 2098 if (!same_addr) { 2099 rte_ether_addr_copy(addr, &add_addr_list[add_num]); 2100 add_num++; 2101 } 2102 } 2103 2104 /* Reorder the mc mac address list maintained by driver */ 2105 for (i = 0; i < reserved_num; i++) 2106 rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]); 2107 2108 for (i = 0; i < rm_num; i++) { 2109 num = reserved_num + i; 2110 rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]); 2111 } 2112 2113 *reserved_addr_num = reserved_num; 2114 *add_addr_num = add_num; 2115 *rm_addr_num = rm_num; 2116 } 2117 2118 static int 2119 hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, 2120 struct rte_ether_addr *mc_addr_set, 2121 uint32_t nb_mc_addr) 2122 { 2123 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2124 struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM]; 2125 struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM]; 2126 struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM]; 2127 struct rte_ether_addr *addr; 2128 int reserved_addr_num; 2129 int add_addr_num; 2130 int rm_addr_num; 2131 int mc_addr_num; 2132 int num; 2133 int ret; 2134 int i; 2135 2136 /* Check if input parameters are valid */ 2137 ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); 2138 if (ret) 2139 return ret; 2140 2141 rte_spinlock_lock(&hw->lock); 2142 2143 /* 2144 * Calculate the mc mac address lists that should be removed and 2145 * added, then reorder the mc mac address list maintained by the driver.
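 * For illustration (hypothetical addresses): if the driver currently holds
 * {A, B, C} and the application passes {B, C, D}, then the reserved list
 * becomes {B, C}, the remove list {A} and the add list {D}; hw->mc_addrs is
 * reordered so the reserved entries come first, followed by the entries
 * being removed, which keeps the first reserved_addr_num slots valid while
 * the trailing slots are reclaimed as addresses are deleted below.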
2146 */ 2147 mc_addr_num = (int)nb_mc_addr; 2148 hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num, 2149 reserved_addr_list, &reserved_addr_num, 2150 add_addr_list, &add_addr_num, 2151 rm_addr_list, &rm_addr_num); 2152 2153 /* Remove mc mac addresses */ 2154 for (i = 0; i < rm_addr_num; i++) { 2155 num = rm_addr_num - i - 1; 2156 addr = &rm_addr_list[num]; 2157 ret = hns3_remove_mc_addr(hw, addr); 2158 if (ret) { 2159 rte_spinlock_unlock(&hw->lock); 2160 return ret; 2161 } 2162 hw->mc_addrs_num--; 2163 } 2164 2165 /* Add mc mac addresses */ 2166 for (i = 0; i < add_addr_num; i++) { 2167 addr = &add_addr_list[i]; 2168 ret = hns3_add_mc_addr(hw, addr); 2169 if (ret) { 2170 rte_spinlock_unlock(&hw->lock); 2171 return ret; 2172 } 2173 2174 num = reserved_addr_num + i; 2175 rte_ether_addr_copy(addr, &hw->mc_addrs[num]); 2176 hw->mc_addrs_num++; 2177 } 2178 rte_spinlock_unlock(&hw->lock); 2179 2180 return 0; 2181 } 2182 2183 static int 2184 hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) 2185 { 2186 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 2187 struct hns3_hw *hw = &hns->hw; 2188 struct rte_ether_addr *addr; 2189 int err = 0; 2190 int ret; 2191 int i; 2192 2193 for (i = 0; i < hw->mc_addrs_num; i++) { 2194 addr = &hw->mc_addrs[i]; 2195 if (!rte_is_multicast_ether_addr(addr)) 2196 continue; 2197 if (del) 2198 ret = hns3_remove_mc_addr(hw, addr); 2199 else 2200 ret = hns3_add_mc_addr(hw, addr); 2201 if (ret) { 2202 err = ret; 2203 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2204 addr); 2205 hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d", 2206 del ? "Remove" : "Restore", mac_str, ret); 2207 } 2208 } 2209 return err; 2210 } 2211 2212 static int 2213 hns3_check_mq_mode(struct rte_eth_dev *dev) 2214 { 2215 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 2216 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 2217 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2218 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2219 struct rte_eth_dcb_rx_conf *dcb_rx_conf; 2220 struct rte_eth_dcb_tx_conf *dcb_tx_conf; 2221 uint8_t num_tc; 2222 int max_tc = 0; 2223 int i; 2224 2225 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 2226 dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; 2227 2228 if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { 2229 hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB_RSS is not supported. " 2230 "rx_mq_mode = %d", rx_mq_mode); 2231 return -EINVAL; 2232 } 2233 2234 if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB || 2235 tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) { 2236 hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB and ETH_MQ_TX_VMDQ_DCB " 2237 "are not supported.
rx_mq_mode = %d, tx_mq_mode = %d", 2238 rx_mq_mode, tx_mq_mode); 2239 return -EINVAL; 2240 } 2241 2242 if (rx_mq_mode == ETH_MQ_RX_DCB_RSS) { 2243 if (dcb_rx_conf->nb_tcs > pf->tc_max) { 2244 hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.", 2245 dcb_rx_conf->nb_tcs, pf->tc_max); 2246 return -EINVAL; 2247 } 2248 2249 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS || 2250 dcb_rx_conf->nb_tcs == HNS3_8_TCS)) { 2251 hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, " 2252 "nb_tcs(%d) != %d or %d in rx direction.", 2253 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS); 2254 return -EINVAL; 2255 } 2256 2257 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) { 2258 hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)", 2259 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs); 2260 return -EINVAL; 2261 } 2262 2263 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) { 2264 if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) { 2265 hns3_err(hw, "dcb_tc[%d] = %u in rx direction, " 2266 "is not equal to one in tx direction.", 2267 i, dcb_rx_conf->dcb_tc[i]); 2268 return -EINVAL; 2269 } 2270 if (dcb_rx_conf->dcb_tc[i] > max_tc) 2271 max_tc = dcb_rx_conf->dcb_tc[i]; 2272 } 2273 2274 num_tc = max_tc + 1; 2275 if (num_tc > dcb_rx_conf->nb_tcs) { 2276 hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)", 2277 num_tc, dcb_rx_conf->nb_tcs); 2278 return -EINVAL; 2279 } 2280 } 2281 2282 return 0; 2283 } 2284 2285 static int 2286 hns3_check_dcb_cfg(struct rte_eth_dev *dev) 2287 { 2288 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2289 2290 if (!hns3_dev_dcb_supported(hw)) { 2291 hns3_err(hw, "this port does not support dcb configurations."); 2292 return -EOPNOTSUPP; 2293 } 2294 2295 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) { 2296 hns3_err(hw, "MAC pause enabled, cannot config dcb info."); 2297 return -EOPNOTSUPP; 2298 } 2299 2300 /* Check multiple queue mode */ 2301 return hns3_check_mq_mode(dev); 2302 } 2303 2304 static int 2305 hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en, 2306 enum hns3_ring_type queue_type, uint16_t queue_id) 2307 { 2308 struct hns3_cmd_desc desc; 2309 struct hns3_ctrl_vector_chain_cmd *req = 2310 (struct hns3_ctrl_vector_chain_cmd *)desc.data; 2311 enum hns3_opcode_type op; 2312 uint16_t tqp_type_and_id = 0; 2313 uint16_t type; 2314 uint16_t gl; 2315 int ret; 2316 2317 op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; 2318 hns3_cmd_setup_basic_desc(&desc, op, false); 2319 req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M, 2320 HNS3_TQP_INT_ID_L_S); 2321 req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M, 2322 HNS3_TQP_INT_ID_H_S); 2323 2324 if (queue_type == HNS3_RING_TYPE_RX) 2325 gl = HNS3_RING_GL_RX; 2326 else 2327 gl = HNS3_RING_GL_TX; 2328 2329 type = queue_type; 2330 2331 hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S, 2332 type); 2333 hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id); 2334 hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S, 2335 gl); 2336 req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); 2337 req->int_cause_num = 1; 2338 ret = hns3_cmd_send(hw, &desc, 1); 2339 if (ret) { 2340 hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.", 2341 en ? 
"Map" : "Unmap", queue_id, vector_id, ret); 2342 return ret; 2343 } 2344 2345 return 0; 2346 } 2347 2348 static int 2349 hns3_init_ring_with_vector(struct hns3_hw *hw) 2350 { 2351 uint16_t vec; 2352 int ret; 2353 int i; 2354 2355 /* 2356 * In hns3 network engine, vector 0 is always the misc interrupt of this 2357 * function, vector 1~N can be used respectively for the queues of the 2358 * function. Tx and Rx queues with the same number share the interrupt 2359 * vector. In the initialization clearing the all hardware mapping 2360 * relationship configurations between queues and interrupt vectors is 2361 * needed, so some error caused by the residual configurations, such as 2362 * the unexpected Tx interrupt, can be avoid. 2363 */ 2364 vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ 2365 if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE) 2366 vec = vec - 1; /* the last interrupt is reserved */ 2367 hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); 2368 for (i = 0; i < hw->intr_tqps_num; i++) { 2369 /* 2370 * Set gap limiter/rate limiter/quanity limiter algorithm 2371 * configuration for interrupt coalesce of queue's interrupt. 2372 */ 2373 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, 2374 HNS3_TQP_INTR_GL_DEFAULT); 2375 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, 2376 HNS3_TQP_INTR_GL_DEFAULT); 2377 hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); 2378 /* 2379 * QL(quantity limiter) is not used currently, just set 0 to 2380 * close it. 2381 */ 2382 hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); 2383 2384 ret = hns3_bind_ring_with_vector(hw, vec, false, 2385 HNS3_RING_TYPE_TX, i); 2386 if (ret) { 2387 PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with " 2388 "vector: %u, ret=%d", i, vec, ret); 2389 return ret; 2390 } 2391 2392 ret = hns3_bind_ring_with_vector(hw, vec, false, 2393 HNS3_RING_TYPE_RX, i); 2394 if (ret) { 2395 PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with " 2396 "vector: %u, ret=%d", i, vec, ret); 2397 return ret; 2398 } 2399 } 2400 2401 return 0; 2402 } 2403 2404 static int 2405 hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf) 2406 { 2407 struct hns3_adapter *hns = dev->data->dev_private; 2408 struct hns3_hw *hw = &hns->hw; 2409 uint32_t max_rx_pkt_len; 2410 uint16_t mtu; 2411 int ret; 2412 2413 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) 2414 return 0; 2415 2416 /* 2417 * If jumbo frames are enabled, MTU needs to be refreshed 2418 * according to the maximum RX packet length. 
2419 */ 2420 max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; 2421 if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || 2422 max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { 2423 hns3_err(hw, "maximum Rx packet length must be greater than %u " 2424 "and no more than %u when jumbo frame enabled.", 2425 (uint16_t)HNS3_DEFAULT_FRAME_LEN, 2426 (uint16_t)HNS3_MAX_FRAME_LEN); 2427 return -EINVAL; 2428 } 2429 2430 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); 2431 ret = hns3_dev_mtu_set(dev, mtu); 2432 if (ret) 2433 return ret; 2434 dev->data->mtu = mtu; 2435 2436 return 0; 2437 } 2438 2439 static int 2440 hns3_dev_configure(struct rte_eth_dev *dev) 2441 { 2442 struct hns3_adapter *hns = dev->data->dev_private; 2443 struct rte_eth_conf *conf = &dev->data->dev_conf; 2444 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; 2445 struct hns3_hw *hw = &hns->hw; 2446 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2447 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2448 struct rte_eth_rss_conf rss_conf; 2449 bool gro_en; 2450 int ret; 2451 2452 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); 2453 2454 /* 2455 * Some versions of the hardware network engine do not support 2456 * individually enabling/disabling/resetting the Tx or Rx queue. These 2457 * devices must enable/disable/reset Tx and Rx queues at the same time. 2458 * When the number of Tx queues allocated by upper applications is not 2459 * equal to the number of Rx queues, the driver needs to set up fake Tx 2460 * or Rx queues to adjust the number of Tx/Rx queues; otherwise, the 2461 * network engine cannot work as usual. These fake queues are invisible 2462 * to upper applications and cannot be used by them. 2463 */ 2464 if (!hns3_dev_indep_txrx_supported(hw)) { 2465 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); 2466 if (ret) { 2467 hns3_err(hw, "failed to set Rx/Tx fake queues, ret = %d.", 2468 ret); 2469 return ret; 2470 } 2471 } 2472 2473 hw->adapter_state = HNS3_NIC_CONFIGURING; 2474 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) { 2475 ret = hns3_check_dcb_cfg(dev); 2476 if (ret) 2477 goto cfg_err; 2478 } 2479 2480 /* When RSS is not configured, redirect the packet queue 0 */ 2481 if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) { 2482 conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 2483 rss_conf = conf->rx_adv_conf.rss_conf; 2484 hw->rss_dis_flag = false; 2485 ret = hns3_dev_rss_hash_update(dev, &rss_conf); 2486 if (ret) 2487 goto cfg_err; 2488 } 2489 2490 ret = hns3_refresh_mtu(dev, conf); 2491 if (ret) 2492 goto cfg_err; 2493 2494 ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf); 2495 if (ret) 2496 goto cfg_err; 2497 2498 ret = hns3_dev_configure_vlan(dev); 2499 if (ret) 2500 goto cfg_err; 2501 2502 /* config hardware GRO */ 2503 gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
true : false; 2504 ret = hns3_config_gro(hw, gro_en); 2505 if (ret) 2506 goto cfg_err; 2507 2508 hns3_init_rx_ptype_tble(dev); 2509 hw->adapter_state = HNS3_NIC_CONFIGURED; 2510 2511 return 0; 2512 2513 cfg_err: 2514 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); 2515 hw->adapter_state = HNS3_NIC_INITIALIZED; 2516 2517 return ret; 2518 } 2519 2520 static int 2521 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) 2522 { 2523 struct hns3_config_max_frm_size_cmd *req; 2524 struct hns3_cmd_desc desc; 2525 2526 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false); 2527 2528 req = (struct hns3_config_max_frm_size_cmd *)desc.data; 2529 req->max_frm_size = rte_cpu_to_le_16(new_mps); 2530 req->min_frm_size = RTE_ETHER_MIN_LEN; 2531 2532 return hns3_cmd_send(hw, &desc, 1); 2533 } 2534 2535 static int 2536 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) 2537 { 2538 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2539 uint16_t original_mps = hns->pf.mps; 2540 int err; 2541 int ret; 2542 2543 ret = hns3_set_mac_mtu(hw, mps); 2544 if (ret) { 2545 hns3_err(hw, "failed to set mtu, ret = %d", ret); 2546 return ret; 2547 } 2548 2549 hns->pf.mps = mps; 2550 ret = hns3_buffer_alloc(hw); 2551 if (ret) { 2552 hns3_err(hw, "failed to allocate buffer, ret = %d", ret); 2553 goto rollback; 2554 } 2555 2556 return 0; 2557 2558 rollback: 2559 err = hns3_set_mac_mtu(hw, original_mps); 2560 if (err) { 2561 hns3_err(hw, "failed to roll back MTU, err = %d", err); 2562 return ret; 2563 } 2564 hns->pf.mps = original_mps; 2565 2566 return ret; 2567 } 2568 2569 static int 2570 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 2571 { 2572 struct hns3_adapter *hns = dev->data->dev_private; 2573 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; 2574 struct hns3_hw *hw = &hns->hw; 2575 bool is_jumbo_frame; 2576 int ret; 2577 2578 if (dev->data->dev_started) { 2579 hns3_err(hw, "Failed to set mtu, port %u must be stopped " 2580 "before configuration", dev->data->port_id); 2581 return -EBUSY; 2582 } 2583 2584 rte_spinlock_lock(&hw->lock); 2585 is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false; 2586 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN); 2587 2588 /* 2589 * The maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can 2590 * safely be assigned to a uint16_t variable.
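 * For instance, assuming the same hypothetical 26-byte HNS3_ETH_OVERHEAD
 * as noted above, an MTU of 1500 yields a frame_size of 1526, and even the
 * largest accepted value, HNS3_MAX_FRAME_LEN, stays well below UINT16_MAX,
 * so the cast below cannot truncate.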
2591 */ 2592 ret = hns3_config_mtu(hw, (uint16_t)frame_size); 2593 if (ret) { 2594 rte_spinlock_unlock(&hw->lock); 2595 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d", 2596 dev->data->port_id, mtu, ret); 2597 return ret; 2598 } 2599 2600 if (is_jumbo_frame) 2601 dev->data->dev_conf.rxmode.offloads |= 2602 DEV_RX_OFFLOAD_JUMBO_FRAME; 2603 else 2604 dev->data->dev_conf.rxmode.offloads &= 2605 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 2606 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 2607 rte_spinlock_unlock(&hw->lock); 2608 2609 return 0; 2610 } 2611 2612 static uint32_t 2613 hns3_get_copper_port_speed_capa(uint32_t supported_speed) 2614 { 2615 uint32_t speed_capa = 0; 2616 2617 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT) 2618 speed_capa |= ETH_LINK_SPEED_10M_HD; 2619 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT) 2620 speed_capa |= ETH_LINK_SPEED_10M; 2621 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT) 2622 speed_capa |= ETH_LINK_SPEED_100M_HD; 2623 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT) 2624 speed_capa |= ETH_LINK_SPEED_100M; 2625 if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT) 2626 speed_capa |= ETH_LINK_SPEED_1G; 2627 2628 return speed_capa; 2629 } 2630 2631 static uint32_t 2632 hns3_get_fiber_port_speed_capa(uint32_t supported_speed) 2633 { 2634 uint32_t speed_capa = 0; 2635 2636 if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT) 2637 speed_capa |= ETH_LINK_SPEED_1G; 2638 if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT) 2639 speed_capa |= ETH_LINK_SPEED_10G; 2640 if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT) 2641 speed_capa |= ETH_LINK_SPEED_25G; 2642 if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT) 2643 speed_capa |= ETH_LINK_SPEED_40G; 2644 if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT) 2645 speed_capa |= ETH_LINK_SPEED_50G; 2646 if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT) 2647 speed_capa |= ETH_LINK_SPEED_100G; 2648 if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT) 2649 speed_capa |= ETH_LINK_SPEED_200G; 2650 2651 return speed_capa; 2652 } 2653 2654 static uint32_t 2655 hns3_get_speed_capa(struct hns3_hw *hw) 2656 { 2657 struct hns3_mac *mac = &hw->mac; 2658 uint32_t speed_capa; 2659 2660 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 2661 speed_capa = 2662 hns3_get_copper_port_speed_capa(mac->supported_speed); 2663 else 2664 speed_capa = 2665 hns3_get_fiber_port_speed_capa(mac->supported_speed); 2666 2667 if (mac->support_autoneg == 0) 2668 speed_capa |= ETH_LINK_SPEED_FIXED; 2669 2670 return speed_capa; 2671 } 2672 2673 int 2674 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) 2675 { 2676 struct hns3_adapter *hns = eth_dev->data->dev_private; 2677 struct hns3_hw *hw = &hns->hw; 2678 uint16_t queue_num = hw->tqps_num; 2679 2680 /* 2681 * In interrupt mode, 'max_rx_queues' is set based on the number of 2682 * MSI-X interrupt resources of the hardware.
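 * Hypothetical figures for illustration: with 16 TQPs but only 10 MSI-X
 * vectors, one vector is kept for the misc interrupt (and one more may be
 * reserved depending on the mapping mode), so at most 9 queue vectors
 * remain and max_rx_queues is reported as 9 when Rx interrupts are enabled.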
2683 */ 2684 if (hw->data->dev_conf.intr_conf.rxq == 1) 2685 queue_num = hw->intr_tqps_num; 2686 2687 info->max_rx_queues = queue_num; 2688 info->max_tx_queues = hw->tqps_num; 2689 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ 2690 info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; 2691 info->max_mac_addrs = HNS3_UC_MACADDR_NUM; 2692 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; 2693 info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; 2694 info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 2695 DEV_RX_OFFLOAD_TCP_CKSUM | 2696 DEV_RX_OFFLOAD_UDP_CKSUM | 2697 DEV_RX_OFFLOAD_SCTP_CKSUM | 2698 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 2699 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | 2700 DEV_RX_OFFLOAD_KEEP_CRC | 2701 DEV_RX_OFFLOAD_SCATTER | 2702 DEV_RX_OFFLOAD_VLAN_STRIP | 2703 DEV_RX_OFFLOAD_VLAN_FILTER | 2704 DEV_RX_OFFLOAD_JUMBO_FRAME | 2705 DEV_RX_OFFLOAD_RSS_HASH | 2706 DEV_RX_OFFLOAD_TCP_LRO); 2707 info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 2708 DEV_TX_OFFLOAD_IPV4_CKSUM | 2709 DEV_TX_OFFLOAD_TCP_CKSUM | 2710 DEV_TX_OFFLOAD_UDP_CKSUM | 2711 DEV_TX_OFFLOAD_SCTP_CKSUM | 2712 DEV_TX_OFFLOAD_MULTI_SEGS | 2713 DEV_TX_OFFLOAD_TCP_TSO | 2714 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 2715 DEV_TX_OFFLOAD_GRE_TNL_TSO | 2716 DEV_TX_OFFLOAD_GENEVE_TNL_TSO | 2717 DEV_TX_OFFLOAD_MBUF_FAST_FREE | 2718 hns3_txvlan_cap_get(hw)); 2719 2720 if (hns3_dev_outer_udp_cksum_supported(hw)) 2721 info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; 2722 2723 if (hns3_dev_indep_txrx_supported(hw)) 2724 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 2725 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 2726 2727 if (hns3_dev_ptp_supported(hw)) 2728 info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; 2729 2730 info->rx_desc_lim = (struct rte_eth_desc_lim) { 2731 .nb_max = HNS3_MAX_RING_DESC, 2732 .nb_min = HNS3_MIN_RING_DESC, 2733 .nb_align = HNS3_ALIGN_RING_DESC, 2734 }; 2735 2736 info->tx_desc_lim = (struct rte_eth_desc_lim) { 2737 .nb_max = HNS3_MAX_RING_DESC, 2738 .nb_min = HNS3_MIN_RING_DESC, 2739 .nb_align = HNS3_ALIGN_RING_DESC, 2740 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, 2741 .nb_mtu_seg_max = hw->max_non_tso_bd_num, 2742 }; 2743 2744 info->speed_capa = hns3_get_speed_capa(hw); 2745 info->default_rxconf = (struct rte_eth_rxconf) { 2746 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, 2747 /* 2748 * If there are no available Rx buffer descriptors, incoming 2749 * packets are always dropped by hardware based on hns3 network 2750 * engine. 
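 * In other words, this engine has no back-pressure mode for Rx buffer
 * exhaustion, so the default of rx_drop_en = 1 below simply mirrors what
 * the hardware always does.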
2751 */ 2752 .rx_drop_en = 1, 2753 .offloads = 0, 2754 }; 2755 info->default_txconf = (struct rte_eth_txconf) { 2756 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, 2757 .offloads = 0, 2758 }; 2759 2760 info->vmdq_queue_num = 0; 2761 2762 info->reta_size = hw->rss_ind_tbl_size; 2763 info->hash_key_size = HNS3_RSS_KEY_SIZE; 2764 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; 2765 2766 info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2767 info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2768 info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2769 info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2770 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2771 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2772 2773 return 0; 2774 } 2775 2776 static int 2777 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, 2778 size_t fw_size) 2779 { 2780 struct hns3_adapter *hns = eth_dev->data->dev_private; 2781 struct hns3_hw *hw = &hns->hw; 2782 uint32_t version = hw->fw_version; 2783 int ret; 2784 2785 ret = snprintf(fw_version, fw_size, "%u.%u.%u.%u", 2786 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, 2787 HNS3_FW_VERSION_BYTE3_S), 2788 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, 2789 HNS3_FW_VERSION_BYTE2_S), 2790 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, 2791 HNS3_FW_VERSION_BYTE1_S), 2792 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, 2793 HNS3_FW_VERSION_BYTE0_S)); 2794 ret += 1; /* add the size of '\0' */ 2795 if (fw_size < (uint32_t)ret) 2796 return ret; 2797 else 2798 return 0; 2799 } 2800 2801 static int 2802 hns3_update_port_link_info(struct rte_eth_dev *eth_dev) 2803 { 2804 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2805 int ret; 2806 2807 (void)hns3_update_link_status(hw); 2808 2809 ret = hns3_update_link_info(eth_dev); 2810 if (ret) 2811 hw->mac.link_status = ETH_LINK_DOWN; 2812 2813 return ret; 2814 } 2815 2816 static void 2817 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev, 2818 struct rte_eth_link *new_link) 2819 { 2820 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2821 struct hns3_mac *mac = &hw->mac; 2822 2823 switch (mac->link_speed) { 2824 case ETH_SPEED_NUM_10M: 2825 case ETH_SPEED_NUM_100M: 2826 case ETH_SPEED_NUM_1G: 2827 case ETH_SPEED_NUM_10G: 2828 case ETH_SPEED_NUM_25G: 2829 case ETH_SPEED_NUM_40G: 2830 case ETH_SPEED_NUM_50G: 2831 case ETH_SPEED_NUM_100G: 2832 case ETH_SPEED_NUM_200G: 2833 new_link->link_speed = mac->link_speed; 2834 break; 2835 default: 2836 if (mac->link_status) 2837 new_link->link_speed = ETH_SPEED_NUM_UNKNOWN; 2838 else 2839 new_link->link_speed = ETH_SPEED_NUM_NONE; 2840 break; 2841 } 2842 2843 new_link->link_duplex = mac->link_duplex; 2844 new_link->link_status = mac->link_status ?
ETH_LINK_UP : ETH_LINK_DOWN; 2845 new_link->link_autoneg = mac->link_autoneg; 2846 } 2847 2848 static int 2849 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) 2850 { 2851 #define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */ 2852 #define HNS3_MAX_LINK_CHECK_TIMES 20 /* 2s (20 * 100ms) in total */ 2853 2854 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2855 uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES; 2856 struct hns3_mac *mac = &hw->mac; 2857 struct rte_eth_link new_link; 2858 int ret; 2859 2860 do { 2861 ret = hns3_update_port_link_info(eth_dev); 2862 if (ret) { 2863 hns3_err(hw, "failed to get port link info, ret = %d.", 2864 ret); 2865 break; 2866 } 2867 2868 if (!wait_to_complete || mac->link_status == ETH_LINK_UP) 2869 break; 2870 2871 rte_delay_ms(HNS3_LINK_CHECK_INTERVAL); 2872 } while (retry_cnt--); 2873 2874 memset(&new_link, 0, sizeof(new_link)); 2875 hns3_setup_linkstatus(eth_dev, &new_link); 2876 2877 return rte_eth_linkstatus_set(eth_dev, &new_link); 2878 } 2879 2880 static int 2881 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status) 2882 { 2883 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2884 struct hns3_pf *pf = &hns->pf; 2885 2886 if (!(status->pf_state & HNS3_PF_STATE_DONE)) 2887 return -EINVAL; 2888 2889 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false; 2890 2891 return 0; 2892 } 2893 2894 static int 2895 hns3_query_function_status(struct hns3_hw *hw) 2896 { 2897 #define HNS3_QUERY_MAX_CNT 10 2898 #define HNS3_QUERY_SLEEP_MSECOND 1 2899 struct hns3_func_status_cmd *req; 2900 struct hns3_cmd_desc desc; 2901 int timeout = 0; 2902 int ret; 2903 2904 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true); 2905 req = (struct hns3_func_status_cmd *)desc.data; 2906 2907 do { 2908 ret = hns3_cmd_send(hw, &desc, 1); 2909 if (ret) { 2910 PMD_INIT_LOG(ERR, "query function status failed %d", 2911 ret); 2912 return ret; 2913 } 2914 2915 /* Check whether PF reset is done */ 2916 if (req->pf_state) 2917 break; 2918 2919 rte_delay_ms(HNS3_QUERY_SLEEP_MSECOND); 2920 } while (timeout++ < HNS3_QUERY_MAX_CNT); 2921 2922 return hns3_parse_func_status(hw, req); 2923 } 2924 2925 static int 2926 hns3_get_pf_max_tqp_num(struct hns3_hw *hw) 2927 { 2928 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2929 struct hns3_pf *pf = &hns->pf; 2930 2931 if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) { 2932 /* 2933 * The total_tqps_num obtained from firmware is the maximum tqp 2934 * number of this port, to be shared by the PF and its VFs. 2935 * In most cases there is no need for the PF to own that many 2936 * tqps. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2937 * which comes from the config file, is the maximum queue number 2938 * the user assigns to the PF of this port. Users can thus adapt 2939 * the maximum queue number of the PF to their own application 2940 * scenarios, which is more flexible. In addition, a lot of 2941 * memory can be saved because the queue statistics room is 2942 * allocated according to the actual number of queues required. 2943 * The maximum queue number of the PF for a network engine with a 2944 * revision_id greater than 0x30 is assigned by the config file.
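 * As an illustrative example (figures hypothetical): if firmware reports
 * total_tqps_num = 1280 and the config file sets
 * RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF to 256, then hw->tqps_num becomes
 * RTE_MIN(256, 1280) = 256 and statistics memory is sized for 256 queues
 * instead of 1280.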
2945 */ 2946 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) { 2947 hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) " 2948 "must be greater than 0.", 2949 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF); 2950 return -EINVAL; 2951 } 2952 2953 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2954 hw->total_tqps_num); 2955 } else { 2956 /* 2957 * Due to the limitation on the number of PF interrupts 2958 * available, the maximum queue number assigned to PF on 2959 * the network engine with revision_id 0x21 is 64. 2960 */ 2961 hw->tqps_num = RTE_MIN(hw->total_tqps_num, 2962 HNS3_MAX_TQP_NUM_HIP08_PF); 2963 } 2964 2965 return 0; 2966 } 2967 2968 static int 2969 hns3_query_pf_resource(struct hns3_hw *hw) 2970 { 2971 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2972 struct hns3_pf *pf = &hns->pf; 2973 struct hns3_pf_res_cmd *req; 2974 struct hns3_cmd_desc desc; 2975 int ret; 2976 2977 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); 2978 ret = hns3_cmd_send(hw, &desc, 1); 2979 if (ret) { 2980 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); 2981 return ret; 2982 } 2983 2984 req = (struct hns3_pf_res_cmd *)desc.data; 2985 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) + 2986 rte_le_to_cpu_16(req->ext_tqp_num); 2987 ret = hns3_get_pf_max_tqp_num(hw); 2988 if (ret) 2989 return ret; 2990 2991 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; 2992 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); 2993 2994 if (req->tx_buf_size) 2995 pf->tx_buf_size = 2996 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; 2997 else 2998 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; 2999 3000 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); 3001 3002 if (req->dv_buf_size) 3003 pf->dv_buf_size = 3004 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; 3005 else 3006 pf->dv_buf_size = HNS3_DEFAULT_DV; 3007 3008 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); 3009 3010 hw->num_msi = 3011 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), 3012 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); 3013 3014 return 0; 3015 } 3016 3017 static void 3018 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) 3019 { 3020 struct hns3_cfg_param_cmd *req; 3021 uint64_t mac_addr_tmp_high; 3022 uint8_t ext_rss_size_max; 3023 uint64_t mac_addr_tmp; 3024 uint32_t i; 3025 3026 req = (struct hns3_cfg_param_cmd *)desc[0].data; 3027 3028 /* get the configuration */ 3029 cfg->vmdq_vport_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3030 HNS3_CFG_VMDQ_M, HNS3_CFG_VMDQ_S); 3031 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3032 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); 3033 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3034 HNS3_CFG_TQP_DESC_N_M, 3035 HNS3_CFG_TQP_DESC_N_S); 3036 3037 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3038 HNS3_CFG_PHY_ADDR_M, 3039 HNS3_CFG_PHY_ADDR_S); 3040 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3041 HNS3_CFG_MEDIA_TP_M, 3042 HNS3_CFG_MEDIA_TP_S); 3043 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3044 HNS3_CFG_RX_BUF_LEN_M, 3045 HNS3_CFG_RX_BUF_LEN_S); 3046 /* get mac address */ 3047 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); 3048 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3049 HNS3_CFG_MAC_ADDR_H_M, 3050 HNS3_CFG_MAC_ADDR_H_S); 3051 3052 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 3053 3054 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3055 
HNS3_CFG_DEFAULT_SPEED_M, 3056 HNS3_CFG_DEFAULT_SPEED_S); 3057 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3058 HNS3_CFG_RSS_SIZE_M, 3059 HNS3_CFG_RSS_SIZE_S); 3060 3061 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 3062 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 3063 3064 req = (struct hns3_cfg_param_cmd *)desc[1].data; 3065 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); 3066 3067 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3068 HNS3_CFG_SPEED_ABILITY_M, 3069 HNS3_CFG_SPEED_ABILITY_S); 3070 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3071 HNS3_CFG_UMV_TBL_SPACE_M, 3072 HNS3_CFG_UMV_TBL_SPACE_S); 3073 if (!cfg->umv_space) 3074 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF; 3075 3076 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]), 3077 HNS3_CFG_EXT_RSS_SIZE_M, 3078 HNS3_CFG_EXT_RSS_SIZE_S); 3079 3080 /* 3081 * The ext_rss_size_max field obtained from firmware is an exponent of 3082 * 2 rather than a direct value, which keeps it flexible for future 3083 * changes and expansions. If this field is not zero, the hns3 PF PMD 3084 * driver uses it as rss_size_max under one TC. A device whose revision 3085 * id is greater than or equal to PCI_REVISION_ID_HIP09_A obtains the 3086 * maximum number of queues supported under a TC through this field. 3087 */ 3088 if (ext_rss_size_max) 3089 cfg->rss_size_max = 1U << ext_rss_size_max; 3090 } 3091 3092 /* hns3_get_board_cfg: query the static parameters from the NCL_config file in flash 3093 * @hw: pointer to struct hns3_hw 3094 * @hcfg: the config structure to be filled 3095 */ 3096 static int 3097 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg) 3098 { 3099 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM]; 3100 struct hns3_cfg_param_cmd *req; 3101 uint32_t offset; 3102 uint32_t i; 3103 int ret; 3104 3105 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) { 3106 offset = 0; 3107 req = (struct hns3_cfg_param_cmd *)desc[i].data; 3108 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM, 3109 true); 3110 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S, 3111 i * HNS3_CFG_RD_LEN_BYTES); 3112 /* The length should be divided by 4 when sent to hardware */ 3113 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S, 3114 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT); 3115 req->offset = rte_cpu_to_le_32(offset); 3116 } 3117 3118 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM); 3119 if (ret) { 3120 PMD_INIT_LOG(ERR, "get config failed %d.", ret); 3121 return ret; 3122 } 3123 3124 hns3_parse_cfg(hcfg, desc); 3125 3126 return 0; 3127 } 3128 3129 static int 3130 hns3_parse_speed(int speed_cmd, uint32_t *speed) 3131 { 3132 switch (speed_cmd) { 3133 case HNS3_CFG_SPEED_10M: 3134 *speed = ETH_SPEED_NUM_10M; 3135 break; 3136 case HNS3_CFG_SPEED_100M: 3137 *speed = ETH_SPEED_NUM_100M; 3138 break; 3139 case HNS3_CFG_SPEED_1G: 3140 *speed = ETH_SPEED_NUM_1G; 3141 break; 3142 case HNS3_CFG_SPEED_10G: 3143 *speed = ETH_SPEED_NUM_10G; 3144 break; 3145 case HNS3_CFG_SPEED_25G: 3146 *speed = ETH_SPEED_NUM_25G; 3147 break; 3148 case HNS3_CFG_SPEED_40G: 3149 *speed = ETH_SPEED_NUM_40G; 3150 break; 3151 case HNS3_CFG_SPEED_50G: 3152 *speed = ETH_SPEED_NUM_50G; 3153 break; 3154 case HNS3_CFG_SPEED_100G: 3155 *speed = ETH_SPEED_NUM_100G; 3156 break; 3157 case HNS3_CFG_SPEED_200G: 3158 *speed = ETH_SPEED_NUM_200G; 3159 break; 3160 default: 3161 return -EINVAL; 3162 } 3163 3164 return 0; 3165 } 3166 3167 static void 3168 hns3_set_default_dev_specifications(struct
hns3_hw *hw) 3169 { 3170 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; 3171 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; 3172 hw->rss_key_size = HNS3_RSS_KEY_SIZE; 3173 hw->max_tm_rate = HNS3_ETHER_MAX_RATE; 3174 hw->intr.int_ql_max = HNS3_INTR_QL_NONE; 3175 } 3176 3177 static void 3178 hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) 3179 { 3180 struct hns3_dev_specs_0_cmd *req0; 3181 3182 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; 3183 3184 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; 3185 hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); 3186 hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); 3187 hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); 3188 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); 3189 } 3190 3191 static int 3192 hns3_check_dev_specifications(struct hns3_hw *hw) 3193 { 3194 if (hw->rss_ind_tbl_size == 0 || 3195 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { 3196 hns3_err(hw, "the size of hash lookup table configured (%u)" 3197 " exceeds the maximum(%u)", hw->rss_ind_tbl_size, 3198 HNS3_RSS_IND_TBL_SIZE_MAX); 3199 return -EINVAL; 3200 } 3201 3202 return 0; 3203 } 3204 3205 static int 3206 hns3_query_dev_specifications(struct hns3_hw *hw) 3207 { 3208 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; 3209 int ret; 3210 int i; 3211 3212 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3213 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, 3214 true); 3215 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3216 } 3217 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); 3218 3219 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); 3220 if (ret) 3221 return ret; 3222 3223 hns3_parse_dev_specifications(hw, desc); 3224 3225 return hns3_check_dev_specifications(hw); 3226 } 3227 3228 static int 3229 hns3_get_capability(struct hns3_hw *hw) 3230 { 3231 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3232 struct rte_pci_device *pci_dev; 3233 struct hns3_pf *pf = &hns->pf; 3234 struct rte_eth_dev *eth_dev; 3235 uint16_t device_id; 3236 uint8_t revision; 3237 int ret; 3238 3239 eth_dev = &rte_eth_devices[hw->data->port_id]; 3240 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3241 device_id = pci_dev->id.device_id; 3242 3243 if (device_id == HNS3_DEV_ID_25GE_RDMA || 3244 device_id == HNS3_DEV_ID_50GE_RDMA || 3245 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || 3246 device_id == HNS3_DEV_ID_200G_RDMA) 3247 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); 3248 3249 /* Get PCI revision id */ 3250 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, 3251 HNS3_PCI_REVISION_ID); 3252 if (ret != HNS3_PCI_REVISION_ID_LEN) { 3253 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", 3254 ret); 3255 return -EIO; 3256 } 3257 hw->revision = revision; 3258 3259 if (revision < PCI_REVISION_ID_HIP09_A) { 3260 hns3_set_default_dev_specifications(hw); 3261 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; 3262 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; 3263 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; 3264 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE; 3265 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1; 3266 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; 3267 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE; 3268 hw->rss_info.ipv6_sctp_offload_supported = false; 3269 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; 3270 return 0; 3271 } 3272 3273 ret = hns3_query_dev_specifications(hw); 3274 if (ret) { 
3275 PMD_INIT_LOG(ERR, 3276 "failed to query dev specifications, ret = %d", 3277 ret); 3278 return ret; 3279 } 3280 3281 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 3282 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 3283 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 3284 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; 3285 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; 3286 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; 3287 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; 3288 hw->rss_info.ipv6_sctp_offload_supported = true; 3289 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; 3290 3291 return 0; 3292 } 3293 3294 static int 3295 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) 3296 { 3297 int ret; 3298 3299 switch (media_type) { 3300 case HNS3_MEDIA_TYPE_COPPER: 3301 if (!hns3_dev_copper_supported(hw)) { 3302 PMD_INIT_LOG(ERR, 3303 "Media type is copper, not supported."); 3304 ret = -EOPNOTSUPP; 3305 } else { 3306 ret = 0; 3307 } 3308 break; 3309 case HNS3_MEDIA_TYPE_FIBER: 3310 ret = 0; 3311 break; 3312 case HNS3_MEDIA_TYPE_BACKPLANE: 3313 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported."); 3314 ret = -EOPNOTSUPP; 3315 break; 3316 default: 3317 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); 3318 ret = -EINVAL; 3319 break; 3320 } 3321 3322 return ret; 3323 } 3324 3325 static int 3326 hns3_get_board_configuration(struct hns3_hw *hw) 3327 { 3328 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3329 struct hns3_pf *pf = &hns->pf; 3330 struct hns3_cfg cfg; 3331 int ret; 3332 3333 ret = hns3_get_board_cfg(hw, &cfg); 3334 if (ret) { 3335 PMD_INIT_LOG(ERR, "get board config failed %d", ret); 3336 return ret; 3337 } 3338 3339 ret = hns3_check_media_type(hw, cfg.media_type); 3340 if (ret) 3341 return ret; 3342 3343 hw->mac.media_type = cfg.media_type; 3344 hw->rss_size_max = cfg.rss_size_max; 3345 hw->rss_dis_flag = false; 3346 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); 3347 hw->mac.phy_addr = cfg.phy_addr; 3348 hw->mac.default_addr_setted = false; 3349 hw->num_tx_desc = cfg.tqp_desc_num; 3350 hw->num_rx_desc = cfg.tqp_desc_num; 3351 hw->dcb_info.num_pg = 1; 3352 hw->dcb_info.hw_pfc_map = 0; 3353 3354 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); 3355 if (ret) { 3356 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d", 3357 cfg.default_speed, ret); 3358 return ret; 3359 } 3360 3361 pf->tc_max = cfg.tc_num; 3362 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { 3363 PMD_INIT_LOG(WARNING, 3364 "Get TC num(%u) from flash, set TC num to 1", 3365 pf->tc_max); 3366 pf->tc_max = 1; 3367 } 3368 3369 /* Dev does not support DCB */ 3370 if (!hns3_dev_dcb_supported(hw)) { 3371 pf->tc_max = 1; 3372 pf->pfc_max = 0; 3373 } else 3374 pf->pfc_max = pf->tc_max; 3375 3376 hw->dcb_info.num_tc = 1; 3377 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, 3378 hw->tqps_num / hw->dcb_info.num_tc); 3379 hns3_set_bit(hw->hw_tc_map, 0, 1); 3380 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; 3381 3382 pf->wanted_umv_size = cfg.umv_space; 3383 3384 return ret; 3385 } 3386 3387 static int 3388 hns3_get_configuration(struct hns3_hw *hw) 3389 { 3390 int ret; 3391 3392 ret = hns3_query_function_status(hw); 3393 if (ret) { 3394 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); 3395 return ret; 3396 } 3397 3398 /* Get device capability */ 3399 ret = hns3_get_capability(hw); 3400 if (ret) { 3401 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 3402 return ret; 3403 } 3404 3405 /* Get pf resource */ 3406 ret = 
hns3_query_pf_resource(hw); 3407 if (ret) { 3408 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); 3409 return ret; 3410 } 3411 3412 ret = hns3_get_board_configuration(hw); 3413 if (ret) { 3414 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); 3415 return ret; 3416 } 3417 3418 ret = hns3_query_dev_fec_info(hw); 3419 if (ret) 3420 PMD_INIT_LOG(ERR, 3421 "failed to query FEC information, ret = %d", ret); 3422 3423 return ret; 3424 } 3425 3426 static int 3427 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid, 3428 uint16_t tqp_vid, bool is_pf) 3429 { 3430 struct hns3_tqp_map_cmd *req; 3431 struct hns3_cmd_desc desc; 3432 int ret; 3433 3434 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); 3435 3436 req = (struct hns3_tqp_map_cmd *)desc.data; 3437 req->tqp_id = rte_cpu_to_le_16(tqp_pid); 3438 req->tqp_vf = func_id; 3439 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; 3440 if (!is_pf) 3441 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); 3442 req->tqp_vid = rte_cpu_to_le_16(tqp_vid); 3443 3444 ret = hns3_cmd_send(hw, &desc, 1); 3445 if (ret) 3446 PMD_INIT_LOG(ERR, "TQP map failed %d", ret); 3447 3448 return ret; 3449 } 3450 3451 static int 3452 hns3_map_tqp(struct hns3_hw *hw) 3453 { 3454 int ret; 3455 int i; 3456 3457 /* 3458 * In current version, VF is not supported when PF is driven by DPDK 3459 * driver, so we assign total tqps_num tqps allocated to this port 3460 * to PF. 3461 */ 3462 for (i = 0; i < hw->total_tqps_num; i++) { 3463 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true); 3464 if (ret) 3465 return ret; 3466 } 3467 3468 return 0; 3469 } 3470 3471 static int 3472 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 3473 { 3474 struct hns3_config_mac_speed_dup_cmd *req; 3475 struct hns3_cmd_desc desc; 3476 int ret; 3477 3478 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; 3479 3480 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); 3481 3482 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 
1 : 0); 3483 3484 switch (speed) { 3485 case ETH_SPEED_NUM_10M: 3486 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3487 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); 3488 break; 3489 case ETH_SPEED_NUM_100M: 3490 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3491 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); 3492 break; 3493 case ETH_SPEED_NUM_1G: 3494 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3495 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); 3496 break; 3497 case ETH_SPEED_NUM_10G: 3498 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3499 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); 3500 break; 3501 case ETH_SPEED_NUM_25G: 3502 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3503 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); 3504 break; 3505 case ETH_SPEED_NUM_40G: 3506 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3507 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); 3508 break; 3509 case ETH_SPEED_NUM_50G: 3510 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3511 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); 3512 break; 3513 case ETH_SPEED_NUM_100G: 3514 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3515 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); 3516 break; 3517 case ETH_SPEED_NUM_200G: 3518 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3519 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); 3520 break; 3521 default: 3522 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); 3523 return -EINVAL; 3524 } 3525 3526 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); 3527 3528 ret = hns3_cmd_send(hw, &desc, 1); 3529 if (ret) 3530 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); 3531 3532 return ret; 3533 } 3534 3535 static int 3536 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3537 { 3538 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3539 struct hns3_pf *pf = &hns->pf; 3540 struct hns3_priv_buf *priv; 3541 uint32_t i, total_size; 3542 3543 total_size = pf->pkt_buf_size; 3544 3545 /* allocate tx buffer for all enabled TCs */ 3546 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3547 priv = &buf_alloc->priv_buf[i]; 3548 3549 if (hw->hw_tc_map & BIT(i)) { 3550 if (total_size < pf->tx_buf_size) 3551 return -ENOMEM; 3552 3553 priv->tx_buf_size = pf->tx_buf_size; 3554 } else 3555 priv->tx_buf_size = 0; 3556 3557 total_size -= priv->tx_buf_size; 3558 } 3559 3560 return 0; 3561 } 3562 3563 static int 3564 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3565 { 3566 /* TX buffer size is in units of 128 bytes */ 3567 #define HNS3_BUF_SIZE_UNIT_SHIFT 7 3568 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) 3569 struct hns3_tx_buff_alloc_cmd *req; 3570 struct hns3_cmd_desc desc; 3571 uint32_t buf_size; 3572 uint32_t i; 3573 int ret; 3574 3575 req = (struct hns3_tx_buff_alloc_cmd *)desc.data; 3576 3577 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); 3578 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3579 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 3580 3581 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; 3582 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | 3583 HNS3_BUF_SIZE_UPDATE_EN_MSK); 3584 } 3585 3586 ret = hns3_cmd_send(hw, &desc, 1); 3587 if (ret) 3588 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); 3589 3590 return ret; 3591 } 3592 3593 static int 3594 hns3_get_tc_num(struct hns3_hw *hw) 3595 { 3596 int cnt = 0; 3597 uint8_t i; 3598 3599 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3600 if (hw->hw_tc_map & BIT(i)) 3601 cnt++; 3602 return cnt; 3603 } 3604 3605 static uint32_t 3606 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc
*buf_alloc) 3607 { 3608 struct hns3_priv_buf *priv; 3609 uint32_t rx_priv = 0; 3610 int i; 3611 3612 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3613 priv = &buf_alloc->priv_buf[i]; 3614 if (priv->enable) 3615 rx_priv += priv->buf_size; 3616 } 3617 return rx_priv; 3618 } 3619 3620 static uint32_t 3621 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3622 { 3623 uint32_t total_tx_size = 0; 3624 uint32_t i; 3625 3626 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3627 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 3628 3629 return total_tx_size; 3630 } 3631 3632 /* Get the number of pfc enabled TCs, which have private buffer */ 3633 static int 3634 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3635 { 3636 struct hns3_priv_buf *priv; 3637 int cnt = 0; 3638 uint8_t i; 3639 3640 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3641 priv = &buf_alloc->priv_buf[i]; 3642 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3643 cnt++; 3644 } 3645 3646 return cnt; 3647 } 3648 3649 /* Get the number of pfc disabled TCs, which have private buffer */ 3650 static int 3651 hns3_get_no_pfc_priv_num(struct hns3_hw *hw, 3652 struct hns3_pkt_buf_alloc *buf_alloc) 3653 { 3654 struct hns3_priv_buf *priv; 3655 int cnt = 0; 3656 uint8_t i; 3657 3658 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3659 priv = &buf_alloc->priv_buf[i]; 3660 if (hw->hw_tc_map & BIT(i) && 3661 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3662 cnt++; 3663 } 3664 3665 return cnt; 3666 } 3667 3668 static bool 3669 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, 3670 uint32_t rx_all) 3671 { 3672 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 3673 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3674 struct hns3_pf *pf = &hns->pf; 3675 uint32_t shared_buf, aligned_mps; 3676 uint32_t rx_priv; 3677 uint8_t tc_num; 3678 uint8_t i; 3679 3680 tc_num = hns3_get_tc_num(hw); 3681 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3682 3683 if (hns3_dev_dcb_supported(hw)) 3684 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + 3685 pf->dv_buf_size; 3686 else 3687 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF 3688 + pf->dv_buf_size; 3689 3690 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 3691 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), 3692 HNS3_BUF_SIZE_UNIT); 3693 3694 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); 3695 if (rx_all < rx_priv + shared_std) 3696 return false; 3697 3698 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); 3699 buf_alloc->s_buf.buf_size = shared_buf; 3700 if (hns3_dev_dcb_supported(hw)) { 3701 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; 3702 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 3703 - roundup(aligned_mps / HNS3_BUF_DIV_BY, 3704 HNS3_BUF_SIZE_UNIT); 3705 } else { 3706 buf_alloc->s_buf.self.high = 3707 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3708 buf_alloc->s_buf.self.low = aligned_mps; 3709 } 3710 3711 if (hns3_dev_dcb_supported(hw)) { 3712 hi_thrd = shared_buf - pf->dv_buf_size; 3713 3714 if (tc_num <= NEED_RESERVE_TC_NUM) 3715 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT / 3716 BUF_MAX_PERCENT; 3717 3718 if (tc_num) 3719 hi_thrd = hi_thrd / tc_num; 3720 3721 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); 3722 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); 3723 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; 3724 } else { 3725 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3726 lo_thrd = aligned_mps; 3727 } 3728 3729 for (i 
= 0; i < HNS3_MAX_TC_NUM; i++) { 3730 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 3731 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 3732 } 3733 3734 return true; 3735 } 3736 3737 static bool 3738 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, 3739 struct hns3_pkt_buf_alloc *buf_alloc) 3740 { 3741 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3742 struct hns3_pf *pf = &hns->pf; 3743 struct hns3_priv_buf *priv; 3744 uint32_t aligned_mps; 3745 uint32_t rx_all; 3746 uint8_t i; 3747 3748 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3749 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3750 3751 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3752 priv = &buf_alloc->priv_buf[i]; 3753 3754 priv->enable = 0; 3755 priv->wl.low = 0; 3756 priv->wl.high = 0; 3757 priv->buf_size = 0; 3758 3759 if (!(hw->hw_tc_map & BIT(i))) 3760 continue; 3761 3762 priv->enable = 1; 3763 if (hw->dcb_info.hw_pfc_map & BIT(i)) { 3764 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT; 3765 priv->wl.high = roundup(priv->wl.low + aligned_mps, 3766 HNS3_BUF_SIZE_UNIT); 3767 } else { 3768 priv->wl.low = 0; 3769 priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) : 3770 aligned_mps; 3771 } 3772 3773 priv->buf_size = priv->wl.high + pf->dv_buf_size; 3774 } 3775 3776 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3777 } 3778 3779 static bool 3780 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw, 3781 struct hns3_pkt_buf_alloc *buf_alloc) 3782 { 3783 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3784 struct hns3_pf *pf = &hns->pf; 3785 struct hns3_priv_buf *priv; 3786 int no_pfc_priv_num; 3787 uint32_t rx_all; 3788 uint8_t mask; 3789 int i; 3790 3791 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3792 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc); 3793 3794 /* let the last to be cleared first */ 3795 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3796 priv = &buf_alloc->priv_buf[i]; 3797 mask = BIT((uint8_t)i); 3798 3799 if (hw->hw_tc_map & mask && 3800 !(hw->dcb_info.hw_pfc_map & mask)) { 3801 /* Clear the no pfc TC private buffer */ 3802 priv->wl.low = 0; 3803 priv->wl.high = 0; 3804 priv->buf_size = 0; 3805 priv->enable = 0; 3806 no_pfc_priv_num--; 3807 } 3808 3809 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3810 no_pfc_priv_num == 0) 3811 break; 3812 } 3813 3814 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3815 } 3816 3817 static bool 3818 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, 3819 struct hns3_pkt_buf_alloc *buf_alloc) 3820 { 3821 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3822 struct hns3_pf *pf = &hns->pf; 3823 struct hns3_priv_buf *priv; 3824 uint32_t rx_all; 3825 int pfc_priv_num; 3826 uint8_t mask; 3827 int i; 3828 3829 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3830 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc); 3831 3832 /* let the last to be cleared first */ 3833 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3834 priv = &buf_alloc->priv_buf[i]; 3835 mask = BIT((uint8_t)i); 3836 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) { 3837 /* Reduce the number of pfc TC with private buffer */ 3838 priv->wl.low = 0; 3839 priv->enable = 0; 3840 priv->wl.high = 0; 3841 priv->buf_size = 0; 3842 pfc_priv_num--; 3843 } 3844 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3845 pfc_priv_num == 0) 3846 break; 3847 } 3848 3849 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3850 } 3851 3852 static bool 3853 hns3_only_alloc_priv_buff(struct hns3_hw *hw, 3854 struct hns3_pkt_buf_alloc *buf_alloc) 3855 
{ 3856 #define COMPENSATE_BUFFER 0x3C00 3857 #define COMPENSATE_HALF_MPS_NUM 5 3858 #define PRIV_WL_GAP 0x1800 3859 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3860 struct hns3_pf *pf = &hns->pf; 3861 uint32_t tc_num = hns3_get_tc_num(hw); 3862 uint32_t half_mps = pf->mps >> 1; 3863 struct hns3_priv_buf *priv; 3864 uint32_t min_rx_priv; 3865 uint32_t rx_priv; 3866 uint8_t i; 3867 3868 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3869 if (tc_num) 3870 rx_priv = rx_priv / tc_num; 3871 3872 if (tc_num <= NEED_RESERVE_TC_NUM) 3873 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 3874 3875 /* 3876 * Minimum value of private buffer in rx direction (min_rx_priv) is 3877 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private 3878 * buffer if rx_priv is greater than min_rx_priv. 3879 */ 3880 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER + 3881 COMPENSATE_HALF_MPS_NUM * half_mps; 3882 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT); 3883 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT); 3884 3885 if (rx_priv < min_rx_priv) 3886 return false; 3887 3888 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3889 priv = &buf_alloc->priv_buf[i]; 3890 priv->enable = 0; 3891 priv->wl.low = 0; 3892 priv->wl.high = 0; 3893 priv->buf_size = 0; 3894 3895 if (!(hw->hw_tc_map & BIT(i))) 3896 continue; 3897 3898 priv->enable = 1; 3899 priv->buf_size = rx_priv; 3900 priv->wl.high = rx_priv - pf->dv_buf_size; 3901 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 3902 } 3903 3904 buf_alloc->s_buf.buf_size = 0; 3905 3906 return true; 3907 } 3908 3909 /* 3910 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs 3911 * @hw: pointer to struct hns3_hw 3912 * @buf_alloc: pointer to buffer calculation data 3913 * @return: 0: calculation successful, negative: fail 3914 */ 3915 static int 3916 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3917 { 3918 /* When DCB is not supported, rx private buffer is not allocated. */ 3919 if (!hns3_dev_dcb_supported(hw)) { 3920 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3921 struct hns3_pf *pf = &hns->pf; 3922 uint32_t rx_all = pf->pkt_buf_size; 3923 3924 rx_all -= hns3_get_tx_buff_alloced(buf_alloc); 3925 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all)) 3926 return -ENOMEM; 3927 3928 return 0; 3929 } 3930 3931 /* 3932 * Try to allocate private packet buffer for all TCs without shared 3933 * buffer. 3934 */ 3935 if (hns3_only_alloc_priv_buff(hw, buf_alloc)) 3936 return 0; 3937 3938 /* 3939 * Try to allocate private packet buffer for all TCs with shared 3940 * buffer. 3941 */ 3942 if (hns3_rx_buf_calc_all(hw, true, buf_alloc)) 3943 return 0; 3944 3945 /* 3946 * In different application scenarios, the enabled port number, TC number 3947 * and no_drop TC number are different. In order to obtain better 3948 * performance, software can adjust the buffer size and configure 3949 * the waterline by trying to decrease the private buffer size in the 3950 * following order: waterline of valid TCs, PFC-disabled TCs, PFC-enabled 3951 * TCs.
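 * In terms of the functions below, that fallback order maps to hns3_rx_buf_calc_all() with the reduced waterline, then hns3_drop_nopfc_buf_till_fit(), and finally hns3_drop_pfc_buf_till_fit().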
3952 */ 3953 if (hns3_rx_buf_calc_all(hw, false, buf_alloc)) 3954 return 0; 3955 3956 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc)) 3957 return 0; 3958 3959 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc)) 3960 return 0; 3961 3962 return -ENOMEM; 3963 } 3964 3965 static int 3966 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3967 { 3968 struct hns3_rx_priv_buff_cmd *req; 3969 struct hns3_cmd_desc desc; 3970 uint32_t buf_size; 3971 int ret; 3972 int i; 3973 3974 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false); 3975 req = (struct hns3_rx_priv_buff_cmd *)desc.data; 3976 3977 /* Alloc private buffer TCs */ 3978 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3979 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i]; 3980 3981 req->buf_num[i] = 3982 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S); 3983 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B); 3984 } 3985 3986 buf_size = buf_alloc->s_buf.buf_size; 3987 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) | 3988 (1 << HNS3_TC0_PRI_BUF_EN_B)); 3989 3990 ret = hns3_cmd_send(hw, &desc, 1); 3991 if (ret) 3992 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret); 3993 3994 return ret; 3995 } 3996 3997 static int 3998 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3999 { 4000 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2 4001 struct hns3_rx_priv_wl_buf *req; 4002 struct hns3_priv_buf *priv; 4003 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM]; 4004 int i, j; 4005 int ret; 4006 4007 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) { 4008 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC, 4009 false); 4010 req = (struct hns3_rx_priv_wl_buf *)desc[i].data; 4011 4012 /* The first descriptor set the NEXT bit to 1 */ 4013 if (i == 0) 4014 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4015 else 4016 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4017 4018 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 4019 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j; 4020 4021 priv = &buf_alloc->priv_buf[idx]; 4022 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >> 4023 HNS3_BUF_UNIT_S); 4024 req->tc_wl[j].high |= 4025 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4026 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >> 4027 HNS3_BUF_UNIT_S); 4028 req->tc_wl[j].low |= 4029 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4030 } 4031 } 4032 4033 /* Send 2 descriptor at one time */ 4034 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM); 4035 if (ret) 4036 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d", 4037 ret); 4038 return ret; 4039 } 4040 4041 static int 4042 hns3_common_thrd_config(struct hns3_hw *hw, 4043 struct hns3_pkt_buf_alloc *buf_alloc) 4044 { 4045 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2 4046 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf; 4047 struct hns3_rx_com_thrd *req; 4048 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM]; 4049 struct hns3_tc_thrd *tc; 4050 int tc_idx; 4051 int i, j; 4052 int ret; 4053 4054 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) { 4055 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC, 4056 false); 4057 req = (struct hns3_rx_com_thrd *)&desc[i].data; 4058 4059 /* The first descriptor set the NEXT bit to 1 */ 4060 if (i == 0) 4061 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4062 else 4063 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4064 4065 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 4066 tc_idx 
= i * HNS3_TC_NUM_ONE_DESC + j; 4067 tc = &s_buf->tc_thrd[tc_idx]; 4068 4069 req->com_thrd[j].high = 4070 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); 4071 req->com_thrd[j].high |= 4072 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4073 req->com_thrd[j].low = 4074 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); 4075 req->com_thrd[j].low |= 4076 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4077 } 4078 } 4079 4080 /* Send 2 descriptors at one time */ 4081 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); 4082 if (ret) 4083 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); 4084 4085 return ret; 4086 } 4087 4088 static int 4089 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4090 { 4091 struct hns3_shared_buf *buf = &buf_alloc->s_buf; 4092 struct hns3_rx_com_wl *req; 4093 struct hns3_cmd_desc desc; 4094 int ret; 4095 4096 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); 4097 4098 req = (struct hns3_rx_com_wl *)desc.data; 4099 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); 4100 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4101 4102 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); 4103 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4104 4105 ret = hns3_cmd_send(hw, &desc, 1); 4106 if (ret) 4107 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); 4108 4109 return ret; 4110 } 4111 4112 int 4113 hns3_buffer_alloc(struct hns3_hw *hw) 4114 { 4115 struct hns3_pkt_buf_alloc pkt_buf; 4116 int ret; 4117 4118 memset(&pkt_buf, 0, sizeof(pkt_buf)); 4119 ret = hns3_tx_buffer_calc(hw, &pkt_buf); 4120 if (ret) { 4121 PMD_INIT_LOG(ERR, 4122 "could not calc tx buffer size for all TCs %d", 4123 ret); 4124 return ret; 4125 } 4126 4127 ret = hns3_tx_buffer_alloc(hw, &pkt_buf); 4128 if (ret) { 4129 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); 4130 return ret; 4131 } 4132 4133 ret = hns3_rx_buffer_calc(hw, &pkt_buf); 4134 if (ret) { 4135 PMD_INIT_LOG(ERR, 4136 "could not calc rx priv buffer size for all TCs %d", 4137 ret); 4138 return ret; 4139 } 4140 4141 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); 4142 if (ret) { 4143 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); 4144 return ret; 4145 } 4146 4147 if (hns3_dev_dcb_supported(hw)) { 4148 ret = hns3_rx_priv_wl_config(hw, &pkt_buf); 4149 if (ret) { 4150 PMD_INIT_LOG(ERR, 4151 "could not configure rx private waterline %d", 4152 ret); 4153 return ret; 4154 } 4155 4156 ret = hns3_common_thrd_config(hw, &pkt_buf); 4157 if (ret) { 4158 PMD_INIT_LOG(ERR, 4159 "could not configure common threshold %d", 4160 ret); 4161 return ret; 4162 } 4163 } 4164 4165 ret = hns3_common_wl_config(hw, &pkt_buf); 4166 if (ret) 4167 PMD_INIT_LOG(ERR, "could not configure common waterline %d", 4168 ret); 4169 4170 return ret; 4171 } 4172 4173 static int 4174 hns3_mac_init(struct hns3_hw *hw) 4175 { 4176 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4177 struct hns3_mac *mac = &hw->mac; 4178 struct hns3_pf *pf = &hns->pf; 4179 int ret; 4180 4181 pf->support_sfp_query = true; 4182 mac->link_duplex = ETH_LINK_FULL_DUPLEX; 4183 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); 4184 if (ret) { 4185 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); 4186 return ret; 4187 } 4188 4189 mac->link_status = ETH_LINK_DOWN; 4190 4191 return hns3_config_mtu(hw, pf->mps); 4192 } 4193 4194 static int 4195 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) 4196 { 
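/* Translate the firmware response to a MAC ethertype add into an errno: both "newly added" and "already present" count as success, while a manager table overflow, a key conflict or any undefined response code is logged and mapped to -EIO. */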
4197 #define HNS3_ETHERTYPE_SUCCESS_ADD 0 4198 #define HNS3_ETHERTYPE_ALREADY_ADD 1 4199 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 4200 #define HNS3_ETHERTYPE_KEY_CONFLICT 3 4201 int return_status; 4202 4203 if (cmdq_resp) { 4204 PMD_INIT_LOG(ERR, 4205 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", 4206 cmdq_resp); 4207 return -EIO; 4208 } 4209 4210 switch (resp_code) { 4211 case HNS3_ETHERTYPE_SUCCESS_ADD: 4212 case HNS3_ETHERTYPE_ALREADY_ADD: 4213 return_status = 0; 4214 break; 4215 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: 4216 PMD_INIT_LOG(ERR, 4217 "add mac ethertype failed for manager table overflow."); 4218 return_status = -EIO; 4219 break; 4220 case HNS3_ETHERTYPE_KEY_CONFLICT: 4221 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); 4222 return_status = -EIO; 4223 break; 4224 default: 4225 PMD_INIT_LOG(ERR, 4226 "add mac ethertype failed for undefined, code=%u.", 4227 resp_code); 4228 return_status = -EIO; 4229 break; 4230 } 4231 4232 return return_status; 4233 } 4234 4235 static int 4236 hns3_add_mgr_tbl(struct hns3_hw *hw, 4237 const struct hns3_mac_mgr_tbl_entry_cmd *req) 4238 { 4239 struct hns3_cmd_desc desc; 4240 uint8_t resp_code; 4241 uint16_t retval; 4242 int ret; 4243 4244 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); 4245 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); 4246 4247 ret = hns3_cmd_send(hw, &desc, 1); 4248 if (ret) { 4249 PMD_INIT_LOG(ERR, 4250 "add mac ethertype failed for cmd_send, ret =%d.", 4251 ret); 4252 return ret; 4253 } 4254 4255 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 4256 retval = rte_le_to_cpu_16(desc.retval); 4257 4258 return hns3_get_mac_ethertype_cmd_status(retval, resp_code); 4259 } 4260 4261 static void 4262 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, 4263 int *table_item_num) 4264 { 4265 struct hns3_mac_mgr_tbl_entry_cmd *tbl; 4266 4267 /* 4268 * In current version, we add one item in management table as below: 4269 * 0x0180C200000E -- LLDP MC address 4270 */ 4271 tbl = mgr_table; 4272 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; 4273 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); 4274 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); 4275 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); 4276 tbl->i_port_bitmap = 0x1; 4277 *table_item_num = 1; 4278 } 4279 4280 static int 4281 hns3_init_mgr_tbl(struct hns3_hw *hw) 4282 { 4283 #define HNS_MAC_MGR_TBL_MAX_SIZE 16 4284 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; 4285 int table_item_num; 4286 int ret; 4287 int i; 4288 4289 memset(mgr_table, 0, sizeof(mgr_table)); 4290 hns3_prepare_mgr_tbl(mgr_table, &table_item_num); 4291 for (i = 0; i < table_item_num; i++) { 4292 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); 4293 if (ret) { 4294 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d", 4295 ret); 4296 return ret; 4297 } 4298 } 4299 4300 return 0; 4301 } 4302 4303 static void 4304 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, 4305 bool en_mc, bool en_bc, int vport_id) 4306 { 4307 if (!param) 4308 return; 4309 4310 memset(param, 0, sizeof(struct hns3_promisc_param)); 4311 if (en_uc) 4312 param->enable = HNS3_PROMISC_EN_UC; 4313 if (en_mc) 4314 param->enable |= HNS3_PROMISC_EN_MC; 4315 if (en_bc) 4316 param->enable |= HNS3_PROMISC_EN_BC; 4317 param->vf_id = vport_id; 4318 } 4319 4320 static int 4321 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) 4322 { 4323 struct hns3_promisc_cfg_cmd 
*req; 4324 struct hns3_cmd_desc desc; 4325 int ret; 4326 4327 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false); 4328 4329 req = (struct hns3_promisc_cfg_cmd *)desc.data; 4330 req->vf_id = param->vf_id; 4331 req->flag = (param->enable << HNS3_PROMISC_EN_B) | 4332 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B; 4333 4334 ret = hns3_cmd_send(hw, &desc, 1); 4335 if (ret) 4336 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret); 4337 4338 return ret; 4339 } 4340 4341 static int 4342 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) 4343 { 4344 struct hns3_promisc_param param; 4345 bool en_bc_pmc = true; 4346 uint8_t vf_id; 4347 4348 /* 4349 * In the current version VF is not supported when the PF is driven by 4350 * the DPDK driver, so we just need to configure parameters for the PF vport. 4351 */ 4352 vf_id = HNS3_PF_FUNC_ID; 4353 4354 hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); 4355 return hns3_cmd_set_promisc_mode(hw, &param); 4356 } 4357 4358 static int 4359 hns3_promisc_init(struct hns3_hw *hw) 4360 { 4361 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4362 struct hns3_pf *pf = &hns->pf; 4363 struct hns3_promisc_param param; 4364 uint16_t func_id; 4365 int ret; 4366 4367 ret = hns3_set_promisc_mode(hw, false, false); 4368 if (ret) { 4369 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret); 4370 return ret; 4371 } 4372 4373 /* 4374 * In the current version VFs are not supported when the PF is driven by 4375 * the DPDK driver. After the PF has been taken over by DPDK, the original 4376 * VFs become invalid, so residual entries are possible. The VFs' promisc 4377 * mode should therefore be cleared to avoid unnecessary bandwidth usage 4378 * during init. 4379 */ 4380 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) { 4381 hns3_promisc_param_init(&param, false, false, false, func_id); 4382 ret = hns3_cmd_set_promisc_mode(hw, &param); 4383 if (ret) { 4384 PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode," 4385 " ret = %d", func_id, ret); 4386 return ret; 4387 } 4388 } 4389 4390 return 0; 4391 } 4392 4393 static void 4394 hns3_promisc_uninit(struct hns3_hw *hw) 4395 { 4396 struct hns3_promisc_param param; 4397 uint16_t func_id; 4398 int ret; 4399 4400 func_id = HNS3_PF_FUNC_ID; 4401 4402 /* 4403 * In the current version VFs are not supported when the PF is driven by 4404 * the DPDK driver, and the VFs' promisc mode status has been cleared during 4405 * init and will not change. So just clear the PF's promisc 4406 * mode status during uninit. 4407 */ 4408 hns3_promisc_param_init(&param, false, false, false, func_id); 4409 ret = hns3_cmd_set_promisc_mode(hw, &param); 4410 if (ret) 4411 PMD_INIT_LOG(ERR, "failed to clear promisc status during" 4412 " uninit, ret = %d", ret); 4413 } 4414 4415 static int 4416 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) 4417 { 4418 bool allmulti = dev->data->all_multicast ? true : false; 4419 struct hns3_adapter *hns = dev->data->dev_private; 4420 struct hns3_hw *hw = &hns->hw; 4421 uint64_t offloads; 4422 int err; 4423 int ret; 4424 4425 rte_spinlock_lock(&hw->lock); 4426 ret = hns3_set_promisc_mode(hw, true, true); 4427 if (ret) { 4428 rte_spinlock_unlock(&hw->lock); 4429 hns3_err(hw, "failed to enable promiscuous mode, ret = %d", 4430 ret); 4431 return ret; 4432 } 4433 4434 /* 4435 * When promiscuous mode is enabled, disable the vlan filter so that 4436 * all packets are accepted in the receiving direction.
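 * The filter state is restored by hns3_dev_promiscuous_disable() when promiscuous mode is turned off again.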
4437 */ 4438 offloads = dev->data->dev_conf.rxmode.offloads; 4439 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4440 ret = hns3_enable_vlan_filter(hns, false); 4441 if (ret) { 4442 hns3_err(hw, "failed to enable promiscuous mode due to " 4443 "failure to disable vlan filter, ret = %d", 4444 ret); 4445 err = hns3_set_promisc_mode(hw, false, allmulti); 4446 if (err) 4447 hns3_err(hw, "failed to restore promiscuous " 4448 "status after disable vlan filter " 4449 "failed during enabling promiscuous " 4450 "mode, ret = %d", ret); 4451 } 4452 } 4453 4454 rte_spinlock_unlock(&hw->lock); 4455 4456 return ret; 4457 } 4458 4459 static int 4460 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) 4461 { 4462 bool allmulti = dev->data->all_multicast ? true : false; 4463 struct hns3_adapter *hns = dev->data->dev_private; 4464 struct hns3_hw *hw = &hns->hw; 4465 uint64_t offloads; 4466 int err; 4467 int ret; 4468 4469 /* If now in all_multicast mode, must remain in all_multicast mode. */ 4470 rte_spinlock_lock(&hw->lock); 4471 ret = hns3_set_promisc_mode(hw, false, allmulti); 4472 if (ret) { 4473 rte_spinlock_unlock(&hw->lock); 4474 hns3_err(hw, "failed to disable promiscuous mode, ret = %d", 4475 ret); 4476 return ret; 4477 } 4478 /* when promiscuous mode was disabled, restore the vlan filter status */ 4479 offloads = dev->data->dev_conf.rxmode.offloads; 4480 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4481 ret = hns3_enable_vlan_filter(hns, true); 4482 if (ret) { 4483 hns3_err(hw, "failed to disable promiscuous mode due to" 4484 " failure to restore vlan filter, ret = %d", 4485 ret); 4486 err = hns3_set_promisc_mode(hw, true, true); 4487 if (err) 4488 hns3_err(hw, "failed to restore promiscuous " 4489 "status after enabling vlan filter " 4490 "failed during disabling promiscuous " 4491 "mode, ret = %d", ret); 4492 } 4493 } 4494 rte_spinlock_unlock(&hw->lock); 4495 4496 return ret; 4497 } 4498 4499 static int 4500 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) 4501 { 4502 struct hns3_adapter *hns = dev->data->dev_private; 4503 struct hns3_hw *hw = &hns->hw; 4504 int ret; 4505 4506 if (dev->data->promiscuous) 4507 return 0; 4508 4509 rte_spinlock_lock(&hw->lock); 4510 ret = hns3_set_promisc_mode(hw, false, true); 4511 rte_spinlock_unlock(&hw->lock); 4512 if (ret) 4513 hns3_err(hw, "failed to enable allmulticast mode, ret = %d", 4514 ret); 4515 4516 return ret; 4517 } 4518 4519 static int 4520 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) 4521 { 4522 struct hns3_adapter *hns = dev->data->dev_private; 4523 struct hns3_hw *hw = &hns->hw; 4524 int ret; 4525 4526 /* If now in promiscuous mode, must remain in all_multicast mode. */ 4527 if (dev->data->promiscuous) 4528 return 0; 4529 4530 rte_spinlock_lock(&hw->lock); 4531 ret = hns3_set_promisc_mode(hw, false, false); 4532 rte_spinlock_unlock(&hw->lock); 4533 if (ret) 4534 hns3_err(hw, "failed to disable allmulticast mode, ret = %d", 4535 ret); 4536 4537 return ret; 4538 } 4539 4540 static int 4541 hns3_dev_promisc_restore(struct hns3_adapter *hns) 4542 { 4543 struct hns3_hw *hw = &hns->hw; 4544 bool allmulti = hw->data->all_multicast ? 
true : false; 4545 int ret; 4546 4547 if (hw->data->promiscuous) { 4548 ret = hns3_set_promisc_mode(hw, true, true); 4549 if (ret) 4550 hns3_err(hw, "failed to restore promiscuous mode, " 4551 "ret = %d", ret); 4552 return ret; 4553 } 4554 4555 ret = hns3_set_promisc_mode(hw, false, allmulti); 4556 if (ret) 4557 hns3_err(hw, "failed to restore allmulticast mode, ret = %d", 4558 ret); 4559 return ret; 4560 } 4561 4562 static int 4563 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info) 4564 { 4565 struct hns3_sfp_info_cmd *resp; 4566 struct hns3_cmd_desc desc; 4567 int ret; 4568 4569 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true); 4570 resp = (struct hns3_sfp_info_cmd *)desc.data; 4571 resp->query_type = HNS3_ACTIVE_QUERY; 4572 4573 ret = hns3_cmd_send(hw, &desc, 1); 4574 if (ret == -EOPNOTSUPP) { 4575 hns3_warn(hw, "firmware does not support getting SFP info," 4576 " ret = %d.", ret); 4577 return ret; 4578 } else if (ret) { 4579 hns3_err(hw, "get sfp info failed, ret = %d.", ret); 4580 return ret; 4581 } 4582 4583 /* 4584 * In some cases, the MAC speed obtained from the firmware may be 0. It 4585 * shouldn't be set to mac->link_speed. 4586 */ 4587 if (!rte_le_to_cpu_32(resp->sfp_speed)) 4588 return 0; 4589 4590 mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed); 4591 /* 4592 * If resp->supported_speed is 0, the firmware is an old version; 4593 * do not update these params. 4594 */ 4595 if (resp->supported_speed) { 4596 mac_info->query_type = HNS3_ACTIVE_QUERY; 4597 mac_info->supported_speed = 4598 rte_le_to_cpu_32(resp->supported_speed); 4599 mac_info->support_autoneg = resp->autoneg_ability; 4600 mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED 4601 : ETH_LINK_AUTONEG; 4602 } else { 4603 mac_info->query_type = HNS3_DEFAULT_QUERY; 4604 } 4605 4606 return 0; 4607 } 4608 4609 static uint8_t 4610 hns3_check_speed_dup(uint8_t duplex, uint32_t speed) 4611 { 4612 if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M)) 4613 duplex = ETH_LINK_FULL_DUPLEX; 4614 4615 return duplex; 4616 } 4617 4618 static int 4619 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 4620 { 4621 struct hns3_mac *mac = &hw->mac; 4622 int ret; 4623 4624 duplex = hns3_check_speed_dup(duplex, speed); 4625 if (mac->link_speed == speed && mac->link_duplex == duplex) 4626 return 0; 4627 4628 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex); 4629 if (ret) 4630 return ret; 4631 4632 ret = hns3_port_shaper_update(hw, speed); 4633 if (ret) 4634 return ret; 4635 4636 mac->link_speed = speed; 4637 mac->link_duplex = duplex; 4638 4639 return 0; 4640 } 4641 4642 static int 4643 hns3_update_fiber_link_info(struct hns3_hw *hw) 4644 { 4645 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 4646 struct hns3_mac *mac = &hw->mac; 4647 struct hns3_mac mac_info; 4648 int ret; 4649 4650 /* If the firmware does not support getting SFP/qSFP speed, return directly */ 4651 if (!pf->support_sfp_query) 4652 return 0; 4653 4654 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4655 ret = hns3_get_sfp_info(hw, &mac_info); 4656 if (ret == -EOPNOTSUPP) { 4657 pf->support_sfp_query = false; 4658 return ret; 4659 } else if (ret) 4660 return ret; 4661 4662 /* Do nothing if no SFP */ 4663 if (mac_info.link_speed == ETH_SPEED_NUM_NONE) 4664 return 0; 4665 4666 /* 4667 * If query_type is HNS3_ACTIVE_QUERY, there is no need 4668 * to reconfigure the MAC speed.
Otherwise, it indicates 4669 * that the current firmware only supports obtaining the 4670 * SFP speed, and the MAC speed needs to be reconfigured. 4671 */ 4672 mac->query_type = mac_info.query_type; 4673 if (mac->query_type == HNS3_ACTIVE_QUERY) { 4674 if (mac_info.link_speed != mac->link_speed) { 4675 ret = hns3_port_shaper_update(hw, mac_info.link_speed); 4676 if (ret) 4677 return ret; 4678 } 4679 4680 mac->link_speed = mac_info.link_speed; 4681 mac->supported_speed = mac_info.supported_speed; 4682 mac->support_autoneg = mac_info.support_autoneg; 4683 mac->link_autoneg = mac_info.link_autoneg; 4684 4685 return 0; 4686 } 4687 4688 /* Config full duplex for SFP */ 4689 return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed, 4690 ETH_LINK_FULL_DUPLEX); 4691 } 4692 4693 static void 4694 hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac) 4695 { 4696 #define HNS3_PHY_SUPPORTED_SPEED_MASK 0x2f 4697 4698 struct hns3_phy_params_bd0_cmd *req; 4699 uint32_t supported; 4700 4701 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 4702 mac->link_speed = rte_le_to_cpu_32(req->speed); 4703 mac->link_duplex = hns3_get_bit(req->duplex, 4704 HNS3_PHY_DUPLEX_CFG_B); 4705 mac->link_autoneg = hns3_get_bit(req->autoneg, 4706 HNS3_PHY_AUTONEG_CFG_B); 4707 mac->advertising = rte_le_to_cpu_32(req->advertising); 4708 mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising); 4709 supported = rte_le_to_cpu_32(req->supported); 4710 mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK; 4711 mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT); 4712 } 4713 4714 static int 4715 hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac) 4716 { 4717 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 4718 uint16_t i; 4719 int ret; 4720 4721 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 4722 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 4723 true); 4724 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4725 } 4726 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true); 4727 4728 ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 4729 if (ret) { 4730 hns3_err(hw, "get phy parameters failed, ret = %d.", ret); 4731 return ret; 4732 } 4733 4734 hns3_parse_copper_phy_params(desc, mac); 4735 4736 return 0; 4737 } 4738 4739 static int 4740 hns3_update_copper_link_info(struct hns3_hw *hw) 4741 { 4742 struct hns3_mac *mac = &hw->mac; 4743 struct hns3_mac mac_info; 4744 int ret; 4745 4746 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4747 ret = hns3_get_copper_phy_params(hw, &mac_info); 4748 if (ret) 4749 return ret; 4750 4751 if (mac_info.link_speed != mac->link_speed) { 4752 ret = hns3_port_shaper_update(hw, mac_info.link_speed); 4753 if (ret) 4754 return ret; 4755 } 4756 4757 mac->link_speed = mac_info.link_speed; 4758 mac->link_duplex = mac_info.link_duplex; 4759 mac->link_autoneg = mac_info.link_autoneg; 4760 mac->supported_speed = mac_info.supported_speed; 4761 mac->advertising = mac_info.advertising; 4762 mac->lp_advertising = mac_info.lp_advertising; 4763 mac->support_autoneg = mac_info.support_autoneg; 4764 4765 return 0; 4766 } 4767 4768 static int 4769 hns3_update_link_info(struct rte_eth_dev *eth_dev) 4770 { 4771 struct hns3_adapter *hns = eth_dev->data->dev_private; 4772 struct hns3_hw *hw = &hns->hw; 4773 int ret = 0; 4774 4775 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) 4776 ret = hns3_update_copper_link_info(hw); 4777 else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) 4778 ret =
hns3_update_fiber_link_info(hw); 4779 4780 return ret; 4781 } 4782 4783 static int 4784 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) 4785 { 4786 struct hns3_config_mac_mode_cmd *req; 4787 struct hns3_cmd_desc desc; 4788 uint32_t loop_en = 0; 4789 uint8_t val = 0; 4790 int ret; 4791 4792 req = (struct hns3_config_mac_mode_cmd *)desc.data; 4793 4794 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); 4795 if (enable) 4796 val = 1; 4797 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); 4798 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); 4799 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); 4800 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); 4801 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); 4802 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); 4803 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); 4804 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); 4805 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); 4806 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); 4807 4808 /* 4809 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC 4810 * when receiving frames. Otherwise, CRC will be stripped. 4811 */ 4812 if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) 4813 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); 4814 else 4815 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); 4816 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); 4817 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); 4818 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); 4819 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); 4820 4821 ret = hns3_cmd_send(hw, &desc, 1); 4822 if (ret) 4823 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret); 4824 4825 return ret; 4826 } 4827 4828 static int 4829 hns3_get_mac_link_status(struct hns3_hw *hw) 4830 { 4831 struct hns3_link_status_cmd *req; 4832 struct hns3_cmd_desc desc; 4833 int link_status; 4834 int ret; 4835 4836 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); 4837 ret = hns3_cmd_send(hw, &desc, 1); 4838 if (ret) { 4839 hns3_err(hw, "get link status cmd failed %d", ret); 4840 return ETH_LINK_DOWN; 4841 } 4842 4843 req = (struct hns3_link_status_cmd *)desc.data; 4844 link_status = req->status & HNS3_LINK_STATUS_UP_M; 4845 4846 return !!link_status; 4847 } 4848 4849 static bool 4850 hns3_update_link_status(struct hns3_hw *hw) 4851 { 4852 int state; 4853 4854 state = hns3_get_mac_link_status(hw); 4855 if (state != hw->mac.link_status) { 4856 hw->mac.link_status = state; 4857 hns3_warn(hw, "Link status change to %s!", state ? "up" : "down"); 4858 hns3_config_mac_tnl_int(hw, 4859 state == ETH_LINK_UP ? 
true : false); 4860 return true; 4861 } 4862 4863 return false; 4864 } 4865 4866 void 4867 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query) 4868 { 4869 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 4870 struct rte_eth_link new_link; 4871 int ret; 4872 4873 if (query) 4874 hns3_update_port_link_info(dev); 4875 4876 memset(&new_link, 0, sizeof(new_link)); 4877 hns3_setup_linkstatus(dev, &new_link); 4878 4879 ret = rte_eth_linkstatus_set(dev, &new_link); 4880 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0) 4881 hns3_start_report_lse(dev); 4882 } 4883 4884 static void 4885 hns3_service_handler(void *param) 4886 { 4887 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 4888 struct hns3_adapter *hns = eth_dev->data->dev_private; 4889 struct hns3_hw *hw = &hns->hw; 4890 4891 if (!hns3_is_reset_pending(hns)) 4892 hns3_update_linkstatus_and_event(hw, true); 4893 else 4894 hns3_warn(hw, "Cancel the query when reset is pending"); 4895 4896 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); 4897 } 4898 4899 static int 4900 hns3_init_hardware(struct hns3_adapter *hns) 4901 { 4902 struct hns3_hw *hw = &hns->hw; 4903 int ret; 4904 4905 ret = hns3_map_tqp(hw); 4906 if (ret) { 4907 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); 4908 return ret; 4909 } 4910 4911 ret = hns3_init_umv_space(hw); 4912 if (ret) { 4913 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); 4914 return ret; 4915 } 4916 4917 ret = hns3_mac_init(hw); 4918 if (ret) { 4919 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); 4920 goto err_mac_init; 4921 } 4922 4923 ret = hns3_init_mgr_tbl(hw); 4924 if (ret) { 4925 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); 4926 goto err_mac_init; 4927 } 4928 4929 ret = hns3_promisc_init(hw); 4930 if (ret) { 4931 PMD_INIT_LOG(ERR, "Failed to init promisc: %d", 4932 ret); 4933 goto err_mac_init; 4934 } 4935 4936 ret = hns3_init_vlan_config(hns); 4937 if (ret) { 4938 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); 4939 goto err_mac_init; 4940 } 4941 4942 ret = hns3_dcb_init(hw); 4943 if (ret) { 4944 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); 4945 goto err_mac_init; 4946 } 4947 4948 ret = hns3_init_fd_config(hns); 4949 if (ret) { 4950 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); 4951 goto err_mac_init; 4952 } 4953 4954 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); 4955 if (ret) { 4956 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); 4957 goto err_mac_init; 4958 } 4959 4960 ret = hns3_config_gro(hw, false); 4961 if (ret) { 4962 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 4963 goto err_mac_init; 4964 } 4965 4966 /* 4967 * In the initialization clearing the all hardware mapping relationship 4968 * configurations between queues and interrupt vectors is needed, so 4969 * some error caused by the residual configurations, such as the 4970 * unexpected interrupt, can be avoid. 
4971 */ 4972 ret = hns3_init_ring_with_vector(hw); 4973 if (ret) { 4974 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); 4975 goto err_mac_init; 4976 } 4977 4978 return 0; 4979 4980 err_mac_init: 4981 hns3_uninit_umv_space(hw); 4982 return ret; 4983 } 4984 4985 static int 4986 hns3_clear_hw(struct hns3_hw *hw) 4987 { 4988 struct hns3_cmd_desc desc; 4989 int ret; 4990 4991 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false); 4992 4993 ret = hns3_cmd_send(hw, &desc, 1); 4994 if (ret && ret != -EOPNOTSUPP) 4995 return ret; 4996 4997 return 0; 4998 } 4999 5000 static void 5001 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable) 5002 { 5003 uint32_t val; 5004 5005 /* 5006 * The new firmware supports reporting more hardware error types in 5007 * MSI-X mode. These errors are defined as RAS errors in hardware 5008 * and belong to a different type from the MSI-X errors processed 5009 * by the network driver. 5010 * 5011 * The network driver should enable this new error reporting during 5012 * initialization. */ 5013 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 5014 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0); 5015 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); 5016 } 5017 5018 static uint32_t 5019 hns3_set_firber_default_support_speed(struct hns3_hw *hw) 5020 { 5021 struct hns3_mac *mac = &hw->mac; 5022 5023 switch (mac->link_speed) { 5024 case ETH_SPEED_NUM_1G: 5025 return HNS3_FIBER_LINK_SPEED_1G_BIT; 5026 case ETH_SPEED_NUM_10G: 5027 return HNS3_FIBER_LINK_SPEED_10G_BIT; 5028 case ETH_SPEED_NUM_25G: 5029 return HNS3_FIBER_LINK_SPEED_25G_BIT; 5030 case ETH_SPEED_NUM_40G: 5031 return HNS3_FIBER_LINK_SPEED_40G_BIT; 5032 case ETH_SPEED_NUM_50G: 5033 return HNS3_FIBER_LINK_SPEED_50G_BIT; 5034 case ETH_SPEED_NUM_100G: 5035 return HNS3_FIBER_LINK_SPEED_100G_BIT; 5036 case ETH_SPEED_NUM_200G: 5037 return HNS3_FIBER_LINK_SPEED_200G_BIT; 5038 default: 5039 hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed); 5040 return 0; 5041 } 5042 } 5043 5044 /* 5045 * Validity of supported_speed for fiber and copper media types can be 5046 * guaranteed by the following policy: 5047 * Copper: 5048 * Although the initialization of the phy in the firmware may not be 5049 * completed, the firmware can guarantee that the supported_speed is 5050 * a valid value. 5051 * Fiber: 5052 * If the firmware version supports the active query mode of the 5053 * HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained 5054 * through it. If unsupported, use the SFP's speed as the value of the 5055 * supported_speed. 5056 */ 5057 static int 5058 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev) 5059 { 5060 struct hns3_adapter *hns = eth_dev->data->dev_private; 5061 struct hns3_hw *hw = &hns->hw; 5062 struct hns3_mac *mac = &hw->mac; 5063 int ret; 5064 5065 ret = hns3_update_link_info(eth_dev); 5066 if (ret) 5067 return ret; 5068 5069 if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) { 5070 /* 5071 * Some firmware does not support reporting supported_speed, 5072 * and only reports the effective speed of the SFP. In this case, it 5073 * is necessary to use the SFP's speed as the supported_speed.
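 * With such firmware, a port whose SFP currently runs at 25G ends up with supported_speed holding only HNS3_FIBER_LINK_SPEED_25G_BIT, as derived by hns3_set_firber_default_support_speed().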
5074 */ 5075 if (mac->supported_speed == 0) 5076 mac->supported_speed = 5077 hns3_set_firber_default_support_speed(hw); 5078 } 5079 5080 return 0; 5081 } 5082 5083 static void 5084 hns3_get_fc_autoneg_capability(struct hns3_adapter *hns) 5085 { 5086 struct hns3_mac *mac = &hns->hw.mac; 5087 5088 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) { 5089 hns->pf.support_fc_autoneg = true; 5090 return; 5091 } 5092 5093 /* 5094 * Flow control auto-negotiation requires the cooperation of the driver 5095 * and firmware. Currently, the optical port does not support flow 5096 * control auto-negotiation. 5097 */ 5098 hns->pf.support_fc_autoneg = false; 5099 } 5100 5101 static int 5102 hns3_init_pf(struct rte_eth_dev *eth_dev) 5103 { 5104 struct rte_device *dev = eth_dev->device; 5105 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5106 struct hns3_adapter *hns = eth_dev->data->dev_private; 5107 struct hns3_hw *hw = &hns->hw; 5108 int ret; 5109 5110 PMD_INIT_FUNC_TRACE(); 5111 5112 /* Get hardware io base address from pcie BAR2 IO space */ 5113 hw->io_base = pci_dev->mem_resource[2].addr; 5114 5115 /* Firmware command queue initialization */ 5116 ret = hns3_cmd_init_queue(hw); 5117 if (ret) { 5118 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); 5119 goto err_cmd_init_queue; 5120 } 5121 5122 hns3_clear_all_event_cause(hw); 5123 5124 /* Firmware command initialization */ 5125 ret = hns3_cmd_init(hw); 5126 if (ret) { 5127 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); 5128 goto err_cmd_init; 5129 } 5130 5131 /* 5132 * To ensure that the hardware environment is clean during 5133 * initialization, the driver actively clears the hardware environment, 5134 * including the PF's and the corresponding VFs' vlan, mac and 5135 * flow table configurations, etc. 5136 */ 5137 ret = hns3_clear_hw(hw); 5138 if (ret) { 5139 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret); 5140 goto err_cmd_init; 5141 } 5142 5143 /* Clear the hardware statistics held in the imissed registers.
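They are read back with the clear flag set so that subsequent imissed readings start from a clean baseline after probe.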
*/ 5144 ret = hns3_update_imissed_stats(hw, true); 5145 if (ret) { 5146 hns3_err(hw, "clear imissed stats failed, ret = %d", ret); 5147 goto err_cmd_init; 5148 } 5149 5150 hns3_config_all_msix_error(hw, true); 5151 5152 ret = rte_intr_callback_register(&pci_dev->intr_handle, 5153 hns3_interrupt_handler, 5154 eth_dev); 5155 if (ret) { 5156 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); 5157 goto err_intr_callback_register; 5158 } 5159 5160 ret = hns3_ptp_init(hw); 5161 if (ret) 5162 goto err_get_config; 5163 5164 /* Enable interrupt */ 5165 rte_intr_enable(&pci_dev->intr_handle); 5166 hns3_pf_enable_irq0(hw); 5167 5168 /* Get configuration */ 5169 ret = hns3_get_configuration(hw); 5170 if (ret) { 5171 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); 5172 goto err_get_config; 5173 } 5174 5175 ret = hns3_tqp_stats_init(hw); 5176 if (ret) 5177 goto err_get_config; 5178 5179 ret = hns3_init_hardware(hns); 5180 if (ret) { 5181 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret); 5182 goto err_init_hw; 5183 } 5184 5185 /* Initialize flow director filter list & hash */ 5186 ret = hns3_fdir_filter_init(hns); 5187 if (ret) { 5188 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret); 5189 goto err_fdir; 5190 } 5191 5192 hns3_rss_set_default_args(hw); 5193 5194 ret = hns3_enable_hw_error_intr(hns, true); 5195 if (ret) { 5196 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", 5197 ret); 5198 goto err_enable_intr; 5199 } 5200 5201 ret = hns3_get_port_supported_speed(eth_dev); 5202 if (ret) { 5203 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported " 5204 "by device, ret = %d.", ret); 5205 goto err_supported_speed; 5206 } 5207 5208 hns3_get_fc_autoneg_capability(hns); 5209 5210 hns3_tm_conf_init(eth_dev); 5211 5212 return 0; 5213 5214 err_supported_speed: 5215 (void)hns3_enable_hw_error_intr(hns, false); 5216 err_enable_intr: 5217 hns3_fdir_filter_uninit(hns); 5218 err_fdir: 5219 hns3_uninit_umv_space(hw); 5220 err_init_hw: 5221 hns3_tqp_stats_uninit(hw); 5222 err_get_config: 5223 hns3_pf_disable_irq0(hw); 5224 rte_intr_disable(&pci_dev->intr_handle); 5225 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5226 eth_dev); 5227 err_intr_callback_register: 5228 err_cmd_init: 5229 hns3_cmd_uninit(hw); 5230 hns3_cmd_destroy_queue(hw); 5231 err_cmd_init_queue: 5232 hw->io_base = NULL; 5233 5234 return ret; 5235 } 5236 5237 static void 5238 hns3_uninit_pf(struct rte_eth_dev *eth_dev) 5239 { 5240 struct hns3_adapter *hns = eth_dev->data->dev_private; 5241 struct rte_device *dev = eth_dev->device; 5242 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5243 struct hns3_hw *hw = &hns->hw; 5244 5245 PMD_INIT_FUNC_TRACE(); 5246 5247 hns3_tm_conf_uninit(eth_dev); 5248 hns3_enable_hw_error_intr(hns, false); 5249 hns3_rss_uninit(hns); 5250 (void)hns3_config_gro(hw, false); 5251 hns3_promisc_uninit(hw); 5252 hns3_fdir_filter_uninit(hns); 5253 hns3_uninit_umv_space(hw); 5254 hns3_tqp_stats_uninit(hw); 5255 hns3_config_mac_tnl_int(hw, false); 5256 hns3_pf_disable_irq0(hw); 5257 rte_intr_disable(&pci_dev->intr_handle); 5258 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5259 eth_dev); 5260 hns3_config_all_msix_error(hw, false); 5261 hns3_cmd_uninit(hw); 5262 hns3_cmd_destroy_queue(hw); 5263 hw->io_base = NULL; 5264 } 5265 5266 static uint32_t 5267 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds) 5268 { 5269 uint32_t speed_bit; 5270 5271 switch (link_speeds & ~ETH_LINK_SPEED_FIXED) { 5272 case ETH_LINK_SPEED_10M: 5273 
speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT; 5274 break; 5275 case ETH_LINK_SPEED_10M_HD: 5276 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT; 5277 break; 5278 case ETH_LINK_SPEED_100M: 5279 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT; 5280 break; 5281 case ETH_LINK_SPEED_100M_HD: 5282 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT; 5283 break; 5284 case ETH_LINK_SPEED_1G: 5285 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT; 5286 break; 5287 default: 5288 speed_bit = 0; 5289 break; 5290 } 5291 5292 return speed_bit; 5293 } 5294 5295 static uint32_t 5296 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds) 5297 { 5298 uint32_t speed_bit; 5299 5300 switch (link_speeds & ~ETH_LINK_SPEED_FIXED) { 5301 case ETH_LINK_SPEED_1G: 5302 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT; 5303 break; 5304 case ETH_LINK_SPEED_10G: 5305 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT; 5306 break; 5307 case ETH_LINK_SPEED_25G: 5308 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT; 5309 break; 5310 case ETH_LINK_SPEED_40G: 5311 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT; 5312 break; 5313 case ETH_LINK_SPEED_50G: 5314 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT; 5315 break; 5316 case ETH_LINK_SPEED_100G: 5317 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT; 5318 break; 5319 case ETH_LINK_SPEED_200G: 5320 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT; 5321 break; 5322 default: 5323 speed_bit = 0; 5324 break; 5325 } 5326 5327 return speed_bit; 5328 } 5329 5330 static int 5331 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) 5332 { 5333 struct hns3_mac *mac = &hw->mac; 5334 uint32_t supported_speed = mac->supported_speed; 5335 uint32_t speed_bit = 0; 5336 5337 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 5338 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds); 5339 else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) 5340 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds); 5341 5342 if (!(speed_bit & supported_speed)) { 5343 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.", 5344 link_speeds); 5345 return -EINVAL; 5346 } 5347 5348 return 0; 5349 } 5350 5351 static inline uint32_t 5352 hns3_get_link_speed(uint32_t link_speeds) 5353 { 5354 uint32_t speed = ETH_SPEED_NUM_NONE; 5355 5356 if (link_speeds & ETH_LINK_SPEED_10M || 5357 link_speeds & ETH_LINK_SPEED_10M_HD) 5358 speed = ETH_SPEED_NUM_10M; 5359 if (link_speeds & ETH_LINK_SPEED_100M || 5360 link_speeds & ETH_LINK_SPEED_100M_HD) 5361 speed = ETH_SPEED_NUM_100M; 5362 if (link_speeds & ETH_LINK_SPEED_1G) 5363 speed = ETH_SPEED_NUM_1G; 5364 if (link_speeds & ETH_LINK_SPEED_10G) 5365 speed = ETH_SPEED_NUM_10G; 5366 if (link_speeds & ETH_LINK_SPEED_25G) 5367 speed = ETH_SPEED_NUM_25G; 5368 if (link_speeds & ETH_LINK_SPEED_40G) 5369 speed = ETH_SPEED_NUM_40G; 5370 if (link_speeds & ETH_LINK_SPEED_50G) 5371 speed = ETH_SPEED_NUM_50G; 5372 if (link_speeds & ETH_LINK_SPEED_100G) 5373 speed = ETH_SPEED_NUM_100G; 5374 if (link_speeds & ETH_LINK_SPEED_200G) 5375 speed = ETH_SPEED_NUM_200G; 5376 5377 return speed; 5378 } 5379 5380 static uint8_t 5381 hns3_get_link_duplex(uint32_t link_speeds) 5382 { 5383 if ((link_speeds & ETH_LINK_SPEED_10M_HD) || 5384 (link_speeds & ETH_LINK_SPEED_100M_HD)) 5385 return ETH_LINK_HALF_DUPLEX; 5386 else 5387 return ETH_LINK_FULL_DUPLEX; 5388 } 5389 5390 static int 5391 hns3_set_copper_port_link_speed(struct hns3_hw *hw, 5392 struct hns3_set_link_speed_cfg *cfg) 5393 { 5394 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 5395 struct hns3_phy_params_bd0_cmd *req; 5396 uint16_t i; 5397 5398 for (i = 
0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 5399 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 5400 false); 5401 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 5402 } 5403 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false); 5404 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 5405 req->autoneg = cfg->autoneg; 5406 5407 /* 5408 * The full speed capability is used to negotiate when 5409 * auto-negotiation is enabled. 5410 */ 5411 if (cfg->autoneg) { 5412 req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT | 5413 HNS3_PHY_LINK_SPEED_10M_HD_BIT | 5414 HNS3_PHY_LINK_SPEED_100M_BIT | 5415 HNS3_PHY_LINK_SPEED_100M_HD_BIT | 5416 HNS3_PHY_LINK_SPEED_1000M_BIT; 5417 } else { 5418 req->speed = cfg->speed; 5419 req->duplex = cfg->duplex; 5420 } 5421 5422 return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 5423 } 5424 5425 static int 5426 hns3_set_autoneg(struct hns3_hw *hw, bool enable) 5427 { 5428 struct hns3_config_auto_neg_cmd *req; 5429 struct hns3_cmd_desc desc; 5430 uint32_t flag = 0; 5431 int ret; 5432 5433 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false); 5434 5435 req = (struct hns3_config_auto_neg_cmd *)desc.data; 5436 if (enable) 5437 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1); 5438 req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag); 5439 5440 ret = hns3_cmd_send(hw, &desc, 1); 5441 if (ret) 5442 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret); 5443 5444 return ret; 5445 } 5446 5447 static int 5448 hns3_set_fiber_port_link_speed(struct hns3_hw *hw, 5449 struct hns3_set_link_speed_cfg *cfg) 5450 { 5451 int ret; 5452 5453 if (hw->mac.support_autoneg) { 5454 ret = hns3_set_autoneg(hw, cfg->autoneg); 5455 if (ret) { 5456 hns3_err(hw, "failed to configure auto-negotiation."); 5457 return ret; 5458 } 5459 5460 /* 5461 * To enable auto-negotiation, we only need to turn on the 5462 * auto-negotiation switch; the firmware then sets all speed 5463 * capabilities. 5464 */ 5465 if (cfg->autoneg) 5466 return 0; 5467 } 5468 5469 /* 5470 * Some hardware doesn't support auto-negotiation, but users may not 5471 * configure link_speeds (default 0), which means auto-negotiation. 5472 * In this case, a warning message needs to be printed instead of 5473 * an error. 5474 */ 5475 if (cfg->autoneg) { 5476 hns3_warn(hw, "auto-negotiation is not supported."); 5477 return 0; 5478 } 5479 5480 return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex); 5481 } 5482 5483 static int 5484 hns3_set_port_link_speed(struct hns3_hw *hw, 5485 struct hns3_set_link_speed_cfg *cfg) 5486 { 5487 int ret; 5488 5489 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) { 5490 #if defined(RTE_HNS3_ONLY_1630_FPGA) 5491 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 5492 if (pf->is_tmp_phy) 5493 return 0; 5494 #endif 5495 5496 ret = hns3_set_copper_port_link_speed(hw, cfg); 5497 if (ret) { 5498 hns3_err(hw, "failed to set copper port link speed, " 5499 "ret = %d.", ret); 5500 return ret; 5501 } 5502 } else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) { 5503 ret = hns3_set_fiber_port_link_speed(hw, cfg); 5504 if (ret) { 5505 hns3_err(hw, "failed to set fiber port link speed, " 5506 "ret = %d.", ret); 5507 return ret; 5508 } 5509 } 5510 5511 return 0; 5512 } 5513 5514 static int 5515 hns3_apply_link_speed(struct hns3_hw *hw) 5516 { 5517 struct rte_eth_conf *conf = &hw->data->dev_conf; 5518 struct hns3_set_link_speed_cfg cfg; 5519 int ret; 5520 5521 memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg)); 5522 cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ?
5523 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 5524 if (cfg.autoneg != ETH_LINK_AUTONEG) { 5525 ret = hns3_check_port_speed(hw, conf->link_speeds); 5526 if (ret) 5527 return ret; 5528 5529 cfg.speed = hns3_get_link_speed(conf->link_speeds); 5530 cfg.duplex = hns3_get_link_duplex(conf->link_speeds); 5531 } 5532 5533 return hns3_set_port_link_speed(hw, &cfg); 5534 } 5535 5536 static int 5537 hns3_do_start(struct hns3_adapter *hns, bool reset_queue) 5538 { 5539 struct hns3_hw *hw = &hns->hw; 5540 int ret; 5541 5542 ret = hns3_dcb_cfg_update(hns); 5543 if (ret) 5544 return ret; 5545 5546 /* 5547 * The hns3_dcb_cfg_update may configure TM module, so 5548 * hns3_tm_conf_update must called later. 5549 */ 5550 ret = hns3_tm_conf_update(hw); 5551 if (ret) { 5552 PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret); 5553 return ret; 5554 } 5555 5556 hns3_enable_rxd_adv_layout(hw); 5557 5558 ret = hns3_init_queues(hns, reset_queue); 5559 if (ret) { 5560 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret); 5561 return ret; 5562 } 5563 5564 ret = hns3_cfg_mac_mode(hw, true); 5565 if (ret) { 5566 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret); 5567 goto err_config_mac_mode; 5568 } 5569 5570 ret = hns3_apply_link_speed(hw); 5571 if (ret) 5572 goto err_config_mac_mode; 5573 5574 return 0; 5575 5576 err_config_mac_mode: 5577 (void)hns3_cfg_mac_mode(hw, false); 5578 hns3_dev_release_mbufs(hns); 5579 /* 5580 * Here is exception handling, hns3_reset_all_tqps will have the 5581 * corresponding error message if it is handled incorrectly, so it is 5582 * not necessary to check hns3_reset_all_tqps return value, here keep 5583 * ret as the error code causing the exception. 5584 */ 5585 (void)hns3_reset_all_tqps(hns); 5586 return ret; 5587 } 5588 5589 static int 5590 hns3_map_rx_interrupt(struct rte_eth_dev *dev) 5591 { 5592 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5593 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5594 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5595 uint16_t base = RTE_INTR_VEC_ZERO_OFFSET; 5596 uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5597 uint32_t intr_vector; 5598 uint16_t q_id; 5599 int ret; 5600 5601 /* 5602 * hns3 needs a separate interrupt to be used as event interrupt which 5603 * could not be shared with task queue pair, so KERNEL drivers need 5604 * support multiple interrupt vectors. 5605 */ 5606 if (dev->data->dev_conf.intr_conf.rxq == 0 || 5607 !rte_intr_cap_multiple(intr_handle)) 5608 return 0; 5609 5610 rte_intr_disable(intr_handle); 5611 intr_vector = hw->used_rx_queues; 5612 /* creates event fd for each intr vector when MSIX is used */ 5613 if (rte_intr_efd_enable(intr_handle, intr_vector)) 5614 return -EINVAL; 5615 5616 if (intr_handle->intr_vec == NULL) { 5617 intr_handle->intr_vec = 5618 rte_zmalloc("intr_vec", 5619 hw->used_rx_queues * sizeof(int), 0); 5620 if (intr_handle->intr_vec == NULL) { 5621 hns3_err(hw, "failed to allocate %u rx_queues intr_vec", 5622 hw->used_rx_queues); 5623 ret = -ENOMEM; 5624 goto alloc_intr_vec_error; 5625 } 5626 } 5627 5628 if (rte_intr_allow_others(intr_handle)) { 5629 vec = RTE_INTR_VEC_RXTX_OFFSET; 5630 base = RTE_INTR_VEC_RXTX_OFFSET; 5631 } 5632 5633 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5634 ret = hns3_bind_ring_with_vector(hw, vec, true, 5635 HNS3_RING_TYPE_RX, q_id); 5636 if (ret) 5637 goto bind_vector_error; 5638 intr_handle->intr_vec[q_id] = vec; 5639 /* 5640 * If there are not enough efds (e.g. 
not enough interrupts), the 5641 * remaining queues will be bound to the last interrupt. 5642 */ 5643 if (vec < base + intr_handle->nb_efd - 1) 5644 vec++; 5645 } 5646 rte_intr_enable(intr_handle); 5647 return 0; 5648 5649 bind_vector_error: 5650 rte_free(intr_handle->intr_vec); 5651 intr_handle->intr_vec = NULL; 5652 alloc_intr_vec_error: 5653 rte_intr_efd_disable(intr_handle); 5654 return ret; 5655 } 5656 5657 static int 5658 hns3_restore_rx_interrupt(struct hns3_hw *hw) 5659 { 5660 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 5661 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5662 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5663 uint16_t q_id; 5664 int ret; 5665 5666 if (dev->data->dev_conf.intr_conf.rxq == 0) 5667 return 0; 5668 5669 if (rte_intr_dp_is_en(intr_handle)) { 5670 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5671 ret = hns3_bind_ring_with_vector(hw, 5672 intr_handle->intr_vec[q_id], true, 5673 HNS3_RING_TYPE_RX, q_id); 5674 if (ret) 5675 return ret; 5676 } 5677 } 5678 5679 return 0; 5680 } 5681 5682 static void 5683 hns3_restore_filter(struct rte_eth_dev *dev) 5684 { 5685 hns3_restore_rss_filter(dev); 5686 } 5687 5688 static int 5689 hns3_dev_start(struct rte_eth_dev *dev) 5690 { 5691 struct hns3_adapter *hns = dev->data->dev_private; 5692 struct hns3_hw *hw = &hns->hw; 5693 int ret; 5694 5695 PMD_INIT_FUNC_TRACE(); 5696 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) 5697 return -EBUSY; 5698 5699 rte_spinlock_lock(&hw->lock); 5700 hw->adapter_state = HNS3_NIC_STARTING; 5701 5702 ret = hns3_do_start(hns, true); 5703 if (ret) { 5704 hw->adapter_state = HNS3_NIC_CONFIGURED; 5705 rte_spinlock_unlock(&hw->lock); 5706 return ret; 5707 } 5708 ret = hns3_map_rx_interrupt(dev); 5709 if (ret) 5710 goto map_rx_inter_err; 5711 5712 /* 5713 * There are three registers used to control the status of a TQP 5714 * (containing a pair of Tx and Rx queues) in the new version network 5715 * engine. One is used to control the enabling of the Tx queue, another is 5716 * used to control the enabling of the Rx queue, and the last is the master 5717 * switch used to control the enabling of the TQP. The Tx register and 5718 * TQP register must be enabled at the same time to enable a Tx queue. 5719 * The same applies to the Rx queue. For the older network engine, this 5720 * function only refreshes the enabled flag, and is used to update the 5721 * status of the queues in the DPDK framework. 5722 */ 5723 ret = hns3_start_all_txqs(dev); 5724 if (ret) 5725 goto map_rx_inter_err; 5726 5727 ret = hns3_start_all_rxqs(dev); 5728 if (ret) 5729 goto start_all_rxqs_fail; 5730 5731 hw->adapter_state = HNS3_NIC_STARTED; 5732 rte_spinlock_unlock(&hw->lock); 5733 5734 hns3_rx_scattered_calc(dev); 5735 hns3_set_rxtx_function(dev); 5736 hns3_mp_req_start_rxtx(dev); 5737 5738 hns3_restore_filter(dev); 5739 5740 /* Enable interrupt of all rx queues before enabling queues */ 5741 hns3_dev_all_rx_queue_intr_enable(hw, true); 5742 5743 /* 5744 * After finishing the initialization, enable the tqps to receive/transmit 5745 * packets and refresh all queue status.
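 * This is the master TQP enable switch described in the comment above hns3_start_all_txqs(); the per-queue Tx and Rx enables were already set there.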
5746 */ 5747 hns3_start_tqps(hw); 5748 5749 hns3_tm_dev_start_proc(hw); 5750 5751 if (dev->data->dev_conf.intr_conf.lsc != 0) 5752 hns3_dev_link_update(dev, 0); 5753 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); 5754 5755 hns3_info(hw, "hns3 dev start successful!"); 5756 5757 return 0; 5758 5759 start_all_rxqs_fail: 5760 hns3_stop_all_txqs(dev); 5761 map_rx_inter_err: 5762 (void)hns3_do_stop(hns); 5763 hw->adapter_state = HNS3_NIC_CONFIGURED; 5764 rte_spinlock_unlock(&hw->lock); 5765 5766 return ret; 5767 } 5768 5769 static int 5770 hns3_do_stop(struct hns3_adapter *hns) 5771 { 5772 struct hns3_hw *hw = &hns->hw; 5773 int ret; 5774 5775 /* 5776 * The "hns3_do_stop" function will also be called by .stop_service to 5777 * prepare reset. At the time of global or IMP reset, the command cannot 5778 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be 5779 * accessed during the reset process. So the mbuf can not be released 5780 * during reset and is required to be released after the reset is 5781 * completed. 5782 */ 5783 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) 5784 hns3_dev_release_mbufs(hns); 5785 5786 ret = hns3_cfg_mac_mode(hw, false); 5787 if (ret) 5788 return ret; 5789 hw->mac.link_status = ETH_LINK_DOWN; 5790 5791 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { 5792 hns3_configure_all_mac_addr(hns, true); 5793 ret = hns3_reset_all_tqps(hns); 5794 if (ret) { 5795 hns3_err(hw, "failed to reset all queues ret = %d.", 5796 ret); 5797 return ret; 5798 } 5799 } 5800 hw->mac.default_addr_setted = false; 5801 return 0; 5802 } 5803 5804 static void 5805 hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) 5806 { 5807 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5808 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5809 struct hns3_adapter *hns = dev->data->dev_private; 5810 struct hns3_hw *hw = &hns->hw; 5811 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; 5812 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5813 uint16_t q_id; 5814 5815 if (dev->data->dev_conf.intr_conf.rxq == 0) 5816 return; 5817 5818 /* unmap the ring with vector */ 5819 if (rte_intr_allow_others(intr_handle)) { 5820 vec = RTE_INTR_VEC_RXTX_OFFSET; 5821 base = RTE_INTR_VEC_RXTX_OFFSET; 5822 } 5823 if (rte_intr_dp_is_en(intr_handle)) { 5824 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5825 (void)hns3_bind_ring_with_vector(hw, vec, false, 5826 HNS3_RING_TYPE_RX, 5827 q_id); 5828 if (vec < base + intr_handle->nb_efd - 1) 5829 vec++; 5830 } 5831 } 5832 /* Clean datapath event and queue/vec mapping */ 5833 rte_intr_efd_disable(intr_handle); 5834 if (intr_handle->intr_vec) { 5835 rte_free(intr_handle->intr_vec); 5836 intr_handle->intr_vec = NULL; 5837 } 5838 } 5839 5840 static int 5841 hns3_dev_stop(struct rte_eth_dev *dev) 5842 { 5843 struct hns3_adapter *hns = dev->data->dev_private; 5844 struct hns3_hw *hw = &hns->hw; 5845 5846 PMD_INIT_FUNC_TRACE(); 5847 dev->data->dev_started = 0; 5848 5849 hw->adapter_state = HNS3_NIC_STOPPING; 5850 hns3_set_rxtx_function(dev); 5851 rte_wmb(); 5852 /* Disable datapath on secondary process. */ 5853 hns3_mp_req_stop_rxtx(dev); 5854 /* Prevent crashes when queues are still in use. 
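The delay below waits roughly one millisecond per TQP so that in-flight datapath threads can observe the updated Rx/Tx burst functions and quiesce.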
*/ 5855 rte_delay_ms(hw->tqps_num); 5856 5857 rte_spinlock_lock(&hw->lock); 5858 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { 5859 hns3_tm_dev_stop_proc(hw); 5860 hns3_config_mac_tnl_int(hw, false); 5861 hns3_stop_tqps(hw); 5862 hns3_do_stop(hns); 5863 hns3_unmap_rx_interrupt(dev); 5864 hw->adapter_state = HNS3_NIC_CONFIGURED; 5865 } 5866 hns3_rx_scattered_reset(dev); 5867 rte_eal_alarm_cancel(hns3_service_handler, dev); 5868 hns3_stop_report_lse(dev); 5869 rte_spinlock_unlock(&hw->lock); 5870 5871 return 0; 5872 } 5873 5874 static int 5875 hns3_dev_close(struct rte_eth_dev *eth_dev) 5876 { 5877 struct hns3_adapter *hns = eth_dev->data->dev_private; 5878 struct hns3_hw *hw = &hns->hw; 5879 int ret = 0; 5880 5881 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 5882 rte_free(eth_dev->process_private); 5883 eth_dev->process_private = NULL; 5884 return 0; 5885 } 5886 5887 if (hw->adapter_state == HNS3_NIC_STARTED) 5888 ret = hns3_dev_stop(eth_dev); 5889 5890 hw->adapter_state = HNS3_NIC_CLOSING; 5891 hns3_reset_abort(hns); 5892 hw->adapter_state = HNS3_NIC_CLOSED; 5893 5894 hns3_configure_all_mc_mac_addr(hns, true); 5895 hns3_remove_all_vlan_table(hns); 5896 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0); 5897 hns3_uninit_pf(eth_dev); 5898 hns3_free_all_queues(eth_dev); 5899 rte_free(hw->reset.wait_data); 5900 rte_free(eth_dev->process_private); 5901 eth_dev->process_private = NULL; 5902 hns3_mp_uninit_primary(); 5903 hns3_warn(hw, "Close port %u finished", hw->data->port_id); 5904 5905 return ret; 5906 } 5907 5908 static void 5909 hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause, 5910 bool *tx_pause) 5911 { 5912 struct hns3_mac *mac = &hw->mac; 5913 uint32_t advertising = mac->advertising; 5914 uint32_t lp_advertising = mac->lp_advertising; 5915 *rx_pause = false; 5916 *tx_pause = false; 5917 5918 if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) { 5919 *rx_pause = true; 5920 *tx_pause = true; 5921 } else if (advertising & lp_advertising & 5922 HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) { 5923 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5924 *rx_pause = true; 5925 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 5926 *tx_pause = true; 5927 } 5928 } 5929 5930 static enum hns3_fc_mode 5931 hns3_get_autoneg_fc_mode(struct hns3_hw *hw) 5932 { 5933 enum hns3_fc_mode current_mode; 5934 bool rx_pause = false; 5935 bool tx_pause = false; 5936 5937 switch (hw->mac.media_type) { 5938 case HNS3_MEDIA_TYPE_COPPER: 5939 hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause); 5940 break; 5941 5942 /* 5943 * Flow control auto-negotiation is not supported for fiber and 5944 * backplane media types.
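 * For copper, the helper above resolves the advertised PAUSE/ASYM_PAUSE
 * ability bits in the usual IEEE 802.3 Annex 28B style, roughly:
 *   local PAUSE & peer PAUSE               -> rx_pause = tx_pause = true
 *   both ASYM_PAUSE, only local PAUSE      -> rx_pause = true
 *   both ASYM_PAUSE, only peer PAUSE       -> tx_pause = true
 *   any other combination                  -> no pause in either direction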
5945 */ 5946 case HNS3_MEDIA_TYPE_FIBER: 5947 case HNS3_MEDIA_TYPE_BACKPLANE: 5948 hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled."); 5949 current_mode = hw->requested_fc_mode; 5950 goto out; 5951 default: 5952 hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).", 5953 hw->mac.media_type); 5954 current_mode = HNS3_FC_NONE; 5955 goto out; 5956 } 5957 5958 if (rx_pause && tx_pause) 5959 current_mode = HNS3_FC_FULL; 5960 else if (rx_pause) 5961 current_mode = HNS3_FC_RX_PAUSE; 5962 else if (tx_pause) 5963 current_mode = HNS3_FC_TX_PAUSE; 5964 else 5965 current_mode = HNS3_FC_NONE; 5966 5967 out: 5968 return current_mode; 5969 } 5970 5971 static enum hns3_fc_mode 5972 hns3_get_current_fc_mode(struct rte_eth_dev *dev) 5973 { 5974 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5975 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5976 struct hns3_mac *mac = &hw->mac; 5977 5978 /* 5979 * When the flow control mode is obtained, the device may not have 5980 * completed auto-negotiation. It is necessary to wait for link establishment. 5981 */ 5982 (void)hns3_dev_link_update(dev, 1); 5983 5984 /* 5985 * If the link auto-negotiation of the NIC is disabled, or the flow 5986 * control auto-negotiation is not supported, the forced flow control 5987 * mode is used. 5988 */ 5989 if (mac->link_autoneg == 0 || !pf->support_fc_autoneg) 5990 return hw->requested_fc_mode; 5991 5992 return hns3_get_autoneg_fc_mode(hw); 5993 } 5994 5995 static int 5996 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5997 { 5998 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5999 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6000 enum hns3_fc_mode current_mode; 6001 6002 current_mode = hns3_get_current_fc_mode(dev); 6003 switch (current_mode) { 6004 case HNS3_FC_FULL: 6005 fc_conf->mode = RTE_FC_FULL; 6006 break; 6007 case HNS3_FC_TX_PAUSE: 6008 fc_conf->mode = RTE_FC_TX_PAUSE; 6009 break; 6010 case HNS3_FC_RX_PAUSE: 6011 fc_conf->mode = RTE_FC_RX_PAUSE; 6012 break; 6013 case HNS3_FC_NONE: 6014 default: 6015 fc_conf->mode = RTE_FC_NONE; 6016 break; 6017 } 6018 6019 fc_conf->pause_time = pf->pause_time; 6020 fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0; 6021 6022 return 0; 6023 } 6024 6025 static void 6026 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode) 6027 { 6028 switch (mode) { 6029 case RTE_FC_NONE: 6030 hw->requested_fc_mode = HNS3_FC_NONE; 6031 break; 6032 case RTE_FC_RX_PAUSE: 6033 hw->requested_fc_mode = HNS3_FC_RX_PAUSE; 6034 break; 6035 case RTE_FC_TX_PAUSE: 6036 hw->requested_fc_mode = HNS3_FC_TX_PAUSE; 6037 break; 6038 case RTE_FC_FULL: 6039 hw->requested_fc_mode = HNS3_FC_FULL; 6040 break; 6041 default: 6042 hw->requested_fc_mode = HNS3_FC_NONE; 6043 hns3_warn(hw, "fc_mode(%u) is out of range and is " 6044 "set to RTE_FC_NONE", mode); 6045 break; 6046 } 6047 } 6048 6049 static int 6050 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) 6051 { 6052 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 6053 6054 if (!pf->support_fc_autoneg) { 6055 if (autoneg != 0) { 6056 hns3_err(hw, "unsupported fc auto-negotiation setting."); 6057 return -EOPNOTSUPP; 6058 } 6059 6060 /* 6061 * Flow control auto-negotiation of the NIC is not supported, 6062 * but other auto-negotiation features may be supported.
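 * For example (illustrative only): on a port whose link autoneg is on
 * (hw->mac.link_autoneg == 1) but whose FC autoneg is unsupported,
 * rte_eth_dev_flow_ctrl_set() with fc_conf->autoneg == 0 is rejected
 * below with -EOPNOTSUPP, since autoneg can only be changed through the
 * 'link_speeds' field of struct rte_eth_conf.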
6063 */ 6064 if (autoneg != hw->mac.link_autoneg) { 6065 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!"); 6066 return -EOPNOTSUPP; 6067 } 6068 6069 return 0; 6070 } 6071 6072 /* 6073 * If flow control auto-negotiation of the NIC is supported, all 6074 * auto-negotiation features are supported. 6075 */ 6076 if (autoneg != hw->mac.link_autoneg) { 6077 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!"); 6078 return -EOPNOTSUPP; 6079 } 6080 6081 return 0; 6082 } 6083 6084 static int 6085 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 6086 { 6087 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6088 int ret; 6089 6090 if (fc_conf->high_water || fc_conf->low_water || 6091 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) { 6092 hns3_err(hw, "Unsupported flow control settings specified, " 6093 "high_water(%u), low_water(%u), send_xon(%u) and " 6094 "mac_ctrl_frame_fwd(%u) must be set to '0'", 6095 fc_conf->high_water, fc_conf->low_water, 6096 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd); 6097 return -EINVAL; 6098 } 6099 6100 ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg); 6101 if (ret) 6102 return ret; 6103 6104 if (!fc_conf->pause_time) { 6105 hns3_err(hw, "Invalid pause time %u setting.", 6106 fc_conf->pause_time); 6107 return -EINVAL; 6108 } 6109 6110 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 6111 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) { 6112 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. " 6113 "current_fc_status = %d", hw->current_fc_status); 6114 return -EOPNOTSUPP; 6115 } 6116 6117 if (hw->num_tc > 1) { 6118 hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported."); 6119 return -EOPNOTSUPP; 6120 } 6121 6122 hns3_get_fc_mode(hw, fc_conf->mode); 6123 6124 rte_spinlock_lock(&hw->lock); 6125 ret = hns3_fc_enable(dev, fc_conf); 6126 rte_spinlock_unlock(&hw->lock); 6127 6128 return ret; 6129 } 6130 6131 static int 6132 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, 6133 struct rte_eth_pfc_conf *pfc_conf) 6134 { 6135 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6136 int ret; 6137 6138 if (!hns3_dev_dcb_supported(hw)) { 6139 hns3_err(hw, "This port does not support DCB configurations."); 6140 return -EOPNOTSUPP; 6141 } 6142 6143 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water || 6144 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) { 6145 hns3_err(hw, "Unsupported flow control settings specified, " 6146 "high_water(%u), low_water(%u), send_xon(%u) and " 6147 "mac_ctrl_frame_fwd(%u) must be set to '0'", 6148 pfc_conf->fc.high_water, pfc_conf->fc.low_water, 6149 pfc_conf->fc.send_xon, 6150 pfc_conf->fc.mac_ctrl_frame_fwd); 6151 return -EINVAL; 6152 } 6153 if (pfc_conf->fc.autoneg) { 6154 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 6155 return -EINVAL; 6156 } 6157 if (pfc_conf->fc.pause_time == 0) { 6158 hns3_err(hw, "Invalid pause time %u setting.", 6159 pfc_conf->fc.pause_time); 6160 return -EINVAL; 6161 } 6162 6163 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 6164 hw->current_fc_status == HNS3_FC_STATUS_PFC)) { 6165 hns3_err(hw, "MAC pause is enabled. Cannot set PFC. "
6166 "current_fc_status = %d", hw->current_fc_status); 6167 return -EOPNOTSUPP; 6168 } 6169 6170 hns3_get_fc_mode(hw, pfc_conf->fc.mode); 6171 6172 rte_spinlock_lock(&hw->lock); 6173 ret = hns3_dcb_pfc_enable(dev, pfc_conf); 6174 rte_spinlock_unlock(&hw->lock); 6175 6176 return ret; 6177 } 6178 6179 static int 6180 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) 6181 { 6182 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6183 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6184 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; 6185 int i; 6186 6187 rte_spinlock_lock(&hw->lock); 6188 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) 6189 dcb_info->nb_tcs = pf->local_max_tc; 6190 else 6191 dcb_info->nb_tcs = 1; 6192 6193 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) 6194 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; 6195 for (i = 0; i < dcb_info->nb_tcs; i++) 6196 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; 6197 6198 for (i = 0; i < hw->num_tc; i++) { 6199 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; 6200 dcb_info->tc_queue.tc_txq[0][i].base = 6201 hw->tc_queue[i].tqp_offset; 6202 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; 6203 dcb_info->tc_queue.tc_txq[0][i].nb_queue = 6204 hw->tc_queue[i].tqp_count; 6205 } 6206 rte_spinlock_unlock(&hw->lock); 6207 6208 return 0; 6209 } 6210 6211 static int 6212 hns3_reinit_dev(struct hns3_adapter *hns) 6213 { 6214 struct hns3_hw *hw = &hns->hw; 6215 int ret; 6216 6217 ret = hns3_cmd_init(hw); 6218 if (ret) { 6219 hns3_err(hw, "Failed to init cmd: %d", ret); 6220 return ret; 6221 } 6222 6223 ret = hns3_reset_all_tqps(hns); 6224 if (ret) { 6225 hns3_err(hw, "Failed to reset all queues: %d", ret); 6226 return ret; 6227 } 6228 6229 ret = hns3_init_hardware(hns); 6230 if (ret) { 6231 hns3_err(hw, "Failed to init hardware: %d", ret); 6232 return ret; 6233 } 6234 6235 ret = hns3_enable_hw_error_intr(hns, true); 6236 if (ret) { 6237 hns3_err(hw, "fail to enable hw error interrupts: %d", 6238 ret); 6239 return ret; 6240 } 6241 hns3_info(hw, "Reset done, driver initialization finished."); 6242 6243 return 0; 6244 } 6245 6246 static bool 6247 is_pf_reset_done(struct hns3_hw *hw) 6248 { 6249 uint32_t val, reg, reg_bit; 6250 6251 switch (hw->reset.level) { 6252 case HNS3_IMP_RESET: 6253 reg = HNS3_GLOBAL_RESET_REG; 6254 reg_bit = HNS3_IMP_RESET_BIT; 6255 break; 6256 case HNS3_GLOBAL_RESET: 6257 reg = HNS3_GLOBAL_RESET_REG; 6258 reg_bit = HNS3_GLOBAL_RESET_BIT; 6259 break; 6260 case HNS3_FUNC_RESET: 6261 reg = HNS3_FUN_RST_ING; 6262 reg_bit = HNS3_FUN_RST_ING_B; 6263 break; 6264 case HNS3_FLR_RESET: 6265 default: 6266 hns3_err(hw, "Wait for unsupported reset level: %d", 6267 hw->reset.level); 6268 return true; 6269 } 6270 val = hns3_read_dev(hw, reg); 6271 if (hns3_get_bit(val, reg_bit)) 6272 return false; 6273 else 6274 return true; 6275 } 6276 6277 bool 6278 hns3_is_reset_pending(struct hns3_adapter *hns) 6279 { 6280 struct hns3_hw *hw = &hns->hw; 6281 enum hns3_reset_level reset; 6282 6283 hns3_check_event_cause(hns, NULL); 6284 reset = hns3_get_reset_level(hns, &hw->reset.pending); 6285 6286 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6287 hw->reset.level < reset) { 6288 hns3_warn(hw, "High level reset %d is pending", reset); 6289 return true; 6290 } 6291 reset = hns3_get_reset_level(hns, &hw->reset.request); 6292 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6293 hw->reset.level < 
reset) { 6294 hns3_warn(hw, "High level reset %d is requested", reset); 6295 return true; 6296 } 6297 return false; 6298 } 6299 6300 static int 6301 hns3_wait_hardware_ready(struct hns3_adapter *hns) 6302 { 6303 struct hns3_hw *hw = &hns->hw; 6304 struct hns3_wait_data *wait_data = hw->reset.wait_data; 6305 struct timeval tv; 6306 6307 if (wait_data->result == HNS3_WAIT_SUCCESS) 6308 return 0; 6309 else if (wait_data->result == HNS3_WAIT_TIMEOUT) { 6310 gettimeofday(&tv, NULL); 6311 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", 6312 tv.tv_sec, tv.tv_usec); 6313 return -ETIME; 6314 } else if (wait_data->result == HNS3_WAIT_REQUEST) 6315 return -EAGAIN; 6316 6317 wait_data->hns = hns; 6318 wait_data->check_completion = is_pf_reset_done; 6319 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT * 6320 HNS3_RESET_WAIT_MS + get_timeofday_ms(); 6321 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC; 6322 wait_data->count = HNS3_RESET_WAIT_CNT; 6323 wait_data->result = HNS3_WAIT_REQUEST; 6324 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); 6325 return -EAGAIN; 6326 } 6327 6328 static int 6329 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) 6330 { 6331 struct hns3_cmd_desc desc; 6332 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data; 6333 6334 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); 6335 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1); 6336 req->fun_reset_vfid = func_id; 6337 6338 return hns3_cmd_send(hw, &desc, 1); 6339 } 6340 6341 static int 6342 hns3_imp_reset_cmd(struct hns3_hw *hw) 6343 { 6344 struct hns3_cmd_desc desc; 6345 6346 hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); 6347 desc.data[0] = 0xeedd; 6348 6349 return hns3_cmd_send(hw, &desc, 1); 6350 } 6351 6352 static void 6353 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) 6354 { 6355 struct hns3_hw *hw = &hns->hw; 6356 struct timeval tv; 6357 uint32_t val; 6358 6359 gettimeofday(&tv, NULL); 6360 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) || 6361 hns3_read_dev(hw, HNS3_FUN_RST_ING)) { 6362 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld", 6363 tv.tv_sec, tv.tv_usec); 6364 return; 6365 } 6366 6367 switch (reset_level) { 6368 case HNS3_IMP_RESET: 6369 hns3_imp_reset_cmd(hw); 6370 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", 6371 tv.tv_sec, tv.tv_usec); 6372 break; 6373 case HNS3_GLOBAL_RESET: 6374 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG); 6375 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1); 6376 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val); 6377 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld", 6378 tv.tv_sec, tv.tv_usec); 6379 break; 6380 case HNS3_FUNC_RESET: 6381 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld", 6382 tv.tv_sec, tv.tv_usec); 6383 /* schedule again to check later */ 6384 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending); 6385 hns3_schedule_reset(hns); 6386 break; 6387 default: 6388 hns3_warn(hw, "Unsupported reset level: %d", reset_level); 6389 return; 6390 } 6391 hns3_atomic_clear_bit(reset_level, &hw->reset.request); 6392 } 6393 6394 static enum hns3_reset_level 6395 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) 6396 { 6397 struct hns3_hw *hw = &hns->hw; 6398 enum hns3_reset_level reset_level = HNS3_NONE_RESET; 6399 6400 /* Return the highest priority reset level amongst all */ 6401 if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels)) 6402 reset_level = HNS3_IMP_RESET; 6403 else if
(hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels)) 6404 reset_level = HNS3_GLOBAL_RESET; 6405 else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels)) 6406 reset_level = HNS3_FUNC_RESET; 6407 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels)) 6408 reset_level = HNS3_FLR_RESET; 6409 6410 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level) 6411 return HNS3_NONE_RESET; 6412 6413 return reset_level; 6414 } 6415 6416 static void 6417 hns3_record_imp_error(struct hns3_adapter *hns) 6418 { 6419 struct hns3_hw *hw = &hns->hw; 6420 uint32_t reg_val; 6421 6422 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 6423 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) { 6424 hns3_warn(hw, "Detected IMP RD poison!"); 6425 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0); 6426 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 6427 } 6428 6429 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) { 6430 hns3_warn(hw, "Detected IMP CMDQ error!"); 6431 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0); 6432 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 6433 } 6434 } 6435 6436 static int 6437 hns3_prepare_reset(struct hns3_adapter *hns) 6438 { 6439 struct hns3_hw *hw = &hns->hw; 6440 uint32_t reg_val; 6441 int ret; 6442 6443 switch (hw->reset.level) { 6444 case HNS3_FUNC_RESET: 6445 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID); 6446 if (ret) 6447 return ret; 6448 6449 /* 6450 * After performing PF reset, it is not necessary to do the 6451 * mailbox handling or send any command to firmware, because 6452 * any mailbox handling or command to firmware is only valid 6453 * after hns3_cmd_init is called. 6454 */ 6455 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 6456 hw->reset.stats.request_cnt++; 6457 break; 6458 case HNS3_IMP_RESET: 6459 hns3_record_imp_error(hns); 6460 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 6461 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val | 6462 BIT(HNS3_VECTOR0_IMP_RESET_INT_B)); 6463 break; 6464 default: 6465 break; 6466 } 6467 return 0; 6468 } 6469 6470 static int 6471 hns3_set_rst_done(struct hns3_hw *hw) 6472 { 6473 struct hns3_pf_rst_done_cmd *req; 6474 struct hns3_cmd_desc desc; 6475 6476 req = (struct hns3_pf_rst_done_cmd *)desc.data; 6477 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false); 6478 req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT; 6479 return hns3_cmd_send(hw, &desc, 1); 6480 } 6481 6482 static int 6483 hns3_stop_service(struct hns3_adapter *hns) 6484 { 6485 struct hns3_hw *hw = &hns->hw; 6486 struct rte_eth_dev *eth_dev; 6487 6488 eth_dev = &rte_eth_devices[hw->data->port_id]; 6489 hw->mac.link_status = ETH_LINK_DOWN; 6490 if (hw->adapter_state == HNS3_NIC_STARTED) { 6491 rte_eal_alarm_cancel(hns3_service_handler, eth_dev); 6492 hns3_update_linkstatus_and_event(hw, false); 6493 } 6494 6495 hns3_set_rxtx_function(eth_dev); 6496 rte_wmb(); 6497 /* Disable datapath on secondary process. */ 6498 hns3_mp_req_stop_rxtx(eth_dev); 6499 rte_delay_ms(hw->tqps_num); 6500 6501 rte_spinlock_lock(&hw->lock); 6502 if (hns->hw.adapter_state == HNS3_NIC_STARTED || 6503 hw->adapter_state == HNS3_NIC_STOPPING) { 6504 hns3_enable_all_queues(hw, false); 6505 hns3_do_stop(hns); 6506 hw->reset.mbuf_deferred_free = true; 6507 } else 6508 hw->reset.mbuf_deferred_free = false; 6509 6510 /* 6511 * It is cumbersome for hardware to pick-and-choose entries for deletion 6512 * from table space.
Hence, for function reset, software intervention is 6513 * required to delete the entries. 6514 */ 6515 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) 6516 hns3_configure_all_mc_mac_addr(hns, true); 6517 rte_spinlock_unlock(&hw->lock); 6518 6519 return 0; 6520 } 6521 6522 static int 6523 hns3_start_service(struct hns3_adapter *hns) 6524 { 6525 struct hns3_hw *hw = &hns->hw; 6526 struct rte_eth_dev *eth_dev; 6527 6528 if (hw->reset.level == HNS3_IMP_RESET || 6529 hw->reset.level == HNS3_GLOBAL_RESET) 6530 hns3_set_rst_done(hw); 6531 eth_dev = &rte_eth_devices[hw->data->port_id]; 6532 hns3_set_rxtx_function(eth_dev); 6533 hns3_mp_req_start_rxtx(eth_dev); 6534 if (hw->adapter_state == HNS3_NIC_STARTED) { 6535 /* 6536 * The caller of this function already holds the hns3_hw.lock. The 6537 * hns3_service_handler may report an LSE; in a bonding application 6538 * it will call the driver's ops, which may acquire the hns3_hw.lock 6539 * again and thus lead to a deadlock. 6540 * We defer the call to hns3_service_handler to avoid this deadlock. 6541 */ 6542 rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL, 6543 hns3_service_handler, eth_dev); 6544 6545 /* Enable interrupt of all rx queues before enabling queues */ 6546 hns3_dev_all_rx_queue_intr_enable(hw, true); 6547 /* 6548 * The enable state of each rxq and txq will be recovered after 6549 * reset, so we need to restore it before enabling all tqps. 6550 */ 6551 hns3_restore_tqp_enable_state(hw); 6552 /* 6553 * When the initialization is finished, enable the queues to 6554 * receive and transmit packets. 6555 */ 6556 hns3_enable_all_queues(hw, true); 6557 } 6558 6559 return 0; 6560 } 6561 6562 static int 6563 hns3_restore_conf(struct hns3_adapter *hns) 6564 { 6565 struct hns3_hw *hw = &hns->hw; 6566 int ret; 6567 6568 ret = hns3_configure_all_mac_addr(hns, false); 6569 if (ret) 6570 return ret; 6571 6572 ret = hns3_configure_all_mc_mac_addr(hns, false); 6573 if (ret) 6574 goto err_mc_mac; 6575 6576 ret = hns3_dev_promisc_restore(hns); 6577 if (ret) 6578 goto err_promisc; 6579 6580 ret = hns3_restore_vlan_table(hns); 6581 if (ret) 6582 goto err_promisc; 6583 6584 ret = hns3_restore_vlan_conf(hns); 6585 if (ret) 6586 goto err_promisc; 6587 6588 ret = hns3_restore_all_fdir_filter(hns); 6589 if (ret) 6590 goto err_promisc; 6591 6592 ret = hns3_restore_ptp(hns); 6593 if (ret) 6594 goto err_promisc; 6595 6596 ret = hns3_restore_rx_interrupt(hw); 6597 if (ret) 6598 goto err_promisc; 6599 6600 ret = hns3_restore_gro_conf(hw); 6601 if (ret) 6602 goto err_promisc; 6603 6604 ret = hns3_restore_fec(hw); 6605 if (ret) 6606 goto err_promisc; 6607 6608 if (hns->hw.adapter_state == HNS3_NIC_STARTED) { 6609 ret = hns3_do_start(hns, false); 6610 if (ret) 6611 goto err_promisc; 6612 hns3_info(hw, "hns3 dev restart successful!"); 6613 } else if (hw->adapter_state == HNS3_NIC_STOPPING) 6614 hw->adapter_state = HNS3_NIC_CONFIGURED; 6615 return 0; 6616 6617 err_promisc: 6618 hns3_configure_all_mc_mac_addr(hns, true); 6619 err_mc_mac: 6620 hns3_configure_all_mac_addr(hns, true); 6621 return ret; 6622 } 6623 6624 static void 6625 hns3_reset_service(void *param) 6626 { 6627 struct hns3_adapter *hns = (struct hns3_adapter *)param; 6628 struct hns3_hw *hw = &hns->hw; 6629 enum hns3_reset_level reset_level; 6630 struct timeval tv_delta; 6631 struct timeval tv_start; 6632 struct timeval tv; 6633 uint64_t msec; 6634 int ret; 6635 6636 /* 6637 * If the interrupt is not triggered within the delay time, the 6638 * interrupt may have been lost.
It is necessary to handle 6639 * the interrupt to recover from the error. 6640 */ 6641 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 6642 SCHEDULE_DEFERRED) { 6643 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, 6644 __ATOMIC_RELAXED); 6645 hns3_err(hw, "Handling interrupts in delayed tasks"); 6646 hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]); 6647 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 6648 if (reset_level == HNS3_NONE_RESET) { 6649 hns3_err(hw, "No reset level is set, try IMP reset"); 6650 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); 6651 } 6652 } 6653 __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED); 6654 6655 /* 6656 * Check if there is any ongoing reset in the hardware. This status can 6657 * be checked from reset_pending. If there is one, then we need to wait 6658 * for the hardware to complete the reset. 6659 * a. If we are able to figure out in reasonable time that the hardware 6660 * has fully reset, then we can proceed with the driver and client 6661 * reset. 6662 * b. Else, we can come back later to check this status, so reschedule 6663 * now. 6664 */ 6665 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 6666 if (reset_level != HNS3_NONE_RESET) { 6667 gettimeofday(&tv_start, NULL); 6668 ret = hns3_reset_process(hns, reset_level); 6669 gettimeofday(&tv, NULL); 6670 timersub(&tv, &tv_start, &tv_delta); 6671 msec = tv_delta.tv_sec * MSEC_PER_SEC + 6672 tv_delta.tv_usec / USEC_PER_MSEC; 6673 if (msec > HNS3_RESET_PROCESS_MS) 6674 hns3_err(hw, "handling reset level %d took a long time: %" PRIu64 6675 " ms, time=%ld.%.6ld", 6676 hw->reset.level, msec, 6677 tv.tv_sec, tv.tv_usec); 6678 if (ret == -EAGAIN) 6679 return; 6680 } 6681 6682 /* Check if we got any *new* reset requests to be honored */ 6683 reset_level = hns3_get_reset_level(hns, &hw->reset.request); 6684 if (reset_level != HNS3_NONE_RESET) 6685 hns3_msix_process(hns, reset_level); 6686 } 6687 6688 static unsigned int 6689 hns3_get_speed_capa_num(uint16_t device_id) 6690 { 6691 unsigned int num; 6692 6693 switch (device_id) { 6694 case HNS3_DEV_ID_25GE: 6695 case HNS3_DEV_ID_25GE_RDMA: 6696 num = 2; 6697 break; 6698 case HNS3_DEV_ID_100G_RDMA_MACSEC: 6699 case HNS3_DEV_ID_200G_RDMA: 6700 num = 1; 6701 break; 6702 default: 6703 num = 0; 6704 break; 6705 } 6706 6707 return num; 6708 } 6709 6710 static int 6711 hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa, 6712 uint16_t device_id) 6713 { 6714 switch (device_id) { 6715 case HNS3_DEV_ID_25GE: 6716 /* fallthrough */ 6717 case HNS3_DEV_ID_25GE_RDMA: 6718 speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed; 6719 speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa; 6720 6721 /* In HNS3 devices, the 25G NIC is also compatible with the 10G rate */ 6722 speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed; 6723 speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa; 6724 break; 6725 case HNS3_DEV_ID_100G_RDMA_MACSEC: 6726 speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed; 6727 speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa; 6728 break; 6729 case HNS3_DEV_ID_200G_RDMA: 6730 speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed; 6731 speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa; 6732 break; 6733 default: 6734 return -ENOTSUP; 6735 } 6736 6737 return 0; 6738 } 6739 6740 static int 6741 hns3_fec_get_capability(struct rte_eth_dev *dev, 6742 struct rte_eth_fec_capa *speed_fec_capa, 6743 unsigned int num) 6744 { 6745 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6746 struct
rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6747 uint16_t device_id = pci_dev->id.device_id; 6748 unsigned int capa_num; 6749 int ret; 6750 6751 capa_num = hns3_get_speed_capa_num(device_id); 6752 if (capa_num == 0) { 6753 hns3_err(hw, "device(0x%x) is not supported by hns3 PMD", 6754 device_id); 6755 return -ENOTSUP; 6756 } 6757 6758 if (speed_fec_capa == NULL || num < capa_num) 6759 return capa_num; 6760 6761 ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id); 6762 if (ret) 6763 return -ENOTSUP; 6764 6765 return capa_num; 6766 } 6767 6768 static int 6769 get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) 6770 { 6771 struct hns3_config_fec_cmd *req; 6772 struct hns3_cmd_desc desc; 6773 int ret; 6774 6775 /* 6776 * Reading CMD(HNS3_OPC_CONFIG_FEC_MODE) is not supported 6777 * on devices with a link speed 6778 * below 10 Gbps. 6779 */ 6780 if (hw->mac.link_speed < ETH_SPEED_NUM_10G) { 6781 *state = 0; 6782 return 0; 6783 } 6784 6785 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true); 6786 req = (struct hns3_config_fec_cmd *)desc.data; 6787 ret = hns3_cmd_send(hw, &desc, 1); 6788 if (ret) { 6789 hns3_err(hw, "get current fec auto state failed, ret = %d", 6790 ret); 6791 return ret; 6792 } 6793 6794 *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B); 6795 return 0; 6796 } 6797 6798 static int 6799 hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) 6800 { 6801 struct hns3_sfp_info_cmd *resp; 6802 uint32_t tmp_fec_capa; 6803 uint8_t auto_state; 6804 struct hns3_cmd_desc desc; 6805 int ret; 6806 6807 /* 6808 * If the link is down and AUTO is enabled, AUTO is returned; otherwise, 6809 * the configured FEC mode is returned. 6810 * If the link is up, the current FEC mode is returned. 6811 */ 6812 if (hw->mac.link_status == ETH_LINK_DOWN) { 6813 ret = get_current_fec_auto_state(hw, &auto_state); 6814 if (ret) 6815 return ret; 6816 6817 if (auto_state == 0x1) { 6818 *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO); 6819 return 0; 6820 } 6821 } 6822 6823 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true); 6824 resp = (struct hns3_sfp_info_cmd *)desc.data; 6825 resp->query_type = HNS3_ACTIVE_QUERY; 6826 6827 ret = hns3_cmd_send(hw, &desc, 1); 6828 if (ret == -EOPNOTSUPP) { 6829 hns3_err(hw, "IMP does not support getting FEC, ret = %d", ret); 6830 return ret; 6831 } else if (ret) { 6832 hns3_err(hw, "get FEC failed, ret = %d", ret); 6833 return ret; 6834 } 6835 6836 /* 6837 * The FEC mode order defined in hns3 hardware is inconsistent with 6838 * that defined in the ethdev library. So the sequence needs 6839 * to be converted.
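 * As a quick reference, the switch below maps the hardware encoding to
 * the ethdev capability masks:
 *   HNS3_HW_FEC_MODE_NOFEC (0) -> RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC)
 *   HNS3_HW_FEC_MODE_BASER (1) -> RTE_ETH_FEC_MODE_CAPA_MASK(BASER)
 *   HNS3_HW_FEC_MODE_RS    (2) -> RTE_ETH_FEC_MODE_CAPA_MASK(RS)
 * and any unknown value is reported as NOFEC.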
6840 */ 6841 switch (resp->active_fec) { 6842 case HNS3_HW_FEC_MODE_NOFEC: 6843 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 6844 break; 6845 case HNS3_HW_FEC_MODE_BASER: 6846 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER); 6847 break; 6848 case HNS3_HW_FEC_MODE_RS: 6849 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS); 6850 break; 6851 default: 6852 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 6853 break; 6854 } 6855 6856 *fec_capa = tmp_fec_capa; 6857 return 0; 6858 } 6859 6860 static int 6861 hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa) 6862 { 6863 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6864 6865 return hns3_fec_get_internal(hw, fec_capa); 6866 } 6867 6868 static int 6869 hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) 6870 { 6871 struct hns3_config_fec_cmd *req; 6872 struct hns3_cmd_desc desc; 6873 int ret; 6874 6875 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false); 6876 6877 req = (struct hns3_config_fec_cmd *)desc.data; 6878 switch (mode) { 6879 case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC): 6880 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6881 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF); 6882 break; 6883 case RTE_ETH_FEC_MODE_CAPA_MASK(BASER): 6884 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6885 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER); 6886 break; 6887 case RTE_ETH_FEC_MODE_CAPA_MASK(RS): 6888 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6889 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS); 6890 break; 6891 case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO): 6892 hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1); 6893 break; 6894 default: 6895 return 0; 6896 } 6897 ret = hns3_cmd_send(hw, &desc, 1); 6898 if (ret) 6899 hns3_err(hw, "set fec mode failed, ret = %d", ret); 6900 6901 return ret; 6902 } 6903 6904 static uint32_t 6905 get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa) 6906 { 6907 struct hns3_mac *mac = &hw->mac; 6908 uint32_t cur_capa; 6909 6910 switch (mac->link_speed) { 6911 case ETH_SPEED_NUM_10G: 6912 cur_capa = fec_capa[1].capa; 6913 break; 6914 case ETH_SPEED_NUM_25G: 6915 case ETH_SPEED_NUM_100G: 6916 case ETH_SPEED_NUM_200G: 6917 cur_capa = fec_capa[0].capa; 6918 break; 6919 default: 6920 cur_capa = 0; 6921 break; 6922 } 6923 6924 return cur_capa; 6925 } 6926 6927 static bool 6928 is_fec_mode_one_bit_set(uint32_t mode) 6929 { 6930 int cnt = 0; 6931 uint8_t i; 6932 6933 /* Walk all 32 bits of the mode mask, not just sizeof(mode) bytes. */ for (i = 0; i < sizeof(mode) * 8; i++) 6934 if (mode >> i & 0x1) 6935 cnt++; 6936 6937 return cnt == 1; 6938 } 6939 6940 static int 6941 hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) 6942 { 6943 #define FEC_CAPA_NUM 2 6944 struct hns3_adapter *hns = dev->data->dev_private; 6945 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 6946 struct hns3_pf *pf = &hns->pf; 6947 6948 struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM]; 6949 uint32_t cur_capa; 6950 uint32_t num = FEC_CAPA_NUM; 6951 int ret; 6952 6953 ret = hns3_fec_get_capability(dev, fec_capa, num); 6954 if (ret < 0) 6955 return ret; 6956 6957 /* The HNS3 PMD only supports modes with exactly one bit set, e.g. 0x1, 0x4 */ 6958 if (!is_fec_mode_one_bit_set(mode)) { 6959 hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, " 6960 "FEC mode should be only one bit set", mode); return -EINVAL; } 6961 6962 /* 6963 * Check whether the configured mode is within the FEC capability. 6964 * If not, the configured mode will not be supported.
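 * For example, a 25G link advertises NOFEC | AUTO | BASER | RS in
 * speed_fec_capa_tbl, so requesting RTE_ETH_FEC_MODE_CAPA_MASK(RS)
 * passes this check, whereas on a 200G link (NOFEC | AUTO | RS only)
 * a BASER request fails below with -EINVAL.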
6965 */ 6966 cur_capa = get_current_speed_fec_cap(hw, fec_capa); 6967 if (!(cur_capa & mode)) { 6968 hns3_err(hw, "unsupported FEC mode = 0x%x", mode); 6969 return -EINVAL; 6970 } 6971 6972 rte_spinlock_lock(&hw->lock); 6973 ret = hns3_set_fec_hw(hw, mode); 6974 if (ret) { 6975 rte_spinlock_unlock(&hw->lock); 6976 return ret; 6977 } 6978 6979 pf->fec_mode = mode; 6980 rte_spinlock_unlock(&hw->lock); 6981 6982 return 0; 6983 } 6984 6985 static int 6986 hns3_restore_fec(struct hns3_hw *hw) 6987 { 6988 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 6989 struct hns3_pf *pf = &hns->pf; 6990 uint32_t mode = pf->fec_mode; 6991 int ret; 6992 6993 ret = hns3_set_fec_hw(hw, mode); 6994 if (ret) 6995 hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d", 6996 mode, ret); 6997 6998 return ret; 6999 } 7000 7001 static int 7002 hns3_query_dev_fec_info(struct hns3_hw *hw) 7003 { 7004 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 7005 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns); 7006 int ret; 7007 7008 ret = hns3_fec_get_internal(hw, &pf->fec_mode); 7009 if (ret) 7010 hns3_err(hw, "query device FEC info failed, ret = %d", ret); 7011 7012 return ret; 7013 } 7014 7015 static bool 7016 hns3_optical_module_existed(struct hns3_hw *hw) 7017 { 7018 struct hns3_cmd_desc desc; 7019 bool existed; 7020 int ret; 7021 7022 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true); 7023 ret = hns3_cmd_send(hw, &desc, 1); 7024 if (ret) { 7025 hns3_err(hw, 7026 "fail to get optical module exist state, ret = %d.\n", 7027 ret); 7028 return false; 7029 } 7030 existed = !!desc.data[0]; 7031 7032 return existed; 7033 } 7034 7035 static int 7036 hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, 7037 uint32_t len, uint8_t *data) 7038 { 7039 #define HNS3_SFP_INFO_CMD_NUM 6 7040 #define HNS3_SFP_INFO_MAX_LEN \ 7041 (HNS3_SFP_INFO_BD0_LEN + \ 7042 (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN) 7043 struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM]; 7044 struct hns3_sfp_info_bd0_cmd *sfp_info_bd0; 7045 uint16_t read_len; 7046 uint16_t copy_len; 7047 int ret; 7048 int i; 7049 7050 for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) { 7051 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM, 7052 true); 7053 if (i < HNS3_SFP_INFO_CMD_NUM - 1) 7054 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 7055 } 7056 7057 sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data; 7058 sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset); 7059 read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN); 7060 sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len); 7061 7062 ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); 7063 if (ret) { 7064 hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n", 7065 ret); 7066 return ret; 7067 } 7068 7069 /* The data format in BD0 is different from the others.
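 * BD0 carries the request header (offset/read_len) alongside its data,
 * so it holds fewer payload bytes than the follow-up BDs. Assuming the
 * usual definitions of HNS3_SFP_INFO_BD0_LEN (20) and
 * HNS3_SFP_INFO_BDX_LEN (24), one six-BD command returns at most
 * 20 + 5 * 24 = 140 bytes, which is what HNS3_SFP_INFO_MAX_LEN above
 * evaluates to.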
*/ 7070 copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN); 7071 memcpy(data, sfp_info_bd0->data, copy_len); 7072 read_len = copy_len; 7073 7074 for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) { 7075 if (read_len >= len) 7076 break; 7077 7078 copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN); 7079 memcpy(data + read_len, desc[i].data, copy_len); 7080 read_len += copy_len; 7081 } 7082 7083 return (int)read_len; 7084 } 7085 7086 static int 7087 hns3_get_module_eeprom(struct rte_eth_dev *dev, 7088 struct rte_dev_eeprom_info *info) 7089 { 7090 struct hns3_adapter *hns = dev->data->dev_private; 7091 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7092 uint32_t offset = info->offset; 7093 uint32_t len = info->length; 7094 uint8_t *data = info->data; 7095 uint32_t read_len = 0; 7096 7097 if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER) 7098 return -ENOTSUP; 7099 7100 if (!hns3_optical_module_existed(hw)) { 7101 hns3_err(hw, "fail to read module EEPROM: no module is connected.\n"); 7102 return -EIO; 7103 } 7104 7105 while (read_len < len) { 7106 int ret; 7107 ret = hns3_get_module_eeprom_data(hw, offset + read_len, 7108 len - read_len, 7109 data + read_len); 7110 if (ret < 0) 7111 return -EIO; 7112 read_len += ret; 7113 } 7114 7115 return 0; 7116 } 7117 7118 static int 7119 hns3_get_module_info(struct rte_eth_dev *dev, 7120 struct rte_eth_dev_module_info *modinfo) 7121 { 7122 #define HNS3_SFF8024_ID_SFP 0x03 7123 #define HNS3_SFF8024_ID_QSFP_8438 0x0c 7124 #define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d 7125 #define HNS3_SFF8024_ID_QSFP28_8636 0x11 7126 #define HNS3_SFF_8636_V1_3 0x03 7127 struct hns3_adapter *hns = dev->data->dev_private; 7128 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7129 struct rte_dev_eeprom_info info; 7130 struct hns3_sfp_type sfp_type; 7131 int ret; 7132 7133 memset(&sfp_type, 0, sizeof(sfp_type)); 7134 memset(&info, 0, sizeof(info)); 7135 info.data = (uint8_t *)&sfp_type; 7136 info.length = sizeof(sfp_type); 7137 ret = hns3_get_module_eeprom(dev, &info); 7138 if (ret) 7139 return ret; 7140 7141 switch (sfp_type.type) { 7142 case HNS3_SFF8024_ID_SFP: 7143 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7144 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7145 break; 7146 case HNS3_SFF8024_ID_QSFP_8438: 7147 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7148 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7149 break; 7150 case HNS3_SFF8024_ID_QSFP_8436_8636: 7151 if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) { 7152 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7153 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7154 } else { 7155 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7156 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7157 } 7158 break; 7159 case HNS3_SFF8024_ID_QSFP28_8636: 7160 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7161 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7162 break; 7163 default: 7164 hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n", 7165 sfp_type.type, sfp_type.ext_type); 7166 return -EINVAL; 7167 } 7168 7169 return 0; 7170 } 7171 7172 static int 7173 hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) 7174 { 7175 uint32_t hint = HNS3_IO_FUNC_HINT_NONE; 7176 7177 RTE_SET_USED(key); 7178 7179 if (strcmp(value, "vec") == 0) 7180 hint = HNS3_IO_FUNC_HINT_VEC; 7181 else if (strcmp(value, "sve") == 0) 7182 hint = HNS3_IO_FUNC_HINT_SVE; 7183 else if (strcmp(value, "simple") == 0) 7184 hint = HNS3_IO_FUNC_HINT_SIMPLE; 7185 else if (strcmp(value, "common") == 0) 7186 hint = HNS3_IO_FUNC_HINT_COMMON; 7187 
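/*
 * A usage sketch (values are illustrative, not taken from this file):
 * these strings arrive via devargs, e.g.
 *   dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common -- -i
 * Any unrecognized value keeps HNS3_IO_FUNC_HINT_NONE and leaves
 * *extra_args untouched.
 */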
7188 /* If the hint is valid then update output parameters */ 7189 if (hint != HNS3_IO_FUNC_HINT_NONE) 7190 *(uint32_t *)extra_args = hint; 7191 7192 return 0; 7193 } 7194 7195 static const char * 7196 hns3_get_io_hint_func_name(uint32_t hint) 7197 { 7198 switch (hint) { 7199 case HNS3_IO_FUNC_HINT_VEC: 7200 return "vec"; 7201 case HNS3_IO_FUNC_HINT_SVE: 7202 return "sve"; 7203 case HNS3_IO_FUNC_HINT_SIMPLE: 7204 return "simple"; 7205 case HNS3_IO_FUNC_HINT_COMMON: 7206 return "common"; 7207 default: 7208 return "none"; 7209 } 7210 } 7211 7212 void 7213 hns3_parse_devargs(struct rte_eth_dev *dev) 7214 { 7215 struct hns3_adapter *hns = dev->data->dev_private; 7216 uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7217 uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7218 struct hns3_hw *hw = &hns->hw; 7219 struct rte_kvargs *kvlist; 7220 7221 if (dev->device->devargs == NULL) 7222 return; 7223 7224 kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL); 7225 if (!kvlist) 7226 return; 7227 7228 rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT, 7229 &hns3_parse_io_hint_func, &rx_func_hint); 7230 rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT, 7231 &hns3_parse_io_hint_func, &tx_func_hint); 7232 rte_kvargs_free(kvlist); 7233 7234 if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7235 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT, 7236 hns3_get_io_hint_func_name(rx_func_hint)); 7237 hns->rx_func_hint = rx_func_hint; 7238 if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7239 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT, 7240 hns3_get_io_hint_func_name(tx_func_hint)); 7241 hns->tx_func_hint = tx_func_hint; 7242 } 7243 7244 static const struct eth_dev_ops hns3_eth_dev_ops = { 7245 .dev_configure = hns3_dev_configure, 7246 .dev_start = hns3_dev_start, 7247 .dev_stop = hns3_dev_stop, 7248 .dev_close = hns3_dev_close, 7249 .promiscuous_enable = hns3_dev_promiscuous_enable, 7250 .promiscuous_disable = hns3_dev_promiscuous_disable, 7251 .allmulticast_enable = hns3_dev_allmulticast_enable, 7252 .allmulticast_disable = hns3_dev_allmulticast_disable, 7253 .mtu_set = hns3_dev_mtu_set, 7254 .stats_get = hns3_stats_get, 7255 .stats_reset = hns3_stats_reset, 7256 .xstats_get = hns3_dev_xstats_get, 7257 .xstats_get_names = hns3_dev_xstats_get_names, 7258 .xstats_reset = hns3_dev_xstats_reset, 7259 .xstats_get_by_id = hns3_dev_xstats_get_by_id, 7260 .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, 7261 .dev_infos_get = hns3_dev_infos_get, 7262 .fw_version_get = hns3_fw_version_get, 7263 .rx_queue_setup = hns3_rx_queue_setup, 7264 .tx_queue_setup = hns3_tx_queue_setup, 7265 .rx_queue_release = hns3_dev_rx_queue_release, 7266 .tx_queue_release = hns3_dev_tx_queue_release, 7267 .rx_queue_start = hns3_dev_rx_queue_start, 7268 .rx_queue_stop = hns3_dev_rx_queue_stop, 7269 .tx_queue_start = hns3_dev_tx_queue_start, 7270 .tx_queue_stop = hns3_dev_tx_queue_stop, 7271 .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, 7272 .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, 7273 .rxq_info_get = hns3_rxq_info_get, 7274 .txq_info_get = hns3_txq_info_get, 7275 .rx_burst_mode_get = hns3_rx_burst_mode_get, 7276 .tx_burst_mode_get = hns3_tx_burst_mode_get, 7277 .flow_ctrl_get = hns3_flow_ctrl_get, 7278 .flow_ctrl_set = hns3_flow_ctrl_set, 7279 .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set, 7280 .mac_addr_add = hns3_add_mac_addr, 7281 .mac_addr_remove = hns3_remove_mac_addr, 7282 .mac_addr_set = hns3_set_default_mac_addr, 7283 .set_mc_addr_list = hns3_set_mc_mac_addr_list, 7284 
.link_update = hns3_dev_link_update, 7285 .rss_hash_update = hns3_dev_rss_hash_update, 7286 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, 7287 .reta_update = hns3_dev_rss_reta_update, 7288 .reta_query = hns3_dev_rss_reta_query, 7289 .flow_ops_get = hns3_dev_flow_ops_get, 7290 .vlan_filter_set = hns3_vlan_filter_set, 7291 .vlan_tpid_set = hns3_vlan_tpid_set, 7292 .vlan_offload_set = hns3_vlan_offload_set, 7293 .vlan_pvid_set = hns3_vlan_pvid_set, 7294 .get_reg = hns3_get_regs, 7295 .get_module_info = hns3_get_module_info, 7296 .get_module_eeprom = hns3_get_module_eeprom, 7297 .get_dcb_info = hns3_get_dcb_info, 7298 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, 7299 .fec_get_capability = hns3_fec_get_capability, 7300 .fec_get = hns3_fec_get, 7301 .fec_set = hns3_fec_set, 7302 .tm_ops_get = hns3_tm_ops_get, 7303 .tx_done_cleanup = hns3_tx_done_cleanup, 7304 .timesync_enable = hns3_timesync_enable, 7305 .timesync_disable = hns3_timesync_disable, 7306 .timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp, 7307 .timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp, 7308 .timesync_adjust_time = hns3_timesync_adjust_time, 7309 .timesync_read_time = hns3_timesync_read_time, 7310 .timesync_write_time = hns3_timesync_write_time, 7311 }; 7312 7313 static const struct hns3_reset_ops hns3_reset_ops = { 7314 .reset_service = hns3_reset_service, 7315 .stop_service = hns3_stop_service, 7316 .prepare_reset = hns3_prepare_reset, 7317 .wait_hardware_ready = hns3_wait_hardware_ready, 7318 .reinit_dev = hns3_reinit_dev, 7319 .restore_conf = hns3_restore_conf, 7320 .start_service = hns3_start_service, 7321 }; 7322 7323 static int 7324 hns3_dev_init(struct rte_eth_dev *eth_dev) 7325 { 7326 struct hns3_adapter *hns = eth_dev->data->dev_private; 7327 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 7328 struct rte_ether_addr *eth_addr; 7329 struct hns3_hw *hw = &hns->hw; 7330 int ret; 7331 7332 PMD_INIT_FUNC_TRACE(); 7333 7334 eth_dev->process_private = (struct hns3_process_private *) 7335 rte_zmalloc_socket("hns3_filter_list", 7336 sizeof(struct hns3_process_private), 7337 RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node); 7338 if (eth_dev->process_private == NULL) { 7339 PMD_INIT_LOG(ERR, "Failed to alloc memory for process private"); 7340 return -ENOMEM; 7341 } 7342 /* initialize flow filter lists */ 7343 hns3_filterlist_init(eth_dev); 7344 7345 hns3_set_rxtx_function(eth_dev); 7346 eth_dev->dev_ops = &hns3_eth_dev_ops; 7347 eth_dev->rx_queue_count = hns3_rx_queue_count; 7348 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 7349 ret = hns3_mp_init_secondary(); 7350 if (ret) { 7351 PMD_INIT_LOG(ERR, "Failed to init for secondary " 7352 "process, ret = %d", ret); 7353 goto err_mp_init_secondary; 7354 } 7355 7356 hw->secondary_cnt++; 7357 return 0; 7358 } 7359 7360 ret = hns3_mp_init_primary(); 7361 if (ret) { 7362 PMD_INIT_LOG(ERR, 7363 "Failed to init for primary process, ret = %d", 7364 ret); 7365 goto err_mp_init_primary; 7366 } 7367 7368 hw->adapter_state = HNS3_NIC_UNINITIALIZED; 7369 hns->is_vf = false; 7370 hw->data = eth_dev->data; 7371 hns3_parse_devargs(eth_dev); 7372 7373 /* 7374 * Set the default max packet size according to the default MTU 7375 * value in the DPDK framework.
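 * As a worked example, with the usual definition of HNS3_ETH_OVERHEAD
 * (14-byte Ethernet header + 4-byte CRC + two 4-byte VLAN tags = 26
 * bytes), the default MTU of 1500 gives an initial MPS of 1526 bytes.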
7376 */ 7377 hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD; 7378 7379 ret = hns3_reset_init(hw); 7380 if (ret) 7381 goto err_init_reset; 7382 hw->reset.ops = &hns3_reset_ops; 7383 7384 ret = hns3_init_pf(eth_dev); 7385 if (ret) { 7386 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret); 7387 goto err_init_pf; 7388 } 7389 7390 /* Allocate memory for storing MAC addresses */ 7391 eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac", 7392 sizeof(struct rte_ether_addr) * 7393 HNS3_UC_MACADDR_NUM, 0); 7394 if (eth_dev->data->mac_addrs == NULL) { 7395 PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed " 7396 "to store MAC addresses", 7397 sizeof(struct rte_ether_addr) * 7398 HNS3_UC_MACADDR_NUM); 7399 ret = -ENOMEM; 7400 goto err_rte_zmalloc; 7401 } 7402 7403 eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr; 7404 if (!rte_is_valid_assigned_ether_addr(eth_addr)) { 7405 rte_eth_random_addr(hw->mac.mac_addr); 7406 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 7407 (struct rte_ether_addr *)hw->mac.mac_addr); 7408 hns3_warn(hw, "default mac_addr from firmware is an invalid " 7409 "unicast address, using random MAC address %s", 7410 mac_str); 7411 } 7412 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, 7413 &eth_dev->data->mac_addrs[0]); 7414 7415 hw->adapter_state = HNS3_NIC_INITIALIZED; 7416 7417 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 7418 SCHEDULE_PENDING) { 7419 hns3_err(hw, "Reschedule reset service after dev_init"); 7420 hns3_schedule_reset(hns); 7421 } else { 7422 /* IMP will wait ready flag before reset */ 7423 hns3_notify_reset_ready(hw, false); 7424 } 7425 7426 hns3_info(hw, "hns3 dev initialization successful!"); 7427 return 0; 7428 7429 err_rte_zmalloc: 7430 hns3_uninit_pf(eth_dev); 7431 7432 err_init_pf: 7433 rte_free(hw->reset.wait_data); 7434 7435 err_init_reset: 7436 hns3_mp_uninit_primary(); 7437 7438 err_mp_init_primary: 7439 err_mp_init_secondary: 7440 eth_dev->dev_ops = NULL; 7441 eth_dev->rx_pkt_burst = NULL; 7442 eth_dev->rx_descriptor_status = NULL; 7443 eth_dev->tx_pkt_burst = NULL; 7444 eth_dev->tx_pkt_prepare = NULL; 7445 eth_dev->tx_descriptor_status = NULL; 7446 rte_free(eth_dev->process_private); 7447 eth_dev->process_private = NULL; 7448 return ret; 7449 } 7450 7451 static int 7452 hns3_dev_uninit(struct rte_eth_dev *eth_dev) 7453 { 7454 struct hns3_adapter *hns = eth_dev->data->dev_private; 7455 struct hns3_hw *hw = &hns->hw; 7456 7457 PMD_INIT_FUNC_TRACE(); 7458 7459 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 7460 rte_free(eth_dev->process_private); 7461 eth_dev->process_private = NULL; 7462 return 0; 7463 } 7464 7465 if (hw->adapter_state < HNS3_NIC_CLOSING) 7466 hns3_dev_close(eth_dev); 7467 7468 hw->adapter_state = HNS3_NIC_REMOVED; 7469 return 0; 7470 } 7471 7472 static int 7473 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 7474 struct rte_pci_device *pci_dev) 7475 { 7476 return rte_eth_dev_pci_generic_probe(pci_dev, 7477 sizeof(struct hns3_adapter), 7478 hns3_dev_init); 7479 } 7480 7481 static int 7482 eth_hns3_pci_remove(struct rte_pci_device *pci_dev) 7483 { 7484 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit); 7485 } 7486 7487 static const struct rte_pci_id pci_id_hns3_map[] = { 7488 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) }, 7489 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) }, 7490 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) }, 7491 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, 7492 {
RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, 7493 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, 7494 { .vendor_id = 0, }, /* sentinel */ 7495 }; 7496 7497 static struct rte_pci_driver rte_hns3_pmd = { 7498 .id_table = pci_id_hns3_map, 7499 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 7500 .probe = eth_hns3_pci_probe, 7501 .remove = eth_hns3_pci_remove, 7502 }; 7503 7504 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd); 7505 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map); 7506 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); 7507 RTE_PMD_REGISTER_PARAM_STRING(net_hns3, 7508 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " 7509 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "); 7510 RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE); 7511 RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE); 7512
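/*
 * Example (assuming standard EAL option syntax, not specific to this
 * driver): the two log types registered above can be raised at run time
 * without rebuilding, e.g.
 *   dpdk-testpmd --log-level=pmd.net.hns3.init:debug \
 *                --log-level=pmd.net.hns3.driver:debug -- -i
 */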