/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_kvargs.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"

#define HNS3_SERVICE_INTERVAL		1000000 /* us */
#define HNS3_SERVICE_QUICK_INTERVAL	10
#define HNS3_INVALID_PVID		0xFFFF

#define HNS3_FILTER_TYPE_VF		0
#define HNS3_FILTER_TYPE_PORT		1
#define HNS3_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HNS3_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HNS3_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HNS3_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HNS3_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HNS3_FILTER_FE_EGRESS		(HNS3_FILTER_FE_NIC_EGRESS_B \
					| HNS3_FILTER_FE_ROCE_EGRESS_B)
#define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
					| HNS3_FILTER_FE_ROCE_INGRESS_B)

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

#define HNS3_VECTOR0_IMP_RESET_INT_B	1
#define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
#define HNS3_VECTOR0_IMP_RD_POISON_B	5U
#define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U

#define HNS3_RESET_WAIT_MS	100
#define HNS3_RESET_WAIT_CNT	200

/* FEC mode order defined in HNS3 hardware */
#define HNS3_HW_FEC_MODE_NOFEC	0
#define HNS3_HW_FEC_MODE_BASER	1
#define HNS3_HW_FEC_MODE_RS	2

enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,
	HNS3_VECTOR0_EVENT_MBX,
	HNS3_VECTOR0_EVENT_ERR,
	HNS3_VECTOR0_EVENT_PTP,
	HNS3_VECTOR0_EVENT_OTHER,
};

static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
	{ ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};
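/*
 * The table above is scanned by the FEC capability query path, which
 * applications typically reach through rte_eth_fec_get_capability().
 * For example, a port linked at 25G reports NOFEC | AUTO | BASER | RS,
 * while at 100G only NOFEC | AUTO | RS is reported.
 */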
static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
						  uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
				    int on);
static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
static bool hns3_update_link_status(struct hns3_hw *hw);

static int hns3_add_mc_addr(struct hns3_hw *hw,
			    struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_addr(struct hns3_hw *hw,
			       struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct hns3_hw *hw);
static int hns3_do_stop(struct hns3_adapter *hns);
static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);

void hns3_ether_format_addr(char *buf, uint16_t size,
			    const struct rte_ether_addr *ether_addr)
{
	snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
		 ether_addr->addr_bytes[0],
		 ether_addr->addr_bytes[4],
		 ether_addr->addr_bytes[5]);
}

static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3_evt_cause
hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
			  uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.imp_cnt++;
		hns3_warn(hw, "IMP reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw, "IMP reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
			     uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.global_cnt++;
		hns3_warn(hw, "Global reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw,
			  "Global reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t vector0_int_stats;
	uint32_t cmdq_src_val;
	uint32_t hw_err_src_reg;
	uint32_t val;
	enum hns3_evt_cause ret;
	bool is_delay;

	/* Fetch the events from their corresponding regs */
	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);

	is_delay = clearval == NULL ? true : false;
	/*
	 * Assumption: if reset and mailbox events are reported together,
	 * only the reset event is processed and the mailbox event is
	 * deferred. Since the RX CMDQ event is not cleared this time, the
	 * hardware will raise another interrupt for the mailbox alone.
	 */
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
		ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Global reset */
	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
		ret = hns3_proc_global_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Check for vector0 1588 event source */
	if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
		val = BIT(HNS3_VECTOR0_1588_INT_B);
		ret = HNS3_VECTOR0_EVENT_PTP;
		goto out;
	}

	/* Check for vector0 msix event source */
	if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
		val = vector0_int_stats | hw_err_src_reg;
		ret = HNS3_VECTOR0_EVENT_ERR;
		goto out;
	}

	/* Check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
		cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		val = cmdq_src_val;
		ret = HNS3_VECTOR0_EVENT_MBX;
		goto out;
	}

	val = vector0_int_stats;
	ret = HNS3_VECTOR0_EVENT_OTHER;
out:
	if (clearval)
		*clearval = val;
	return ret;
}
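/*
 * Note the priority order encoded above: IMP reset, then global reset,
 * then 1588/PTP, then MSI-X/RAS errors, then mailbox, and finally
 * "other". Only the highest-priority pending cause is returned per call.
 */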
static bool
hns3_is_1588_event_type(uint32_t event_type)
{
	return (event_type == HNS3_VECTOR0_EVENT_PTP);
}

static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
	if (event_type == HNS3_VECTOR0_EVENT_RST ||
	    hns3_is_1588_event_type(event_type))
		hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
	else if (event_type == HNS3_VECTOR0_EVENT_MBX)
		hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
	uint32_t vector0_int_stats;

	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during IMP reset interrupt");

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during Global reset interrupt");

	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
			       BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
			       BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
			       BIT(HNS3_VECTOR0_CORERESET_INT_B));
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
			       BIT(HNS3_VECTOR0_1588_INT_B));
}

static void
hns3_handle_mac_tnl(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	uint32_t status;
	int ret;

	/* Query and clear mac tnl interrupt */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
		return;
	}

	status = rte_le_to_cpu_32(desc.data[0]);
	if (status) {
		hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
					  false);
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret)
			hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
				 ret);
	}
}
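/*
 * The vector 0 ISR below follows the usual pattern: mask the misc
 * vector, snapshot the status registers, acknowledge the cause that was
 * handled, dispatch it, and unmask again on the way out.
 */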
static void
hns3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_evt_cause event_cause;
	uint32_t clearval = 0;
	uint32_t vector0_int;
	uint32_t ras_int;
	uint32_t cmdq_int;

	/* Disable interrupt */
	hns3_pf_disable_irq0(hw);

	event_cause = hns3_check_event_cause(hns, &clearval);
	vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hns3_clear_event_cause(hw, event_cause, clearval);
	/* vector 0 interrupt is shared with reset and mailbox source events. */
	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
		hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
		hns3_handle_mac_tnl(hw);
		hns3_handle_error(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
		hns3_warn(hw, "received reset interrupt");
		hns3_schedule_reset(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
		hns3_dev_handle_mbx_msg(hw);
	} else {
		hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
	}

	/* Enable interrupt if it is not caused by reset */
	hns3_pf_enable_irq0(hw);
}

static int
hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3_VLAN_ID_OFFSET_STEP	160
#define HNS3_VLAN_BYTE_SIZE		8
	struct hns3_vlan_filter_pf_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	uint8_t vlan_offset_byte_val;
	struct hns3_cmd_desc desc;
	uint8_t vlan_offset_byte;
	uint8_t vlan_offset_base;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
			   HNS3_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);

	req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_base;
	req->vlan_cfg = on ? 0 : 1;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set port vlan id failed, vlan_id = %u, ret = %d",
			 vlan_id, ret);

	return ret;
}
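/*
 * The VLAN filter table is programmed as a bitmap: each command covers a
 * block of HNS3_VLAN_ID_OFFSET_STEP (160) VLAN IDs, i.e. a 20-byte bitmap.
 * For example, vlan_id 4094 yields vlan_offset_base = 4094 / 160 = 25,
 * vlan_offset_byte = (4094 % 160) / 8 = 11, and bit value
 * 1 << (4094 % 8) = 0x40.
 */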
static void
hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id) {
			if (vlan_entry->hd_tbl_status)
				hns3_set_port_vlan_filter(hns, vlan_id, 0);
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			break;
		}
	}
}

static void
hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
			bool writen_to_tbl)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id)
			return;
	}

	vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
	if (vlan_entry == NULL) {
		hns3_err(hw, "Failed to malloc hns3 vlan table");
		return;
	}

	vlan_entry->hd_tbl_status = writen_to_tbl;
	vlan_entry->vlan_id = vlan_id;

	LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
}

static int
hns3_restore_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;
	uint16_t vlan_id;
	int ret = 0;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
		return hns3_vlan_pvid_configure(hns,
						hw->port_base_vlan_cfg.pvid, 1);

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			vlan_id = vlan_entry->vlan_id;
			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
			if (ret)
				break;
		}
	}

	return ret;
}

static int
hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
	struct hns3_hw *hw = &hns->hw;
	bool writen_to_tbl = false;
	int ret = 0;

	/*
	 * When the vlan filter is enabled, hardware regards packets without
	 * vlan as packets with vlan 0. So, to receive packets without vlan,
	 * vlan id 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
	 */
	if (on == 0 && vlan_id == 0)
		return 0;

	/*
	 * When port-based vlan is enabled, it is used as the vlan filter
	 * condition. In this case, the vlan filter table is not updated when
	 * the user adds a new vlan or removes an existing one; only the vlan
	 * list is updated. The vlan ids in the vlan list are written to the
	 * vlan filter table once port-based vlan is disabled.
	 */
	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
		writen_to_tbl = true;
	}

	if (ret == 0) {
		if (on)
			hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
		else
			hns3_rm_dev_vlan_table(hns, vlan_id);
	}
	return ret;
}
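/*
 * hns3_vlan_filter_set() below backs the .vlan_filter_set dev op, which
 * applications typically reach through rte_eth_dev_vlan_filter(), e.g.
 * (sketch):
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);	// add VLAN 100
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);	// remove it again
 */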
static int
hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
			 uint16_t tpid)
{
	struct hns3_rx_vlan_type_cfg_cmd *rx_req;
	struct hns3_tx_vlan_type_cfg_cmd *tx_req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
	     vlan_type != ETH_VLAN_TYPE_OUTER)) {
		hns3_err(hw, "Unsupported vlan type, vlan_type = %d",
			 vlan_type);
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_VLAN) {
		hns3_err(hw, "Unsupported vlan tpid, vlan_type = %d",
			 vlan_type);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Send rxvlan protocol type command fail, "
			 "ret = %d", ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
	tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send txvlan protocol type command fail, "
			 "ret = %d", ret);
	return ret;
}

static int
hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_rx_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
		     vcfg->strip_tag1_discard_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
		     vcfg->strip_tag2_discard_en ? 1 : 0);
	/*
	 * In the current version VF is not supported when PF is driven by the
	 * DPDK driver, so only the PF vport needs to be configured.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port rxvlan cfg command fail, ret = %d",
			 ret);
	return ret;
}

static void
hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
}

static void
hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
}
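/*
 * Rx VLAN strip configuration depends on the port-based VLAN state:
 * with PVID disabled, only the outer tag (tag2) is stripped when the
 * user enables the offload; with PVID enabled, tag2 is always stripped
 * and discarded (it carries the PVID) and the user setting controls
 * tag1 instead.
 */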
static int
hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
{
	struct hns3_rx_vtag_cfg rxvlan_cfg;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		rxvlan_cfg.strip_tag1_en = false;
		rxvlan_cfg.strip_tag2_en = enable;
		rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		rxvlan_cfg.strip_tag1_en = enable;
		rxvlan_cfg.strip_tag2_en = true;
		rxvlan_cfg.strip_tag2_discard_en = true;
	}

	rxvlan_cfg.strip_tag1_discard_en = false;
	rxvlan_cfg.vlan1_vlan_prionly = false;
	rxvlan_cfg.vlan2_vlan_prionly = false;
	rxvlan_cfg.rx_vlan_offload_en = enable;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
	if (ret) {
		hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
			 enable ? "enable" : "disable", ret);
		return ret;
	}

	hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);

	return ret;
}

static int
hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
			  uint8_t fe_type, bool filter_en, uint8_t vf_id)
{
	struct hns3_vlan_filter_ctrl_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set vlan filter fail, ret = %d", ret);

	return ret;
}
static int
hns3_vlan_filter_init(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
					HNS3_FILTER_FE_EGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret) {
		hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);

	return ret;
}

static int
hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, enable,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to %s port vlan filter, ret = %d",
			 enable ? "enable" : "disable", ret);

	return ret;
}

static int
hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_rxmode *rxmode;
	unsigned int tmp_mask;
	bool enable;
	int ret = 0;

	rte_spinlock_lock(&hw->lock);
	rxmode = &dev->data->dev_conf.rxmode;
	tmp_mask = (unsigned int)mask;
	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
		/* Ignore vlan filter configuration in promiscuous mode */
		if (!dev->data->promiscuous) {
			/* Enable or disable VLAN filter */
			enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
				 true : false;

			ret = hns3_enable_vlan_filter(hns, enable);
			if (ret) {
				rte_spinlock_unlock(&hw->lock);
				hns3_err(hw, "failed to %s rx filter, ret = %d",
					 enable ? "enable" : "disable", ret);
				return ret;
			}
		}
	}

	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
			 true : false;

		ret = hns3_en_hw_strip_rxvtag(hns, enable);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			hns3_err(hw, "failed to %s rx strip, ret = %d",
				 enable ? "enable" : "disable", ret);
			return ret;
		}
	}

	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_tx_cfg_cmd *req;
	struct hns3_cmd_desc desc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = vcfg->default_tag1;
	req->def_vlan_tag2 = vcfg->default_tag2;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
		     vcfg->accept_tag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
		     vcfg->accept_untag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
		     vcfg->accept_tag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
		     vcfg->accept_untag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
		     vcfg->insert_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
		     vcfg->insert_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
		     vcfg->tag_shift_mode_en ? 1 : 0);

	/*
	 * In the current version VF is not supported when PF is driven by the
	 * DPDK driver, so only the PF vport needs to be configured.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port txvlan cfg command fail, ret = %d",
			 ret);

	return ret;
}
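/*
 * Tx-side PVID handling: with port-based VLAN disabled, tagged packets
 * are accepted as-is and no tag is inserted; with it enabled, the
 * configured pvid is inserted as the default tag1, and already-tagged
 * packets are only accepted when hardware runs in the shift-and-discard
 * vlan mode.
 */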
static int
hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
		     uint16_t pvid)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_vtag_cfg txvlan_cfg;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
		txvlan_cfg.accept_tag1 = true;
		txvlan_cfg.insert_tag1_en = false;
		txvlan_cfg.default_tag1 = 0;
	} else {
		txvlan_cfg.accept_tag1 =
			hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
		txvlan_cfg.insert_tag1_en = true;
		txvlan_cfg.default_tag1 = pvid;
	}

	txvlan_cfg.accept_untag1 = true;
	txvlan_cfg.accept_tag2 = true;
	txvlan_cfg.accept_untag2 = true;
	txvlan_cfg.insert_tag2_en = false;
	txvlan_cfg.default_tag2 = 0;
	txvlan_cfg.tag_shift_mode_en = true;

	ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
	if (ret) {
		hns3_err(hw, "pf vlan set pvid failed, pvid = %u, ret = %d",
			 pvid, ret);
		return ret;
	}

	hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
	return ret;
}

static void
hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
			vlan_entry->hd_tbl_status = false;
		}
	}

	if (is_del_list) {
		vlan_entry = LIST_FIRST(&pf->vlan_list);
		while (vlan_entry) {
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			vlan_entry = LIST_FIRST(&pf->vlan_list);
		}
	}
}

static void
hns3_add_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (!vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
			vlan_entry->hd_tbl_status = true;
		}
	}
}

static void
hns3_remove_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	hns3_rm_all_vlan_table(hns, true);
	if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
		ret = hns3_set_port_vlan_filter(hns,
						hw->port_base_vlan_cfg.pvid, 0);
		if (ret) {
			hns3_err(hw, "Failed to remove all vlan table, "
				 "ret = %d", ret);
			return;
		}
	}
}

static int
hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
				uint16_t port_base_vlan_state,
				uint16_t new_pvid)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t old_pvid;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
		old_pvid = hw->port_base_vlan_cfg.pvid;
		if (old_pvid != HNS3_INVALID_PVID) {
			ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
			if (ret) {
				hns3_err(hw, "failed to remove old pvid %u, "
					 "ret = %d", old_pvid, ret);
				return ret;
			}
		}

		hns3_rm_all_vlan_table(hns, false);
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
		if (ret) {
			hns3_err(hw, "failed to add new pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}
	} else {
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
		if (ret) {
			hns3_err(hw, "failed to remove pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}

		hns3_add_all_vlan_table(hns);
	}
	return 0;
}
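/*
 * Net effect of the helper above: enabling PVID swaps the hardware
 * filter table over to the single pvid entry (user vlans stay cached in
 * the soft list), and disabling PVID removes that entry and writes the
 * cached user vlans back to hardware.
 */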
static int
hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
{
	struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
	struct hns3_rx_vtag_cfg rx_vlan_cfg;
	bool rx_strip_en;
	int ret;

	rx_strip_en = old_cfg->rx_vlan_offload_en;
	if (on) {
		rx_vlan_cfg.strip_tag1_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_en = true;
		rx_vlan_cfg.strip_tag2_discard_en = true;
	} else {
		rx_vlan_cfg.strip_tag1_en = false;
		rx_vlan_cfg.strip_tag2_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_discard_en = false;
	}
	rx_vlan_cfg.strip_tag1_discard_en = false;
	rx_vlan_cfg.vlan1_vlan_prionly = false;
	rx_vlan_cfg.vlan2_vlan_prionly = false;
	rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
	if (ret)
		return ret;

	hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
	return ret;
}

static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t port_base_vlan_state;
	int ret, err;

	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
		if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
			hns3_warn(hw, "Invalid operation! As current pvid set "
				  "is %u, disable pvid %u is invalid",
				  hw->port_base_vlan_cfg.pvid, pvid);
		return 0;
	}

	port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
				    HNS3_PORT_BASE_VLAN_DISABLE;
	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
			 ret);
		return ret;
	}

	ret = hns3_en_pvid_strip(hns, on);
	if (ret) {
		hns3_err(hw, "failed to config rx vlan strip for pvid, "
			 "ret = %d", ret);
		goto pvid_vlan_strip_fail;
	}

	if (pvid == HNS3_INVALID_PVID)
		goto out;
	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to update vlan filter entries, ret = %d",
			 ret);
		goto vlan_filter_set_fail;
	}

out:
	hw->port_base_vlan_cfg.state = port_base_vlan_state;
	hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
	return ret;

vlan_filter_set_fail:
	err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
				 HNS3_PORT_BASE_VLAN_ENABLE);
	if (err)
		hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
	err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
				   hw->port_base_vlan_cfg.pvid);
	if (err)
		hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

	return ret;
}
static int
hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool pvid_en_state_change;
	uint16_t pvid_state;
	int ret;

	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
		hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
			 RTE_ETHER_MAX_VLAN_ID);
		return -EINVAL;
	}

	/*
	 * If the PVID configuration state changes, the PVID state stored in
	 * struct hns3_tx_queue/hns3_rx_queue must be refreshed as well.
	 */
	pvid_state = hw->port_base_vlan_cfg.state;
	if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
	    (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
		pvid_en_state_change = false;
	else
		pvid_en_state_change = true;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_pvid_configure(hns, pvid, on);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		return ret;
	/*
	 * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE do the PVID related
	 * operations in Tx/Rx need to be processed by the driver.
	 */
	if (pvid_en_state_change &&
	    hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
		hns3_update_all_queues_pvid_proc_en(hw);

	return 0;
}

static int
hns3_default_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * When the vlan filter is enabled, hardware regards packets without
	 * vlan as packets with vlan 0. Therefore, if vlan 0 is not in the
	 * vlan table, packets without vlan won't be received. So, add vlan 0
	 * as the default vlan.
	 */
	ret = hns3_vlan_filter_configure(hns, 0, 1);
	if (ret)
		hns3_err(hw, "default vlan 0 config failed, ret = %d", ret);
	return ret;
}

static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * This function can be called in both the initialization and the
	 * reset process. In the reset process, it means that hardware has
	 * been reset successfully and the configuration needs to be restored
	 * so that it remains unchanged before and after the reset.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
	}

	ret = hns3_vlan_filter_init(hns);
	if (ret) {
		hns3_err(hw, "vlan init fail in pf, ret = %d", ret);
		return ret;
	}

	ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
				       RTE_ETHER_TYPE_VLAN);
	if (ret) {
		hns3_err(hw, "tpid set fail in pf, ret = %d", ret);
		return ret;
	}

	/*
	 * In the reinit-dev stage of the reset process, the following
	 * vlan-related configurations may differ from those at
	 * initialization; they are restored to hardware later in
	 * hns3_restore_vlan_table and hns3_restore_vlan_conf.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
		if (ret) {
			hns3_err(hw, "pvid set fail in pf, ret = %d", ret);
			return ret;
		}

		ret = hns3_en_hw_strip_rxvtag(hns, false);
		if (ret) {
			hns3_err(hw, "rx strip configure fail in pf, ret = %d",
				 ret);
			return ret;
		}
	}

	return hns3_default_vlan_config(hns);
}
static int
hns3_restore_vlan_conf(struct hns3_adapter *hns)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	bool enable;
	int ret;

	if (!hw->data->promiscuous) {
		/* Restore vlan filter states */
		offloads = hw->data->dev_conf.rxmode.offloads;
		enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
		ret = hns3_enable_vlan_filter(hns, enable);
		if (ret) {
			hns3_err(hw, "failed to restore vlan rx filter conf, "
				 "ret = %d", ret);
			return ret;
		}
	}

	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
	if (ret) {
		hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
	if (ret)
		hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);

	return ret;
}

static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_txmode *txmode;
	struct hns3_hw *hw = &hns->hw;
	int mask;
	int ret;

	txmode = &data->dev_conf.txmode;
	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
		hns3_warn(hw,
			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
			  "configuration is not supported! Ignore these two "
			  "parameters: hw_vlan_reject_tagged(%u), "
			  "hw_vlan_reject_untagged(%u)",
			  txmode->hw_vlan_reject_tagged,
			  txmode->hw_vlan_reject_untagged);

	/* Apply vlan offload setting */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
	ret = hns3_vlan_offload_set(dev, mask);
	if (ret) {
		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * If no pvid config is given in rte_eth_conf, there is no need to
	 * write any VLAN pvid related configuration to hardware.
	 */
	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
		return 0;

	/* Apply pvid setting */
	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
				 txmode->hw_vlan_insert_pvid);
	if (ret)
		hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
			 txmode->pvid, ret);

	return ret;
}
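/*
 * Besides rte_eth_dev_set_vlan_pvid(), the pvid can be requested at
 * configure time through rte_eth_conf, e.g. (sketch):
 *
 *	struct rte_eth_conf conf = { 0 };
 *	conf.txmode.hw_vlan_insert_pvid = 1;
 *	conf.txmode.pvid = 100;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */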
static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
		unsigned int tso_mss_max)
{
	struct hns3_cfg_tso_status_cmd *req;
	struct hns3_cmd_desc desc;
	uint16_t tso_mss;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hns3_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_min);
	req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_max);
	req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
		   uint16_t *allocated_size, bool is_alloc)
{
	struct hns3_umv_spc_alc_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_umv_spc_alc_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
	hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
	req->space_size = rte_cpu_to_le_32(space_size);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret = %d",
			     is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = rte_le_to_cpu_32(desc.data[1]);

	return 0;
}

static int
hns3_init_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t allocated_size = 0;
	int ret;

	ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
				 true);
	if (ret)
		return ret;

	if (allocated_size < pf->wanted_umv_size)
		PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
			     pf->wanted_umv_size, allocated_size);

	pf->max_umv_size = (!!allocated_size) ? allocated_size :
			   pf->wanted_umv_size;
	pf->used_umv_size = 0;
	return 0;
}
static int
hns3_uninit_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	if (pf->max_umv_size == 0)
		return 0;

	ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
	if (ret)
		return ret;

	pf->max_umv_size = 0;

	return 0;
}

static bool
hns3_is_umv_space_full(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	bool is_full;

	is_full = (pf->used_umv_size >= pf->max_umv_size);

	return is_full;
}

static void
hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (is_free) {
		if (pf->used_umv_size > 0)
			pf->used_umv_size--;
	} else
		pf->used_umv_size++;
}

static void
hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
		      const uint8_t *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
			    ((uint32_t)mac_addr[2] << 16) |
			    ((uint32_t)mac_addr[1] << 8) |
			    (uint32_t)mac_addr[0];
	uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];

	hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
		hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
	new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
}
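/*
 * Byte order illustration for hns3_prepare_mac_addr(): the MAC address
 * 00:11:22:33:44:55 is packed as high_val = 0x33221100 (bytes 3..0) and
 * low_val = 0x5544 (bytes 5..4) before the little-endian conversion.
 */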
static int
hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
			     uint8_t resp_code,
			     enum hns3_mac_vlan_tbl_opcode op)
{
	if (cmdq_resp) {
		hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status, "
			 "status = %u", cmdq_resp);
		return -EIO;
	}

	if (op == HNS3_MAC_VLAN_ADD) {
		if (resp_code == 0 || resp_code == 1) {
			return 0;
		} else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for uc_overflow");
			return -ENOSPC;
		} else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for mc_overflow");
			return -ENOSPC;
		}

		hns3_err(hw, "add mac addr failed for undefined, code = %u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_REMOVE) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "remove mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "remove mac addr failed for undefined, code = %u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_LKUP) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "lookup mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "lookup mac addr failed for undefined, code = %u",
			 resp_code);
		return -EIO;
	}

	hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode = %u",
		 op);

	return -EINVAL;
}

static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req,
			 struct hns3_cmd_desc *desc, bool is_mc)
{
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
	} else {
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, desc, 1);
	}
	if (ret) {
		hns3_err(hw, "lookup mac addr failed for cmd_send, ret = %d.",
			 ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc[0].retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_LKUP);
}

static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
		      struct hns3_mac_vlan_tbl_entry_cmd *req,
		      struct hns3_cmd_desc *mc_desc)
{
	uint8_t resp_code;
	uint16_t retval;
	int cfg_status;
	int ret;

	if (mc_desc == NULL) {
		struct hns3_cmd_desc desc;

		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false);
		memcpy(desc.data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, &desc, 1);
		resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc.retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	} else {
		hns3_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		mc_desc[0].retval = 0;
		ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
		resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(mc_desc[0].retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	}

	if (ret) {
		hns3_err(hw, "add mac addr failed for cmd_send, ret = %d", ret);
		return ret;
	}

	return cfg_status;
}

static int
hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req)
{
	struct hns3_cmd_desc desc;
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "del mac addr failed for cmd_send, ret = %d", ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc.retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_REMOVE);
}
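/*
 * MC entries span HNS3_MC_MAC_VLAN_ADD_DESC_NUM command descriptors:
 * every descriptor except the last carries HNS3_CMD_FLAG_NEXT so the
 * firmware treats the chain as one request, and the response code comes
 * back in the first descriptor. UC entries fit in a single descriptor.
 */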
static int
hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cmd_desc desc[3];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint16_t egress_port = 0;
	uint8_t vf_id;
	int ret;

	/* Check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	/*
	 * In the current version VF is not supported when PF is driven by the
	 * DPDK driver, so only the PF vport needs to be configured.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
		       HNS3_MAC_EPORT_VFID_S, vf_id);

	req.egress_port = rte_cpu_to_le_16(egress_port);

	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

	/*
	 * Look up the mac address in the mac_vlan table, and add it if the
	 * entry is nonexistent. Repeated unicast entries are not allowed in
	 * the mac vlan table.
	 */
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false);
	if (ret == -ENOENT) {
		if (!hns3_is_umv_space_full(hw)) {
			ret = hns3_add_mac_vlan_tbl(hw, &req, NULL);
			if (!ret)
				hns3_update_umv_space(hw, false);
			return ret;
		}

		hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

		return -ENOSPC;
	}

	hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

	/* Check if we just hit the duplicate */
	if (ret == 0) {
		hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
		return 0;
	}

	hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
		 mac_str);

	return ret;
}

static int
hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int ret;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses */
		if (rte_is_same_ether_addr(addr, mac_addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to add mc mac addr, same addrs"
				 "(%s) is added by the set_mc_mac_addr_list "
				 "API", mac_str);
			return -EINVAL;
		}
	}

	ret = hns3_add_mc_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
			 mac_str, ret);
	}
	return ret;
}

static int
hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	ret = hns3_remove_mc_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d",
			 mac_str, ret);
	}
	return ret;
}
static int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * In the hns3 network engine, UC and MC mac addresses are added with
	 * different firmware commands, so the input address must be
	 * classified as UC or MC to pick the right command. Note that it is
	 * recommended to set MC mac addresses with the API function named
	 * rte_eth_dev_set_mc_addr_list: using rte_eth_dev_mac_addr_add to set
	 * an MC mac address may affect the number of UC mac addresses that
	 * can be configured.
	 */
	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hns3_add_mc_addr_common(hw, mac_addr);
	else
		ret = hns3_add_uc_addr_common(hw, mac_addr);

	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
		return ret;
	}

	if (idx == 0)
		hw->mac.default_addr_setted = true;
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	/* Check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
	ret = hns3_remove_mac_vlan_tbl(hw, &req);
	if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */
		return 0;
	else if (ret == 0)
		hns3_update_umv_space(hw, true);

	return ret;
}

static void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hns3_remove_mc_addr_common(hw, mac_addr);
	else
		ret = hns3_remove_uc_addr_common(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}
}
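/*
 * Replacing the default MAC address below is a three-step sequence:
 * remove the old UC entry, add the new one, then update the pause
 * (flow control) MAC address. Each failure unwinds the earlier steps so
 * hardware and driver state stay consistent.
 */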
static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mac_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *oaddr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	bool default_addr_setted;
	int ret, ret_val;

	/*
	 * It has been guaranteed that the input parameter named mac_addr is
	 * a valid address in the rte layer of the DPDK framework.
	 */
	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
	default_addr_setted = hw->mac.default_addr_setted;
	if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr))
		return 0;

	rte_spinlock_lock(&hw->lock);
	if (default_addr_setted) {
		ret = hns3_remove_uc_addr_common(hw, oaddr);
		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       oaddr);
			hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
				  mac_str, ret);

			rte_spinlock_unlock(&hw->lock);
			return ret;
		}
	}

	ret = hns3_add_uc_addr_common(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
		goto err_add_uc_addr;
	}

	ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
	if (ret) {
		hns3_err(hw, "Failed to configure mac pause address: %d", ret);
		goto err_pause_addr_cfg;
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	hw->mac.default_addr_setted = true;
	rte_spinlock_unlock(&hw->lock);

	return 0;

err_pause_addr_cfg:
	ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_warn(hw,
			  "Failed to roll back to delete set mac addr(%s): %d",
			  mac_str, ret_val);
	}

err_add_uc_addr:
	ret_val = hns3_add_uc_addr_common(hw, oaddr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
		hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
			  mac_str, ret_val);
		hw->mac.default_addr_setted = false;
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
"remove" : "restore", 1848 mac_str, i, ret); 1849 } 1850 } 1851 return err; 1852 } 1853 1854 static void 1855 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr) 1856 { 1857 #define HNS3_VF_NUM_IN_FIRST_DESC 192 1858 uint8_t word_num; 1859 uint8_t bit_num; 1860 1861 if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) { 1862 word_num = vfid / 32; 1863 bit_num = vfid % 32; 1864 if (clr) 1865 desc[1].data[word_num] &= 1866 rte_cpu_to_le_32(~(1UL << bit_num)); 1867 else 1868 desc[1].data[word_num] |= 1869 rte_cpu_to_le_32(1UL << bit_num); 1870 } else { 1871 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32; 1872 bit_num = vfid % 32; 1873 if (clr) 1874 desc[2].data[word_num] &= 1875 rte_cpu_to_le_32(~(1UL << bit_num)); 1876 else 1877 desc[2].data[word_num] |= 1878 rte_cpu_to_le_32(1UL << bit_num); 1879 } 1880 } 1881 1882 static int 1883 hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1884 { 1885 struct hns3_mac_vlan_tbl_entry_cmd req; 1886 struct hns3_cmd_desc desc[3]; 1887 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1888 uint8_t vf_id; 1889 int ret; 1890 1891 /* Check if mac addr is valid */ 1892 if (!rte_is_multicast_ether_addr(mac_addr)) { 1893 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1894 mac_addr); 1895 hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid", 1896 mac_str); 1897 return -EINVAL; 1898 } 1899 1900 memset(&req, 0, sizeof(req)); 1901 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1902 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1903 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1904 if (ret) { 1905 /* This mac addr do not exist, add new entry for it */ 1906 memset(desc[0].data, 0, sizeof(desc[0].data)); 1907 memset(desc[1].data, 0, sizeof(desc[0].data)); 1908 memset(desc[2].data, 0, sizeof(desc[0].data)); 1909 } 1910 1911 /* 1912 * In current version VF is not supported when PF is driven by DPDK 1913 * driver, just need to configure parameters for PF vport. 1914 */ 1915 vf_id = HNS3_PF_FUNC_ID; 1916 hns3_update_desc_vfid(desc, vf_id, false); 1917 ret = hns3_add_mac_vlan_tbl(hw, &req, desc); 1918 if (ret) { 1919 if (ret == -ENOSPC) 1920 hns3_err(hw, "mc mac vlan table is full"); 1921 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1922 mac_addr); 1923 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret); 1924 } 1925 1926 return ret; 1927 } 1928 1929 static int 1930 hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1931 { 1932 struct hns3_mac_vlan_tbl_entry_cmd req; 1933 struct hns3_cmd_desc desc[3]; 1934 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1935 uint8_t vf_id; 1936 int ret; 1937 1938 /* Check if mac addr is valid */ 1939 if (!rte_is_multicast_ether_addr(mac_addr)) { 1940 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1941 mac_addr); 1942 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid", 1943 mac_str); 1944 return -EINVAL; 1945 } 1946 1947 memset(&req, 0, sizeof(req)); 1948 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1949 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1950 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1951 if (ret == 0) { 1952 /* 1953 * This mac addr exist, remove this handle's VFID for it. 1954 * In current version VF is not supported when PF is driven by 1955 * DPDK driver, just need to configure parameters for PF vport. 
1956 */ 1957 vf_id = HNS3_PF_FUNC_ID; 1958 hns3_update_desc_vfid(desc, vf_id, true); 1959 1960 /* All the vfid is zero, so need to delete this entry */ 1961 ret = hns3_remove_mac_vlan_tbl(hw, &req); 1962 } else if (ret == -ENOENT) { 1963 /* This mac addr doesn't exist. */ 1964 return 0; 1965 } 1966 1967 if (ret) { 1968 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1969 mac_addr); 1970 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret); 1971 } 1972 1973 return ret; 1974 } 1975 1976 static int 1977 hns3_set_mc_addr_chk_param(struct hns3_hw *hw, 1978 struct rte_ether_addr *mc_addr_set, 1979 uint32_t nb_mc_addr) 1980 { 1981 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1982 struct rte_ether_addr *addr; 1983 uint32_t i; 1984 uint32_t j; 1985 1986 if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { 1987 hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " 1988 "invalid. valid range: 0~%d", 1989 nb_mc_addr, HNS3_MC_MACADDR_NUM); 1990 return -EINVAL; 1991 } 1992 1993 /* Check if input mac addresses are valid */ 1994 for (i = 0; i < nb_mc_addr; i++) { 1995 addr = &mc_addr_set[i]; 1996 if (!rte_is_multicast_ether_addr(addr)) { 1997 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1998 addr); 1999 hns3_err(hw, 2000 "failed to set mc mac addr, addr(%s) invalid.", 2001 mac_str); 2002 return -EINVAL; 2003 } 2004 2005 /* Check if there are duplicate addresses */ 2006 for (j = i + 1; j < nb_mc_addr; j++) { 2007 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 2008 hns3_ether_format_addr(mac_str, 2009 RTE_ETHER_ADDR_FMT_SIZE, 2010 addr); 2011 hns3_err(hw, "failed to set mc mac addr, " 2012 "addrs invalid. two same addrs(%s).", 2013 mac_str); 2014 return -EINVAL; 2015 } 2016 } 2017 2018 /* 2019 * Check if there are duplicate addresses between mac_addrs 2020 * and mc_addr_set 2021 */ 2022 for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) { 2023 if (rte_is_same_ether_addr(addr, 2024 &hw->data->mac_addrs[j])) { 2025 hns3_ether_format_addr(mac_str, 2026 RTE_ETHER_ADDR_FMT_SIZE, 2027 addr); 2028 hns3_err(hw, "failed to set mc mac addr, " 2029 "addrs invalid. 
addrs(%s) has already " 2030 "configured in mac_addr add API", 2031 mac_str); 2032 return -EINVAL; 2033 } 2034 } 2035 } 2036 2037 return 0; 2038 } 2039 2040 static void 2041 hns3_set_mc_addr_calc_addr(struct hns3_hw *hw, 2042 struct rte_ether_addr *mc_addr_set, 2043 int mc_addr_num, 2044 struct rte_ether_addr *reserved_addr_list, 2045 int *reserved_addr_num, 2046 struct rte_ether_addr *add_addr_list, 2047 int *add_addr_num, 2048 struct rte_ether_addr *rm_addr_list, 2049 int *rm_addr_num) 2050 { 2051 struct rte_ether_addr *addr; 2052 int current_addr_num; 2053 int reserved_num = 0; 2054 int add_num = 0; 2055 int rm_num = 0; 2056 int num; 2057 int i; 2058 int j; 2059 bool same_addr; 2060 2061 /* Calculate the mc mac address list that should be removed */ 2062 current_addr_num = hw->mc_addrs_num; 2063 for (i = 0; i < current_addr_num; i++) { 2064 addr = &hw->mc_addrs[i]; 2065 same_addr = false; 2066 for (j = 0; j < mc_addr_num; j++) { 2067 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 2068 same_addr = true; 2069 break; 2070 } 2071 } 2072 2073 if (!same_addr) { 2074 rte_ether_addr_copy(addr, &rm_addr_list[rm_num]); 2075 rm_num++; 2076 } else { 2077 rte_ether_addr_copy(addr, 2078 &reserved_addr_list[reserved_num]); 2079 reserved_num++; 2080 } 2081 } 2082 2083 /* Calculate the mc mac address list that should be added */ 2084 for (i = 0; i < mc_addr_num; i++) { 2085 addr = &mc_addr_set[i]; 2086 same_addr = false; 2087 for (j = 0; j < current_addr_num; j++) { 2088 if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) { 2089 same_addr = true; 2090 break; 2091 } 2092 } 2093 2094 if (!same_addr) { 2095 rte_ether_addr_copy(addr, &add_addr_list[add_num]); 2096 add_num++; 2097 } 2098 } 2099 2100 /* Reorder the mc mac address list maintained by driver */ 2101 for (i = 0; i < reserved_num; i++) 2102 rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]); 2103 2104 for (i = 0; i < rm_num; i++) { 2105 num = reserved_num + i; 2106 rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]); 2107 } 2108 2109 *reserved_addr_num = reserved_num; 2110 *add_addr_num = add_num; 2111 *rm_addr_num = rm_num; 2112 } 2113 2114 static int 2115 hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, 2116 struct rte_ether_addr *mc_addr_set, 2117 uint32_t nb_mc_addr) 2118 { 2119 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2120 struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM]; 2121 struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM]; 2122 struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM]; 2123 struct rte_ether_addr *addr; 2124 int reserved_addr_num; 2125 int add_addr_num; 2126 int rm_addr_num; 2127 int mc_addr_num; 2128 int num; 2129 int ret; 2130 int i; 2131 2132 /* Check if input parameters are valid */ 2133 ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); 2134 if (ret) 2135 return ret; 2136 2137 rte_spinlock_lock(&hw->lock); 2138 2139 /* 2140 * Calculate the mc mac address lists those should be removed and be 2141 * added, Reorder the mc mac address list maintained by driver. 
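 * For illustration: an address present in both hw->mc_addrs and mc_addr_set ends up in reserved_addr_list; an address only in hw->mc_addrs goes to rm_addr_list; an address only in mc_addr_set goes to add_addr_list.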
2142 */ 2143 mc_addr_num = (int)nb_mc_addr; 2144 hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num, 2145 reserved_addr_list, &reserved_addr_num, 2146 add_addr_list, &add_addr_num, 2147 rm_addr_list, &rm_addr_num); 2148 2149 /* Remove mc mac addresses */ 2150 for (i = 0; i < rm_addr_num; i++) { 2151 num = rm_addr_num - i - 1; 2152 addr = &rm_addr_list[num]; 2153 ret = hns3_remove_mc_addr(hw, addr); 2154 if (ret) { 2155 rte_spinlock_unlock(&hw->lock); 2156 return ret; 2157 } 2158 hw->mc_addrs_num--; 2159 } 2160 2161 /* Add mc mac addresses */ 2162 for (i = 0; i < add_addr_num; i++) { 2163 addr = &add_addr_list[i]; 2164 ret = hns3_add_mc_addr(hw, addr); 2165 if (ret) { 2166 rte_spinlock_unlock(&hw->lock); 2167 return ret; 2168 } 2169 2170 num = reserved_addr_num + i; 2171 rte_ether_addr_copy(addr, &hw->mc_addrs[num]); 2172 hw->mc_addrs_num++; 2173 } 2174 rte_spinlock_unlock(&hw->lock); 2175 2176 return 0; 2177 } 2178 2179 static int 2180 hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) 2181 { 2182 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 2183 struct hns3_hw *hw = &hns->hw; 2184 struct rte_ether_addr *addr; 2185 int err = 0; 2186 int ret; 2187 int i; 2188 2189 for (i = 0; i < hw->mc_addrs_num; i++) { 2190 addr = &hw->mc_addrs[i]; 2191 if (!rte_is_multicast_ether_addr(addr)) 2192 continue; 2193 if (del) 2194 ret = hns3_remove_mc_addr(hw, addr); 2195 else 2196 ret = hns3_add_mc_addr(hw, addr); 2197 if (ret) { 2198 err = ret; 2199 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2200 addr); 2201 hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d", 2202 del ? "Remove" : "Restore", mac_str, ret); 2203 } 2204 } 2205 return err; 2206 } 2207 2208 static int 2209 hns3_check_mq_mode(struct rte_eth_dev *dev) 2210 { 2211 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 2212 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 2213 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2214 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2215 struct rte_eth_dcb_rx_conf *dcb_rx_conf; 2216 struct rte_eth_dcb_tx_conf *dcb_tx_conf; 2217 uint8_t num_tc; 2218 int max_tc = 0; 2219 int i; 2220 2221 if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) || 2222 (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB || 2223 tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) { 2224 hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.", 2225 rx_mq_mode, tx_mq_mode); 2226 return -EOPNOTSUPP; 2227 } 2228 2229 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 2230 dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; 2231 if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) { 2232 if (dcb_rx_conf->nb_tcs > pf->tc_max) { 2233 hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.", 2234 dcb_rx_conf->nb_tcs, pf->tc_max); 2235 return -EINVAL; 2236 } 2237 2238 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS || 2239 dcb_rx_conf->nb_tcs == HNS3_8_TCS)) { 2240 hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, " 2241 "nb_tcs(%d) != %d or %d in rx direction.", 2242 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS); 2243 return -EINVAL; 2244 } 2245 2246 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) { 2247 hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)", 2248 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs); 2249 return -EINVAL; 2250 } 2251 2252 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) { 2253 if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) { 2254 hns3_err(hw, "dcb_tc[%d] = %u in rx direction, " 2255 "is not equal to one in tx direction.", 2256 i, 
dcb_rx_conf->dcb_tc[i]); 2257 return -EINVAL; 2258 } 2259 if (dcb_rx_conf->dcb_tc[i] > max_tc) 2260 max_tc = dcb_rx_conf->dcb_tc[i]; 2261 } 2262 2263 num_tc = max_tc + 1; 2264 if (num_tc > dcb_rx_conf->nb_tcs) { 2265 hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)", 2266 num_tc, dcb_rx_conf->nb_tcs); 2267 return -EINVAL; 2268 } 2269 } 2270 2271 return 0; 2272 } 2273 2274 static int 2275 hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en, 2276 enum hns3_ring_type queue_type, uint16_t queue_id) 2277 { 2278 struct hns3_cmd_desc desc; 2279 struct hns3_ctrl_vector_chain_cmd *req = 2280 (struct hns3_ctrl_vector_chain_cmd *)desc.data; 2281 enum hns3_opcode_type op; 2282 uint16_t tqp_type_and_id = 0; 2283 uint16_t type; 2284 uint16_t gl; 2285 int ret; 2286 2287 op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; 2288 hns3_cmd_setup_basic_desc(&desc, op, false); 2289 req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M, 2290 HNS3_TQP_INT_ID_L_S); 2291 req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M, 2292 HNS3_TQP_INT_ID_H_S); 2293 2294 if (queue_type == HNS3_RING_TYPE_RX) 2295 gl = HNS3_RING_GL_RX; 2296 else 2297 gl = HNS3_RING_GL_TX; 2298 2299 type = queue_type; 2300 2301 hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S, 2302 type); 2303 hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id); 2304 hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S, 2305 gl); 2306 req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); 2307 req->int_cause_num = 1; 2308 ret = hns3_cmd_send(hw, &desc, 1); 2309 if (ret) { 2310 hns3_err(hw, "%s TQP %u failed, vector_id = %u, ret = %d.", 2311 en ? "Map" : "Unmap", queue_id, vector_id, ret); 2312 return ret; 2313 } 2314 2315 return 0; 2316 } 2317 2318 static int 2319 hns3_init_ring_with_vector(struct hns3_hw *hw) 2320 { 2321 uint16_t vec; 2322 int ret; 2323 int i; 2324 2325 /* 2326 * In the hns3 network engine, vector 0 is always the misc interrupt of 2327 * this function, and vectors 1~N can be used for the queues of the 2328 * function. Tx and Rx queues with the same number share the interrupt 2329 * vector. During initialization, all hardware mappings between queues 2330 * and interrupt vectors must be cleared, so that errors caused by 2331 * residual configurations, such as an unexpected Tx interrupt, can be 2332 * avoided. 2333 */ 2334 vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ 2335 if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE) 2336 vec = vec - 1; /* the last interrupt is reserved */ 2337 hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); 2338 for (i = 0; i < hw->intr_tqps_num; i++) { 2339 /* 2340 * Set the gap limiter/rate limiter/quantity limiter algorithm 2341 * configuration for interrupt coalescing of the queue's interrupt. 2342 */ 2343 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, 2344 HNS3_TQP_INTR_GL_DEFAULT); 2345 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, 2346 HNS3_TQP_INTR_GL_DEFAULT); 2347 hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); 2348 /* 2349 * QL (quantity limiter) is not used currently, so just set 0 to 2350 * disable it.
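 * (QL only takes effect when the hardware reports a nonzero int_ql_max, which is queried from firmware on newer revisions; see hns3_parse_dev_specifications() below.)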
2351 */ 2352 hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); 2353 2354 ret = hns3_bind_ring_with_vector(hw, vec, false, 2355 HNS3_RING_TYPE_TX, i); 2356 if (ret) { 2357 PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with " 2358 "vector: %u, ret=%d", i, vec, ret); 2359 return ret; 2360 } 2361 2362 ret = hns3_bind_ring_with_vector(hw, vec, false, 2363 HNS3_RING_TYPE_RX, i); 2364 if (ret) { 2365 PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with " 2366 "vector: %u, ret=%d", i, vec, ret); 2367 return ret; 2368 } 2369 } 2370 2371 return 0; 2372 } 2373 2374 static int 2375 hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf) 2376 { 2377 struct hns3_adapter *hns = dev->data->dev_private; 2378 struct hns3_hw *hw = &hns->hw; 2379 uint32_t max_rx_pkt_len; 2380 uint16_t mtu; 2381 int ret; 2382 2383 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) 2384 return 0; 2385 2386 /* 2387 * If jumbo frames are enabled, MTU needs to be refreshed 2388 * according to the maximum RX packet length. 2389 */ 2390 max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; 2391 if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || 2392 max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { 2393 hns3_err(hw, "maximum Rx packet length must be greater than %u " 2394 "and no more than %u when jumbo frame enabled.", 2395 (uint16_t)HNS3_DEFAULT_FRAME_LEN, 2396 (uint16_t)HNS3_MAX_FRAME_LEN); 2397 return -EINVAL; 2398 } 2399 2400 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); 2401 ret = hns3_dev_mtu_set(dev, mtu); 2402 if (ret) 2403 return ret; 2404 dev->data->mtu = mtu; 2405 2406 return 0; 2407 } 2408 2409 static int 2410 hns3_setup_dcb(struct rte_eth_dev *dev) 2411 { 2412 struct hns3_adapter *hns = dev->data->dev_private; 2413 struct hns3_hw *hw = &hns->hw; 2414 int ret; 2415 2416 if (!hns3_dev_dcb_supported(hw)) { 2417 hns3_err(hw, "this port does not support dcb configurations."); 2418 return -EOPNOTSUPP; 2419 } 2420 2421 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) { 2422 hns3_err(hw, "MAC pause enabled, cannot config dcb info."); 2423 return -EOPNOTSUPP; 2424 } 2425 2426 ret = hns3_dcb_configure(hns); 2427 if (ret) 2428 hns3_err(hw, "failed to config dcb: %d", ret); 2429 2430 return ret; 2431 } 2432 2433 static int 2434 hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds) 2435 { 2436 int ret; 2437 2438 /* 2439 * Some hardware doesn't support auto-negotiation, but users may not 2440 * configure link_speeds (default 0), which means auto-negotiation. 2441 * In this case, it should return success. 
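 * That is, link_speeds == ETH_LINK_SPEED_AUTONEG (0) is treated as "no explicit request" rather than as an unsupported configuration on ports without auto-negotiation.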
2442 */ 2443 if (link_speeds == ETH_LINK_SPEED_AUTONEG && 2444 hw->mac.support_autoneg == 0) 2445 return 0; 2446 2447 if (link_speeds != ETH_LINK_SPEED_AUTONEG) { 2448 ret = hns3_check_port_speed(hw, link_speeds); 2449 if (ret) 2450 return ret; 2451 } 2452 2453 return 0; 2454 } 2455 2456 static int 2457 hns3_check_dev_conf(struct rte_eth_dev *dev) 2458 { 2459 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2460 struct rte_eth_conf *conf = &dev->data->dev_conf; 2461 int ret; 2462 2463 ret = hns3_check_mq_mode(dev); 2464 if (ret) 2465 return ret; 2466 2467 return hns3_check_link_speed(hw, conf->link_speeds); 2468 } 2469 2470 static int 2471 hns3_dev_configure(struct rte_eth_dev *dev) 2472 { 2473 struct hns3_adapter *hns = dev->data->dev_private; 2474 struct rte_eth_conf *conf = &dev->data->dev_conf; 2475 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; 2476 struct hns3_hw *hw = &hns->hw; 2477 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2478 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2479 struct rte_eth_rss_conf rss_conf; 2480 bool gro_en; 2481 int ret; 2482 2483 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); 2484 2485 /* 2486 * Some versions of the hardware network engine do not support 2487 * individually enabling/disabling/resetting the Tx or Rx queue. These 2488 * devices must enable/disable/reset Tx and Rx queues at the same time. 2489 * When the number of Tx queues allocated by the upper application is 2490 * not equal to the number of Rx queues, the driver needs to set up fake 2491 * Tx or Rx queues to equalize them; otherwise, the network engine 2492 * cannot work as usual. These fake queues are invisible to, and cannot 2493 * be used by, the upper application. 2494 */ 2495 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); 2496 if (ret) { 2497 hns3_err(hw, "failed to set Rx/Tx fake queues, ret = %d.", ret); 2498 hw->cfg_max_queues = 0; 2499 return ret; 2500 } 2501 2502 hw->adapter_state = HNS3_NIC_CONFIGURING; 2503 ret = hns3_check_dev_conf(dev); 2504 if (ret) 2505 goto cfg_err; 2506 2507 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) { 2508 ret = hns3_setup_dcb(dev); 2509 if (ret) 2510 goto cfg_err; 2511 } 2512 2513 /* When RSS is not configured, redirect the packet queue 0 */ 2514 if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) { 2515 conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 2516 rss_conf = conf->rx_adv_conf.rss_conf; 2517 hw->rss_dis_flag = false; 2518 ret = hns3_dev_rss_hash_update(dev, &rss_conf); 2519 if (ret) 2520 goto cfg_err; 2521 } 2522 2523 ret = hns3_refresh_mtu(dev, conf); 2524 if (ret) 2525 goto cfg_err; 2526 2527 ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf); 2528 if (ret) 2529 goto cfg_err; 2530 2531 ret = hns3_dev_configure_vlan(dev); 2532 if (ret) 2533 goto cfg_err; 2534 2535 /* config hardware GRO */ 2536 gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
true : false; 2537 ret = hns3_config_gro(hw, gro_en); 2538 if (ret) 2539 goto cfg_err; 2540 2541 hns3_init_rx_ptype_tble(dev); 2542 hw->adapter_state = HNS3_NIC_CONFIGURED; 2543 2544 return 0; 2545 2546 cfg_err: 2547 hw->cfg_max_queues = 0; 2548 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); 2549 hw->adapter_state = HNS3_NIC_INITIALIZED; 2550 2551 return ret; 2552 } 2553 2554 static int 2555 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) 2556 { 2557 struct hns3_config_max_frm_size_cmd *req; 2558 struct hns3_cmd_desc desc; 2559 2560 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false); 2561 2562 req = (struct hns3_config_max_frm_size_cmd *)desc.data; 2563 req->max_frm_size = rte_cpu_to_le_16(new_mps); 2564 req->min_frm_size = RTE_ETHER_MIN_LEN; 2565 2566 return hns3_cmd_send(hw, &desc, 1); 2567 } 2568 2569 static int 2570 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) 2571 { 2572 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2573 uint16_t original_mps = hns->pf.mps; 2574 int err; 2575 int ret; 2576 2577 ret = hns3_set_mac_mtu(hw, mps); 2578 if (ret) { 2579 hns3_err(hw, "failed to set mtu, ret = %d", ret); 2580 return ret; 2581 } 2582 2583 hns->pf.mps = mps; 2584 ret = hns3_buffer_alloc(hw); 2585 if (ret) { 2586 hns3_err(hw, "failed to allocate buffer, ret = %d", ret); 2587 goto rollback; 2588 } 2589 2590 return 0; 2591 2592 rollback: 2593 err = hns3_set_mac_mtu(hw, original_mps); 2594 if (err) { 2595 hns3_err(hw, "fail to rollback MTU, err = %d", err); 2596 return ret; 2597 } 2598 hns->pf.mps = original_mps; 2599 2600 return ret; 2601 } 2602 2603 static int 2604 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 2605 { 2606 struct hns3_adapter *hns = dev->data->dev_private; 2607 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; 2608 struct hns3_hw *hw = &hns->hw; 2609 bool is_jumbo_frame; 2610 int ret; 2611 2612 if (dev->data->dev_started) { 2613 hns3_err(hw, "Failed to set mtu, port %u must be stopped " 2614 "before configuration", dev->data->port_id); 2615 return -EBUSY; 2616 } 2617 2618 rte_spinlock_lock(&hw->lock); 2619 is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false; 2620 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN); 2621 2622 /* 2623 * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely 2624 * assign to "uint16_t" type variable. 
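 * (HNS3_MAX_FRAME_LEN is far below UINT16_MAX, so the cast below cannot truncate.)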
2625 */ 2626 ret = hns3_config_mtu(hw, (uint16_t)frame_size); 2627 if (ret) { 2628 rte_spinlock_unlock(&hw->lock); 2629 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d", 2630 dev->data->port_id, mtu, ret); 2631 return ret; 2632 } 2633 2634 if (is_jumbo_frame) 2635 dev->data->dev_conf.rxmode.offloads |= 2636 DEV_RX_OFFLOAD_JUMBO_FRAME; 2637 else 2638 dev->data->dev_conf.rxmode.offloads &= 2639 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 2640 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 2641 rte_spinlock_unlock(&hw->lock); 2642 2643 return 0; 2644 } 2645 2646 static uint32_t 2647 hns3_get_copper_port_speed_capa(uint32_t supported_speed) 2648 { 2649 uint32_t speed_capa = 0; 2650 2651 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT) 2652 speed_capa |= ETH_LINK_SPEED_10M_HD; 2653 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT) 2654 speed_capa |= ETH_LINK_SPEED_10M; 2655 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT) 2656 speed_capa |= ETH_LINK_SPEED_100M_HD; 2657 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT) 2658 speed_capa |= ETH_LINK_SPEED_100M; 2659 if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT) 2660 speed_capa |= ETH_LINK_SPEED_1G; 2661 2662 return speed_capa; 2663 } 2664 2665 static uint32_t 2666 hns3_get_fiber_port_speed_capa(uint32_t supported_speed) 2667 { 2668 uint32_t speed_capa = 0; 2669 2670 if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT) 2671 speed_capa |= ETH_LINK_SPEED_1G; 2672 if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT) 2673 speed_capa |= ETH_LINK_SPEED_10G; 2674 if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT) 2675 speed_capa |= ETH_LINK_SPEED_25G; 2676 if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT) 2677 speed_capa |= ETH_LINK_SPEED_40G; 2678 if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT) 2679 speed_capa |= ETH_LINK_SPEED_50G; 2680 if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT) 2681 speed_capa |= ETH_LINK_SPEED_100G; 2682 if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT) 2683 speed_capa |= ETH_LINK_SPEED_200G; 2684 2685 return speed_capa; 2686 } 2687 2688 static uint32_t 2689 hns3_get_speed_capa(struct hns3_hw *hw) 2690 { 2691 struct hns3_mac *mac = &hw->mac; 2692 uint32_t speed_capa; 2693 2694 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 2695 speed_capa = 2696 hns3_get_copper_port_speed_capa(mac->supported_speed); 2697 else 2698 speed_capa = 2699 hns3_get_fiber_port_speed_capa(mac->supported_speed); 2700 2701 if (mac->support_autoneg == 0) 2702 speed_capa |= ETH_LINK_SPEED_FIXED; 2703 2704 return speed_capa; 2705 } 2706 2707 int 2708 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) 2709 { 2710 struct hns3_adapter *hns = eth_dev->data->dev_private; 2711 struct hns3_hw *hw = &hns->hw; 2712 uint16_t queue_num = hw->tqps_num; 2713 2714 /* 2715 * In interrupt mode, 'max_rx_queues' is set based on the number of 2716 * MSI-X interrupt resources of the hardware.
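 * For example, with intr_conf.rxq == 1 only 'intr_tqps_num' queue pairs can be bound to vectors (see hns3_init_ring_with_vector()), so that value is reported instead of 'tqps_num'.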
2717 */ 2718 if (hw->data->dev_conf.intr_conf.rxq == 1) 2719 queue_num = hw->intr_tqps_num; 2720 2721 info->max_rx_queues = queue_num; 2722 info->max_tx_queues = hw->tqps_num; 2723 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ 2724 info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; 2725 info->max_mac_addrs = HNS3_UC_MACADDR_NUM; 2726 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; 2727 info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; 2728 info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 2729 DEV_RX_OFFLOAD_TCP_CKSUM | 2730 DEV_RX_OFFLOAD_UDP_CKSUM | 2731 DEV_RX_OFFLOAD_SCTP_CKSUM | 2732 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 2733 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | 2734 DEV_RX_OFFLOAD_KEEP_CRC | 2735 DEV_RX_OFFLOAD_SCATTER | 2736 DEV_RX_OFFLOAD_VLAN_STRIP | 2737 DEV_RX_OFFLOAD_VLAN_FILTER | 2738 DEV_RX_OFFLOAD_JUMBO_FRAME | 2739 DEV_RX_OFFLOAD_RSS_HASH | 2740 DEV_RX_OFFLOAD_TCP_LRO); 2741 info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 2742 DEV_TX_OFFLOAD_IPV4_CKSUM | 2743 DEV_TX_OFFLOAD_TCP_CKSUM | 2744 DEV_TX_OFFLOAD_UDP_CKSUM | 2745 DEV_TX_OFFLOAD_SCTP_CKSUM | 2746 DEV_TX_OFFLOAD_MULTI_SEGS | 2747 DEV_TX_OFFLOAD_TCP_TSO | 2748 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 2749 DEV_TX_OFFLOAD_GRE_TNL_TSO | 2750 DEV_TX_OFFLOAD_GENEVE_TNL_TSO | 2751 DEV_TX_OFFLOAD_MBUF_FAST_FREE | 2752 hns3_txvlan_cap_get(hw)); 2753 2754 if (hns3_dev_outer_udp_cksum_supported(hw)) 2755 info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; 2756 2757 if (hns3_dev_indep_txrx_supported(hw)) 2758 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 2759 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 2760 2761 if (hns3_dev_ptp_supported(hw)) 2762 info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; 2763 2764 info->rx_desc_lim = (struct rte_eth_desc_lim) { 2765 .nb_max = HNS3_MAX_RING_DESC, 2766 .nb_min = HNS3_MIN_RING_DESC, 2767 .nb_align = HNS3_ALIGN_RING_DESC, 2768 }; 2769 2770 info->tx_desc_lim = (struct rte_eth_desc_lim) { 2771 .nb_max = HNS3_MAX_RING_DESC, 2772 .nb_min = HNS3_MIN_RING_DESC, 2773 .nb_align = HNS3_ALIGN_RING_DESC, 2774 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, 2775 .nb_mtu_seg_max = hw->max_non_tso_bd_num, 2776 }; 2777 2778 info->speed_capa = hns3_get_speed_capa(hw); 2779 info->default_rxconf = (struct rte_eth_rxconf) { 2780 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, 2781 /* 2782 * If there are no available Rx buffer descriptors, incoming 2783 * packets are always dropped by hardware based on hns3 network 2784 * engine. 
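 * The default rx_drop_en = 1 below simply reflects this fixed hardware behavior.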
2785 */ 2786 .rx_drop_en = 1, 2787 .offloads = 0, 2788 }; 2789 info->default_txconf = (struct rte_eth_txconf) { 2790 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, 2791 .offloads = 0, 2792 }; 2793 2794 info->reta_size = hw->rss_ind_tbl_size; 2795 info->hash_key_size = HNS3_RSS_KEY_SIZE; 2796 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; 2797 2798 info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2799 info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2800 info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2801 info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2802 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2803 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2804 2805 return 0; 2806 } 2807 2808 static int 2809 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, 2810 size_t fw_size) 2811 { 2812 struct hns3_adapter *hns = eth_dev->data->dev_private; 2813 struct hns3_hw *hw = &hns->hw; 2814 uint32_t version = hw->fw_version; 2815 int ret; 2816 2817 ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", 2818 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, 2819 HNS3_FW_VERSION_BYTE3_S), 2820 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, 2821 HNS3_FW_VERSION_BYTE2_S), 2822 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, 2823 HNS3_FW_VERSION_BYTE1_S), 2824 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, 2825 HNS3_FW_VERSION_BYTE0_S)); 2826 if (ret < 0) 2827 return -EINVAL; 2828 2829 ret += 1; /* add the size of '\0' */ 2830 if (fw_size < (size_t)ret) 2831 return ret; 2832 else 2833 return 0; 2834 } 2835 2836 static int 2837 hns3_update_port_link_info(struct rte_eth_dev *eth_dev) 2838 { 2839 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2840 int ret; 2841 2842 (void)hns3_update_link_status(hw); 2843 2844 ret = hns3_update_link_info(eth_dev); 2845 if (ret) 2846 hw->mac.link_status = ETH_LINK_DOWN; 2847 2848 return ret; 2849 } 2850 2851 static void 2852 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev, 2853 struct rte_eth_link *new_link) 2854 { 2855 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2856 struct hns3_mac *mac = &hw->mac; 2857 2858 switch (mac->link_speed) { 2859 case ETH_SPEED_NUM_10M: 2860 case ETH_SPEED_NUM_100M: 2861 case ETH_SPEED_NUM_1G: 2862 case ETH_SPEED_NUM_10G: 2863 case ETH_SPEED_NUM_25G: 2864 case ETH_SPEED_NUM_40G: 2865 case ETH_SPEED_NUM_50G: 2866 case ETH_SPEED_NUM_100G: 2867 case ETH_SPEED_NUM_200G: 2868 if (mac->link_status) 2869 new_link->link_speed = mac->link_speed; 2870 break; 2871 default: 2872 if (mac->link_status) 2873 new_link->link_speed = ETH_SPEED_NUM_UNKNOWN; 2874 break; 2875 } 2876 2877 if (!mac->link_status) 2878 new_link->link_speed = ETH_SPEED_NUM_NONE; 2879 2880 new_link->link_duplex = mac->link_duplex; 2881 new_link->link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; 2882 new_link->link_autoneg = mac->link_autoneg; 2883 } 2884 2885 static int 2886 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) 2887 { 2888 #define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */ 2889 #define HNS3_MAX_LINK_CHECK_TIMES 20 /* 2s (100 * 20ms) in total */ 2890 2891 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2892 uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES; 2893 struct hns3_mac *mac = &hw->mac; 2894 struct rte_eth_link new_link; 2895 int ret; 2896 2897 /* When port is stopped, report link down. 
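 * The last recorded autoneg and duplex are kept so that the reported link fields stay self-consistent.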
*/ 2898 if (eth_dev->data->dev_started == 0) { 2899 new_link.link_autoneg = mac->link_autoneg; 2900 new_link.link_duplex = mac->link_duplex; 2901 new_link.link_speed = ETH_SPEED_NUM_NONE; 2902 new_link.link_status = ETH_LINK_DOWN; 2903 goto out; 2904 } 2905 2906 do { 2907 ret = hns3_update_port_link_info(eth_dev); 2908 if (ret) { 2909 hns3_err(hw, "failed to get port link info, ret = %d.", 2910 ret); 2911 break; 2912 } 2913 2914 if (!wait_to_complete || mac->link_status == ETH_LINK_UP) 2915 break; 2916 2917 rte_delay_ms(HNS3_LINK_CHECK_INTERVAL); 2918 } while (retry_cnt--); 2919 2920 memset(&new_link, 0, sizeof(new_link)); 2921 hns3_setup_linkstatus(eth_dev, &new_link); 2922 2923 out: 2924 return rte_eth_linkstatus_set(eth_dev, &new_link); 2925 } 2926 2927 static int 2928 hns3_dev_set_link_up(struct rte_eth_dev *dev) 2929 { 2930 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2931 int ret; 2932 2933 /* 2934 * The "tx_pkt_burst" will be restored. But the secondary process does 2935 * not support the mechanism for notifying the primary process. 2936 */ 2937 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2938 hns3_err(hw, "secondary process does not support to set link up."); 2939 return -ENOTSUP; 2940 } 2941 2942 /* 2943 * If device isn't started Rx/Tx function is still disabled, setting 2944 * link up is not allowed. But it is probably better to return success 2945 * to reduce the impact on the upper layer. 2946 */ 2947 if (hw->adapter_state != HNS3_NIC_STARTED) { 2948 hns3_info(hw, "device isn't started, can't set link up."); 2949 return 0; 2950 } 2951 2952 if (!hw->set_link_down) 2953 return 0; 2954 2955 rte_spinlock_lock(&hw->lock); 2956 ret = hns3_cfg_mac_mode(hw, true); 2957 if (ret) { 2958 rte_spinlock_unlock(&hw->lock); 2959 hns3_err(hw, "failed to set link up, ret = %d", ret); 2960 return ret; 2961 } 2962 2963 hw->set_link_down = false; 2964 hns3_start_tx_datapath(dev); 2965 rte_spinlock_unlock(&hw->lock); 2966 2967 return 0; 2968 } 2969 2970 static int 2971 hns3_dev_set_link_down(struct rte_eth_dev *dev) 2972 { 2973 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2974 int ret; 2975 2976 /* 2977 * The "tx_pkt_burst" will be set to dummy function. But the secondary 2978 * process does not support the mechanism for notifying the primary 2979 * process. 2980 */ 2981 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2982 hns3_err(hw, "secondary process does not support to set link down."); 2983 return -ENOTSUP; 2984 } 2985 2986 /* 2987 * If device isn't started or the API has been called, link status is 2988 * down, return success. 2989 */ 2990 if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down) 2991 return 0; 2992 2993 rte_spinlock_lock(&hw->lock); 2994 hns3_stop_tx_datapath(dev); 2995 ret = hns3_cfg_mac_mode(hw, false); 2996 if (ret) { 2997 hns3_start_tx_datapath(dev); 2998 rte_spinlock_unlock(&hw->lock); 2999 hns3_err(hw, "failed to set link down, ret = %d", ret); 3000 return ret; 3001 } 3002 3003 hw->set_link_down = true; 3004 rte_spinlock_unlock(&hw->lock); 3005 3006 return 0; 3007 } 3008 3009 static int 3010 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status) 3011 { 3012 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3013 struct hns3_pf *pf = &hns->pf; 3014 3015 if (!(status->pf_state & HNS3_PF_STATE_DONE)) 3016 return -EINVAL; 3017 3018 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? 
true : false; 3019 3020 return 0; 3021 } 3022 3023 static int 3024 hns3_query_function_status(struct hns3_hw *hw) 3025 { 3026 #define HNS3_QUERY_MAX_CNT 10 3027 #define HNS3_QUERY_SLEEP_MS 1 3028 struct hns3_func_status_cmd *req; 3029 struct hns3_cmd_desc desc; 3030 int timeout = 0; 3031 int ret; 3032 3033 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true); 3034 req = (struct hns3_func_status_cmd *)desc.data; 3035 3036 do { 3037 ret = hns3_cmd_send(hw, &desc, 1); 3038 if (ret) { 3039 PMD_INIT_LOG(ERR, "query function status failed %d", 3040 ret); 3041 return ret; 3042 } 3043 3044 /* Check whether PF reset is done */ 3045 if (req->pf_state) 3046 break; 3047 3048 rte_delay_ms(HNS3_QUERY_SLEEP_MS); 3049 } while (timeout++ < HNS3_QUERY_MAX_CNT); 3050 3051 return hns3_parse_func_status(hw, req); 3052 } 3053 3054 static int 3055 hns3_get_pf_max_tqp_num(struct hns3_hw *hw) 3056 { 3057 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3058 struct hns3_pf *pf = &hns->pf; 3059 3060 if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) { 3061 /* 3062 * The total_tqps_num obtained from firmware is the maximum tqp 3063 * number of this port, shared by the PF and its VFs. In most 3064 * cases the PF does not need that many tqps. 3065 * RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, coming from the config 3066 * file, is the maximum queue number assigned by the user to the 3067 * PF of this port. So users can adjust the maximum queue number 3068 * of the PF according to their own application scenarios, which 3069 * is more flexible to use. In addition, memory is saved because 3070 * the room for queue statistics is allocated according to the 3071 * actual number of queues required. For a network engine with 3072 * revision_id greater than 0x30, the maximum queue number of the 3073 * PF is assigned via the config file. 3074 */ 3075 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) { 3076 hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) " 3077 "must be greater than 0.", 3078 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF); 3079 return -EINVAL; 3080 } 3081 3082 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 3083 hw->total_tqps_num); 3084 } else { 3085 /* 3086 * Due to the limitation on the number of PF interrupts 3087 * available, the maximum queue number assigned to the PF on 3088 * the network engine with revision_id 0x21 is 64.
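 * For example, even if the firmware reported more tqps (say 128, a purely illustrative figure), hw->tqps_num would still be capped at HNS3_MAX_TQP_NUM_HIP08_PF below.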
3089 */ 3090 hw->tqps_num = RTE_MIN(hw->total_tqps_num, 3091 HNS3_MAX_TQP_NUM_HIP08_PF); 3092 } 3093 3094 return 0; 3095 } 3096 3097 static int 3098 hns3_query_pf_resource(struct hns3_hw *hw) 3099 { 3100 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3101 struct hns3_pf *pf = &hns->pf; 3102 struct hns3_pf_res_cmd *req; 3103 struct hns3_cmd_desc desc; 3104 int ret; 3105 3106 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); 3107 ret = hns3_cmd_send(hw, &desc, 1); 3108 if (ret) { 3109 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); 3110 return ret; 3111 } 3112 3113 req = (struct hns3_pf_res_cmd *)desc.data; 3114 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) + 3115 rte_le_to_cpu_16(req->ext_tqp_num); 3116 ret = hns3_get_pf_max_tqp_num(hw); 3117 if (ret) 3118 return ret; 3119 3120 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; 3121 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); 3122 3123 if (req->tx_buf_size) 3124 pf->tx_buf_size = 3125 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; 3126 else 3127 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; 3128 3129 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); 3130 3131 if (req->dv_buf_size) 3132 pf->dv_buf_size = 3133 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; 3134 else 3135 pf->dv_buf_size = HNS3_DEFAULT_DV; 3136 3137 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); 3138 3139 hw->num_msi = 3140 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), 3141 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); 3142 3143 return 0; 3144 } 3145 3146 static void 3147 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) 3148 { 3149 struct hns3_cfg_param_cmd *req; 3150 uint64_t mac_addr_tmp_high; 3151 uint8_t ext_rss_size_max; 3152 uint64_t mac_addr_tmp; 3153 uint32_t i; 3154 3155 req = (struct hns3_cfg_param_cmd *)desc[0].data; 3156 3157 /* get the configuration */ 3158 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3159 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); 3160 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3161 HNS3_CFG_TQP_DESC_N_M, 3162 HNS3_CFG_TQP_DESC_N_S); 3163 3164 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3165 HNS3_CFG_PHY_ADDR_M, 3166 HNS3_CFG_PHY_ADDR_S); 3167 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3168 HNS3_CFG_MEDIA_TP_M, 3169 HNS3_CFG_MEDIA_TP_S); 3170 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3171 HNS3_CFG_RX_BUF_LEN_M, 3172 HNS3_CFG_RX_BUF_LEN_S); 3173 /* get mac address */ 3174 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); 3175 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3176 HNS3_CFG_MAC_ADDR_H_M, 3177 HNS3_CFG_MAC_ADDR_H_S); 3178 3179 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 3180 3181 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3182 HNS3_CFG_DEFAULT_SPEED_M, 3183 HNS3_CFG_DEFAULT_SPEED_S); 3184 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3185 HNS3_CFG_RSS_SIZE_M, 3186 HNS3_CFG_RSS_SIZE_S); 3187 3188 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 3189 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 3190 3191 req = (struct hns3_cfg_param_cmd *)desc[1].data; 3192 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); 3193 3194 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3195 HNS3_CFG_SPEED_ABILITY_M, 3196 HNS3_CFG_SPEED_ABILITY_S); 3197 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3198 
HNS3_CFG_UMV_TBL_SPACE_M, 3199 HNS3_CFG_UMV_TBL_SPACE_S); 3200 if (!cfg->umv_space) 3201 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF; 3202 3203 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]), 3204 HNS3_CFG_EXT_RSS_SIZE_M, 3205 HNS3_CFG_EXT_RSS_SIZE_S); 3206 /* 3207 * Field ext_rss_size_max obtained from firmware will be more flexible 3208 * for future changes and expansions, which is an exponent of 2, instead 3209 * of reading out directly. If this field is not zero, hns3 PF PMD 3210 * driver uses it as rss_size_max under one TC. Device, whose revision 3211 * id is greater than or equal to PCI_REVISION_ID_HIP09_A, obtains the 3212 * maximum number of queues supported under a TC through this field. 3213 */ 3214 if (ext_rss_size_max) 3215 cfg->rss_size_max = 1U << ext_rss_size_max; 3216 } 3217 3218 /* hns3_get_board_cfg: query the static parameter from NCL_config file in flash 3219 * @hw: pointer to struct hns3_hw 3220 * @hcfg: the config structure to be getted 3221 */ 3222 static int 3223 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg) 3224 { 3225 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM]; 3226 struct hns3_cfg_param_cmd *req; 3227 uint32_t offset; 3228 uint32_t i; 3229 int ret; 3230 3231 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) { 3232 offset = 0; 3233 req = (struct hns3_cfg_param_cmd *)desc[i].data; 3234 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM, 3235 true); 3236 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S, 3237 i * HNS3_CFG_RD_LEN_BYTES); 3238 /* Len should be divided by 4 when send to hardware */ 3239 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S, 3240 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT); 3241 req->offset = rte_cpu_to_le_32(offset); 3242 } 3243 3244 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM); 3245 if (ret) { 3246 PMD_INIT_LOG(ERR, "get config failed %d.", ret); 3247 return ret; 3248 } 3249 3250 hns3_parse_cfg(hcfg, desc); 3251 3252 return 0; 3253 } 3254 3255 static int 3256 hns3_parse_speed(int speed_cmd, uint32_t *speed) 3257 { 3258 switch (speed_cmd) { 3259 case HNS3_CFG_SPEED_10M: 3260 *speed = ETH_SPEED_NUM_10M; 3261 break; 3262 case HNS3_CFG_SPEED_100M: 3263 *speed = ETH_SPEED_NUM_100M; 3264 break; 3265 case HNS3_CFG_SPEED_1G: 3266 *speed = ETH_SPEED_NUM_1G; 3267 break; 3268 case HNS3_CFG_SPEED_10G: 3269 *speed = ETH_SPEED_NUM_10G; 3270 break; 3271 case HNS3_CFG_SPEED_25G: 3272 *speed = ETH_SPEED_NUM_25G; 3273 break; 3274 case HNS3_CFG_SPEED_40G: 3275 *speed = ETH_SPEED_NUM_40G; 3276 break; 3277 case HNS3_CFG_SPEED_50G: 3278 *speed = ETH_SPEED_NUM_50G; 3279 break; 3280 case HNS3_CFG_SPEED_100G: 3281 *speed = ETH_SPEED_NUM_100G; 3282 break; 3283 case HNS3_CFG_SPEED_200G: 3284 *speed = ETH_SPEED_NUM_200G; 3285 break; 3286 default: 3287 return -EINVAL; 3288 } 3289 3290 return 0; 3291 } 3292 3293 static void 3294 hns3_set_default_dev_specifications(struct hns3_hw *hw) 3295 { 3296 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; 3297 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; 3298 hw->rss_key_size = HNS3_RSS_KEY_SIZE; 3299 hw->max_tm_rate = HNS3_ETHER_MAX_RATE; 3300 hw->intr.int_ql_max = HNS3_INTR_QL_NONE; 3301 } 3302 3303 static void 3304 hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) 3305 { 3306 struct hns3_dev_specs_0_cmd *req0; 3307 3308 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; 3309 3310 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; 3311 hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); 3312 hw->rss_key_size 
= rte_le_to_cpu_16(req0->rss_key_size); 3313 hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); 3314 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); 3315 } 3316 3317 static int 3318 hns3_check_dev_specifications(struct hns3_hw *hw) 3319 { 3320 if (hw->rss_ind_tbl_size == 0 || 3321 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { 3322 hns3_err(hw, "the size of hash lookup table configured (%u)" 3323 " exceeds the maximum(%u)", hw->rss_ind_tbl_size, 3324 HNS3_RSS_IND_TBL_SIZE_MAX); 3325 return -EINVAL; 3326 } 3327 3328 return 0; 3329 } 3330 3331 static int 3332 hns3_query_dev_specifications(struct hns3_hw *hw) 3333 { 3334 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; 3335 int ret; 3336 int i; 3337 3338 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3339 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, 3340 true); 3341 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3342 } 3343 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); 3344 3345 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); 3346 if (ret) 3347 return ret; 3348 3349 hns3_parse_dev_specifications(hw, desc); 3350 3351 return hns3_check_dev_specifications(hw); 3352 } 3353 3354 static int 3355 hns3_get_capability(struct hns3_hw *hw) 3356 { 3357 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3358 struct rte_pci_device *pci_dev; 3359 struct hns3_pf *pf = &hns->pf; 3360 struct rte_eth_dev *eth_dev; 3361 uint16_t device_id; 3362 uint8_t revision; 3363 int ret; 3364 3365 eth_dev = &rte_eth_devices[hw->data->port_id]; 3366 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3367 device_id = pci_dev->id.device_id; 3368 3369 if (device_id == HNS3_DEV_ID_25GE_RDMA || 3370 device_id == HNS3_DEV_ID_50GE_RDMA || 3371 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || 3372 device_id == HNS3_DEV_ID_200G_RDMA) 3373 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); 3374 3375 /* Get PCI revision id */ 3376 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, 3377 HNS3_PCI_REVISION_ID); 3378 if (ret != HNS3_PCI_REVISION_ID_LEN) { 3379 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", 3380 ret); 3381 return -EIO; 3382 } 3383 hw->revision = revision; 3384 3385 if (revision < PCI_REVISION_ID_HIP09_A) { 3386 hns3_set_default_dev_specifications(hw); 3387 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; 3388 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; 3389 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; 3390 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE; 3391 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1; 3392 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; 3393 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE; 3394 hw->rss_info.ipv6_sctp_offload_supported = false; 3395 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; 3396 pf->support_multi_tc_pause = false; 3397 return 0; 3398 } 3399 3400 ret = hns3_query_dev_specifications(hw); 3401 if (ret) { 3402 PMD_INIT_LOG(ERR, 3403 "failed to query dev specifications, ret = %d", 3404 ret); 3405 return ret; 3406 } 3407 3408 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 3409 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 3410 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 3411 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; 3412 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; 3413 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; 3414 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; 3415 hw->rss_info.ipv6_sctp_offload_supported = true; 3416 hw->udp_cksum_mode = 
HNS3_SPECIAL_PORT_HW_CKSUM_MODE; 3417 pf->support_multi_tc_pause = true; 3418 3419 return 0; 3420 } 3421 3422 static int 3423 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) 3424 { 3425 int ret; 3426 3427 switch (media_type) { 3428 case HNS3_MEDIA_TYPE_COPPER: 3429 if (!hns3_dev_copper_supported(hw)) { 3430 PMD_INIT_LOG(ERR, 3431 "Media type is copper, not supported."); 3432 ret = -EOPNOTSUPP; 3433 } else { 3434 ret = 0; 3435 } 3436 break; 3437 case HNS3_MEDIA_TYPE_FIBER: 3438 ret = 0; 3439 break; 3440 case HNS3_MEDIA_TYPE_BACKPLANE: 3441 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported."); 3442 ret = -EOPNOTSUPP; 3443 break; 3444 default: 3445 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); 3446 ret = -EINVAL; 3447 break; 3448 } 3449 3450 return ret; 3451 } 3452 3453 static int 3454 hns3_get_board_configuration(struct hns3_hw *hw) 3455 { 3456 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3457 struct hns3_pf *pf = &hns->pf; 3458 struct hns3_cfg cfg; 3459 int ret; 3460 3461 ret = hns3_get_board_cfg(hw, &cfg); 3462 if (ret) { 3463 PMD_INIT_LOG(ERR, "get board config failed %d", ret); 3464 return ret; 3465 } 3466 3467 ret = hns3_check_media_type(hw, cfg.media_type); 3468 if (ret) 3469 return ret; 3470 3471 hw->mac.media_type = cfg.media_type; 3472 hw->rss_size_max = cfg.rss_size_max; 3473 hw->rss_dis_flag = false; 3474 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); 3475 hw->mac.phy_addr = cfg.phy_addr; 3476 hw->mac.default_addr_setted = false; 3477 hw->num_tx_desc = cfg.tqp_desc_num; 3478 hw->num_rx_desc = cfg.tqp_desc_num; 3479 hw->dcb_info.num_pg = 1; 3480 hw->dcb_info.hw_pfc_map = 0; 3481 3482 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); 3483 if (ret) { 3484 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d", 3485 cfg.default_speed, ret); 3486 return ret; 3487 } 3488 3489 pf->tc_max = cfg.tc_num; 3490 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { 3491 PMD_INIT_LOG(WARNING, 3492 "Get TC num(%u) from flash, set TC num to 1", 3493 pf->tc_max); 3494 pf->tc_max = 1; 3495 } 3496 3497 /* Dev does not support DCB */ 3498 if (!hns3_dev_dcb_supported(hw)) { 3499 pf->tc_max = 1; 3500 pf->pfc_max = 0; 3501 } else 3502 pf->pfc_max = pf->tc_max; 3503 3504 hw->dcb_info.num_tc = 1; 3505 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, 3506 hw->tqps_num / hw->dcb_info.num_tc); 3507 hns3_set_bit(hw->hw_tc_map, 0, 1); 3508 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; 3509 3510 pf->wanted_umv_size = cfg.umv_space; 3511 3512 return ret; 3513 } 3514 3515 static int 3516 hns3_get_configuration(struct hns3_hw *hw) 3517 { 3518 int ret; 3519 3520 ret = hns3_query_function_status(hw); 3521 if (ret) { 3522 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); 3523 return ret; 3524 } 3525 3526 /* Get device capability */ 3527 ret = hns3_get_capability(hw); 3528 if (ret) { 3529 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 3530 return ret; 3531 } 3532 3533 /* Get pf resource */ 3534 ret = hns3_query_pf_resource(hw); 3535 if (ret) { 3536 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); 3537 return ret; 3538 } 3539 3540 ret = hns3_get_board_configuration(hw); 3541 if (ret) { 3542 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); 3543 return ret; 3544 } 3545 3546 ret = hns3_query_dev_fec_info(hw); 3547 if (ret) 3548 PMD_INIT_LOG(ERR, 3549 "failed to query FEC information, ret = %d", ret); 3550 3551 return ret; 3552 } 3553 3554 static int 3555 hns3_map_tqps_to_func(struct hns3_hw *hw, 
uint16_t func_id, uint16_t tqp_pid, 3556 uint16_t tqp_vid, bool is_pf) 3557 { 3558 struct hns3_tqp_map_cmd *req; 3559 struct hns3_cmd_desc desc; 3560 int ret; 3561 3562 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); 3563 3564 req = (struct hns3_tqp_map_cmd *)desc.data; 3565 req->tqp_id = rte_cpu_to_le_16(tqp_pid); 3566 req->tqp_vf = func_id; 3567 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; 3568 if (!is_pf) 3569 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); 3570 req->tqp_vid = rte_cpu_to_le_16(tqp_vid); 3571 3572 ret = hns3_cmd_send(hw, &desc, 1); 3573 if (ret) 3574 PMD_INIT_LOG(ERR, "TQP map failed %d", ret); 3575 3576 return ret; 3577 } 3578 3579 static int 3580 hns3_map_tqp(struct hns3_hw *hw) 3581 { 3582 int ret; 3583 int i; 3584 3585 /* 3586 * In current version, VF is not supported when PF is driven by DPDK 3587 * driver, so we assign total tqps_num tqps allocated to this port 3588 * to PF. 3589 */ 3590 for (i = 0; i < hw->total_tqps_num; i++) { 3591 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true); 3592 if (ret) 3593 return ret; 3594 } 3595 3596 return 0; 3597 } 3598 3599 static int 3600 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 3601 { 3602 struct hns3_config_mac_speed_dup_cmd *req; 3603 struct hns3_cmd_desc desc; 3604 int ret; 3605 3606 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; 3607 3608 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); 3609 3610 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0); 3611 3612 switch (speed) { 3613 case ETH_SPEED_NUM_10M: 3614 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3615 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); 3616 break; 3617 case ETH_SPEED_NUM_100M: 3618 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3619 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); 3620 break; 3621 case ETH_SPEED_NUM_1G: 3622 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3623 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); 3624 break; 3625 case ETH_SPEED_NUM_10G: 3626 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3627 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); 3628 break; 3629 case ETH_SPEED_NUM_25G: 3630 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3631 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); 3632 break; 3633 case ETH_SPEED_NUM_40G: 3634 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3635 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); 3636 break; 3637 case ETH_SPEED_NUM_50G: 3638 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3639 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); 3640 break; 3641 case ETH_SPEED_NUM_100G: 3642 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3643 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); 3644 break; 3645 case ETH_SPEED_NUM_200G: 3646 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3647 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); 3648 break; 3649 default: 3650 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); 3651 return -EINVAL; 3652 } 3653 3654 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); 3655 3656 ret = hns3_cmd_send(hw, &desc, 1); 3657 if (ret) 3658 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); 3659 3660 return ret; 3661 } 3662 3663 static int 3664 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3665 { 3666 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3667 struct hns3_pf *pf = &hns->pf; 3668 struct hns3_priv_buf *priv; 3669 uint32_t i, total_size; 3670 3671 total_size = pf->pkt_buf_size; 3672 3673 /* alloc tx buffer for all enabled tc */ 3674 for (i = 0; i < HNS3_MAX_TC_NUM; 
i++) { 3675 priv = &buf_alloc->priv_buf[i]; 3676 3677 if (hw->hw_tc_map & BIT(i)) { 3678 if (total_size < pf->tx_buf_size) 3679 return -ENOMEM; 3680 3681 priv->tx_buf_size = pf->tx_buf_size; 3682 } else 3683 priv->tx_buf_size = 0; 3684 3685 total_size -= priv->tx_buf_size; 3686 } 3687 3688 return 0; 3689 } 3690 3691 static int 3692 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3693 { 3694 /* TX buffer size is unit by 128 byte */ 3695 #define HNS3_BUF_SIZE_UNIT_SHIFT 7 3696 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) 3697 struct hns3_tx_buff_alloc_cmd *req; 3698 struct hns3_cmd_desc desc; 3699 uint32_t buf_size; 3700 uint32_t i; 3701 int ret; 3702 3703 req = (struct hns3_tx_buff_alloc_cmd *)desc.data; 3704 3705 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); 3706 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3707 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 3708 3709 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; 3710 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | 3711 HNS3_BUF_SIZE_UPDATE_EN_MSK); 3712 } 3713 3714 ret = hns3_cmd_send(hw, &desc, 1); 3715 if (ret) 3716 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); 3717 3718 return ret; 3719 } 3720 3721 static int 3722 hns3_get_tc_num(struct hns3_hw *hw) 3723 { 3724 int cnt = 0; 3725 uint8_t i; 3726 3727 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3728 if (hw->hw_tc_map & BIT(i)) 3729 cnt++; 3730 return cnt; 3731 } 3732 3733 static uint32_t 3734 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3735 { 3736 struct hns3_priv_buf *priv; 3737 uint32_t rx_priv = 0; 3738 int i; 3739 3740 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3741 priv = &buf_alloc->priv_buf[i]; 3742 if (priv->enable) 3743 rx_priv += priv->buf_size; 3744 } 3745 return rx_priv; 3746 } 3747 3748 static uint32_t 3749 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3750 { 3751 uint32_t total_tx_size = 0; 3752 uint32_t i; 3753 3754 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3755 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 3756 3757 return total_tx_size; 3758 } 3759 3760 /* Get the number of pfc enabled TCs, which have private buffer */ 3761 static int 3762 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3763 { 3764 struct hns3_priv_buf *priv; 3765 int cnt = 0; 3766 uint8_t i; 3767 3768 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3769 priv = &buf_alloc->priv_buf[i]; 3770 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3771 cnt++; 3772 } 3773 3774 return cnt; 3775 } 3776 3777 /* Get the number of pfc disabled TCs, which have private buffer */ 3778 static int 3779 hns3_get_no_pfc_priv_num(struct hns3_hw *hw, 3780 struct hns3_pkt_buf_alloc *buf_alloc) 3781 { 3782 struct hns3_priv_buf *priv; 3783 int cnt = 0; 3784 uint8_t i; 3785 3786 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3787 priv = &buf_alloc->priv_buf[i]; 3788 if (hw->hw_tc_map & BIT(i) && 3789 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3790 cnt++; 3791 } 3792 3793 return cnt; 3794 } 3795 3796 static bool 3797 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, 3798 uint32_t rx_all) 3799 { 3800 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 3801 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3802 struct hns3_pf *pf = &hns->pf; 3803 uint32_t shared_buf, aligned_mps; 3804 uint32_t rx_priv; 3805 uint8_t tc_num; 3806 uint8_t i; 3807 3808 tc_num = hns3_get_tc_num(hw); 3809 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3810 3811 if 
(hns3_dev_dcb_supported(hw)) 3812 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + 3813 pf->dv_buf_size; 3814 else 3815 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF 3816 + pf->dv_buf_size; 3817 3818 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 3819 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), 3820 HNS3_BUF_SIZE_UNIT); 3821 3822 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); 3823 if (rx_all < rx_priv + shared_std) 3824 return false; 3825 3826 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); 3827 buf_alloc->s_buf.buf_size = shared_buf; 3828 if (hns3_dev_dcb_supported(hw)) { 3829 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; 3830 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 3831 - roundup(aligned_mps / HNS3_BUF_DIV_BY, 3832 HNS3_BUF_SIZE_UNIT); 3833 } else { 3834 buf_alloc->s_buf.self.high = 3835 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3836 buf_alloc->s_buf.self.low = aligned_mps; 3837 } 3838 3839 if (hns3_dev_dcb_supported(hw)) { 3840 hi_thrd = shared_buf - pf->dv_buf_size; 3841 3842 if (tc_num <= NEED_RESERVE_TC_NUM) 3843 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT / 3844 BUF_MAX_PERCENT; 3845 3846 if (tc_num) 3847 hi_thrd = hi_thrd / tc_num; 3848 3849 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); 3850 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); 3851 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; 3852 } else { 3853 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3854 lo_thrd = aligned_mps; 3855 } 3856 3857 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3858 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 3859 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 3860 } 3861 3862 return true; 3863 } 3864 3865 static bool 3866 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, 3867 struct hns3_pkt_buf_alloc *buf_alloc) 3868 { 3869 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3870 struct hns3_pf *pf = &hns->pf; 3871 struct hns3_priv_buf *priv; 3872 uint32_t aligned_mps; 3873 uint32_t rx_all; 3874 uint8_t i; 3875 3876 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3877 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3878 3879 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3880 priv = &buf_alloc->priv_buf[i]; 3881 3882 priv->enable = 0; 3883 priv->wl.low = 0; 3884 priv->wl.high = 0; 3885 priv->buf_size = 0; 3886 3887 if (!(hw->hw_tc_map & BIT(i))) 3888 continue; 3889 3890 priv->enable = 1; 3891 if (hw->dcb_info.hw_pfc_map & BIT(i)) { 3892 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT; 3893 priv->wl.high = roundup(priv->wl.low + aligned_mps, 3894 HNS3_BUF_SIZE_UNIT); 3895 } else { 3896 priv->wl.low = 0; 3897 priv->wl.high = max ? 
(aligned_mps * HNS3_BUF_MUL_BY) : 3898 aligned_mps; 3899 } 3900 3901 priv->buf_size = priv->wl.high + pf->dv_buf_size; 3902 } 3903 3904 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3905 } 3906 3907 static bool 3908 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw, 3909 struct hns3_pkt_buf_alloc *buf_alloc) 3910 { 3911 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3912 struct hns3_pf *pf = &hns->pf; 3913 struct hns3_priv_buf *priv; 3914 int no_pfc_priv_num; 3915 uint32_t rx_all; 3916 uint8_t mask; 3917 int i; 3918 3919 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3920 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc); 3921 3922 /* let the last to be cleared first */ 3923 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3924 priv = &buf_alloc->priv_buf[i]; 3925 mask = BIT((uint8_t)i); 3926 if (hw->hw_tc_map & mask && 3927 !(hw->dcb_info.hw_pfc_map & mask)) { 3928 /* Clear the no pfc TC private buffer */ 3929 priv->wl.low = 0; 3930 priv->wl.high = 0; 3931 priv->buf_size = 0; 3932 priv->enable = 0; 3933 no_pfc_priv_num--; 3934 } 3935 3936 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3937 no_pfc_priv_num == 0) 3938 break; 3939 } 3940 3941 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3942 } 3943 3944 static bool 3945 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, 3946 struct hns3_pkt_buf_alloc *buf_alloc) 3947 { 3948 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3949 struct hns3_pf *pf = &hns->pf; 3950 struct hns3_priv_buf *priv; 3951 uint32_t rx_all; 3952 int pfc_priv_num; 3953 uint8_t mask; 3954 int i; 3955 3956 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3957 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc); 3958 3959 /* let the last to be cleared first */ 3960 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3961 priv = &buf_alloc->priv_buf[i]; 3962 mask = BIT((uint8_t)i); 3963 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) { 3964 /* Reduce the number of pfc TC with private buffer */ 3965 priv->wl.low = 0; 3966 priv->enable = 0; 3967 priv->wl.high = 0; 3968 priv->buf_size = 0; 3969 pfc_priv_num--; 3970 } 3971 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3972 pfc_priv_num == 0) 3973 break; 3974 } 3975 3976 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3977 } 3978 3979 static bool 3980 hns3_only_alloc_priv_buff(struct hns3_hw *hw, 3981 struct hns3_pkt_buf_alloc *buf_alloc) 3982 { 3983 #define COMPENSATE_BUFFER 0x3C00 3984 #define COMPENSATE_HALF_MPS_NUM 5 3985 #define PRIV_WL_GAP 0x1800 3986 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3987 struct hns3_pf *pf = &hns->pf; 3988 uint32_t tc_num = hns3_get_tc_num(hw); 3989 uint32_t half_mps = pf->mps >> 1; 3990 struct hns3_priv_buf *priv; 3991 uint32_t min_rx_priv; 3992 uint32_t rx_priv; 3993 uint8_t i; 3994 3995 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3996 if (tc_num) 3997 rx_priv = rx_priv / tc_num; 3998 3999 if (tc_num <= NEED_RESERVE_TC_NUM) 4000 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 4001 4002 /* 4003 * Minimum value of private buffer in rx direction (min_rx_priv) is 4004 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private 4005 * buffer if rx_priv is greater than min_rx_priv. 
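*
* Illustrative arithmetic (values assumed, not read from hardware): with
* dv_buf_size = 0x4000 (16 KB) and mps = 1500, half_mps is 750, so
* min_rx_priv = 0x4000 + 0x3C00 + 5 * 750 = 35494 bytes before the
* roundup below; COMPENSATE_BUFFER (0x3C00 = 15 KB) supplies the "15KB"
* term and COMPENSATE_HALF_MPS_NUM * half_mps supplies the "2.5 * MPS" term.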
4006 */ 4007 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER + 4008 COMPENSATE_HALF_MPS_NUM * half_mps; 4009 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT); 4010 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT); 4011 if (rx_priv < min_rx_priv) 4012 return false; 4013 4014 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 4015 priv = &buf_alloc->priv_buf[i]; 4016 priv->enable = 0; 4017 priv->wl.low = 0; 4018 priv->wl.high = 0; 4019 priv->buf_size = 0; 4020 4021 if (!(hw->hw_tc_map & BIT(i))) 4022 continue; 4023 4024 priv->enable = 1; 4025 priv->buf_size = rx_priv; 4026 priv->wl.high = rx_priv - pf->dv_buf_size; 4027 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 4028 } 4029 4030 buf_alloc->s_buf.buf_size = 0; 4031 4032 return true; 4033 } 4034 4035 /* 4036 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs 4037 * @hw: pointer to struct hns3_hw 4038 * @buf_alloc: pointer to buffer calculation data 4039 * @return: 0: calculation successful, negative: fail 4040 */ 4041 static int 4042 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4043 { 4044 /* When DCB is not supported, rx private buffer is not allocated. */ 4045 if (!hns3_dev_dcb_supported(hw)) { 4046 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4047 struct hns3_pf *pf = &hns->pf; 4048 uint32_t rx_all = pf->pkt_buf_size; 4049 4050 rx_all -= hns3_get_tx_buff_alloced(buf_alloc); 4051 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all)) 4052 return -ENOMEM; 4053 4054 return 0; 4055 } 4056 4057 /* 4058 * Try to allocate private packet buffer for all TCs without a shared 4059 * buffer. 4060 */ 4061 if (hns3_only_alloc_priv_buff(hw, buf_alloc)) 4062 return 0; 4063 4064 /* 4065 * Try to allocate private packet buffer for all TCs with a shared 4066 * buffer. 4067 */ 4068 if (hns3_rx_buf_calc_all(hw, true, buf_alloc)) 4069 return 0; 4070 4071 /* 4072 * The enabled port number, TC number and no_drop TC number differ 4073 * between application scenarios. To obtain better performance, the 4074 * driver can allocate the buffer size and configure the waterline by 4075 * decreasing the private buffer size in the following order: waterline 4076 * of valid TCs, then PFC-disabled TCs, then PFC-enabled 4077 * TCs.
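*
* As a concrete map of that order onto the calls below:
*	hns3_rx_buf_calc_all(hw, false, ..)  - shrink every valid TC's waterline
*	hns3_drop_nopfc_buf_till_fit(..)     - then drop PFC-disabled TCs' private buffer
*	hns3_drop_pfc_buf_till_fit(..)       - finally drop PFC-enabled TCs' private buffer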
4078 */ 4079 if (hns3_rx_buf_calc_all(hw, false, buf_alloc)) 4080 return 0; 4081 4082 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc)) 4083 return 0; 4084 4085 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc)) 4086 return 0; 4087 4088 return -ENOMEM; 4089 } 4090 4091 static int 4092 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4093 { 4094 struct hns3_rx_priv_buff_cmd *req; 4095 struct hns3_cmd_desc desc; 4096 uint32_t buf_size; 4097 int ret; 4098 int i; 4099 4100 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false); 4101 req = (struct hns3_rx_priv_buff_cmd *)desc.data; 4102 4103 /* Alloc private buffer TCs */ 4104 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 4105 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i]; 4106 4107 req->buf_num[i] = 4108 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S); 4109 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B); 4110 } 4111 4112 buf_size = buf_alloc->s_buf.buf_size; 4113 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) | 4114 (1 << HNS3_TC0_PRI_BUF_EN_B)); 4115 4116 ret = hns3_cmd_send(hw, &desc, 1); 4117 if (ret) 4118 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret); 4119 4120 return ret; 4121 } 4122 4123 static int 4124 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4125 { 4126 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2 4127 struct hns3_rx_priv_wl_buf *req; 4128 struct hns3_priv_buf *priv; 4129 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM]; 4130 int i, j; 4131 int ret; 4132 4133 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) { 4134 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC, 4135 false); 4136 req = (struct hns3_rx_priv_wl_buf *)desc[i].data; 4137 4138 /* The first descriptor sets the NEXT bit to 1 */ 4139 if (i == 0) 4140 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4141 else 4142 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4143 4144 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 4145 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j; 4146 4147 priv = &buf_alloc->priv_buf[idx]; 4148 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >> 4149 HNS3_BUF_UNIT_S); 4150 req->tc_wl[j].high |= 4151 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4152 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >> 4153 HNS3_BUF_UNIT_S); 4154 req->tc_wl[j].low |= 4155 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4156 } 4157 } 4158 4159 /* Send 2 descriptors at one time */ 4160 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM); 4161 if (ret) 4162 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d", 4163 ret); 4164 return ret; 4165 } 4166 4167 static int 4168 hns3_common_thrd_config(struct hns3_hw *hw, 4169 struct hns3_pkt_buf_alloc *buf_alloc) 4170 { 4171 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2 4172 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf; 4173 struct hns3_rx_com_thrd *req; 4174 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM]; 4175 struct hns3_tc_thrd *tc; 4176 int tc_idx; 4177 int i, j; 4178 int ret; 4179 4180 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) { 4181 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC, 4182 false); 4183 req = (struct hns3_rx_com_thrd *)&desc[i].data; 4184 4185 /* The first descriptor sets the NEXT bit to 1 */ 4186 if (i == 0) 4187 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4188 else 4189 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4190 4191 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 4192 tc_idx
= i * HNS3_TC_NUM_ONE_DESC + j; 4193 tc = &s_buf->tc_thrd[tc_idx]; 4194 4195 req->com_thrd[j].high = 4196 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); 4197 req->com_thrd[j].high |= 4198 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4199 req->com_thrd[j].low = 4200 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); 4201 req->com_thrd[j].low |= 4202 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4203 } 4204 } 4205 4206 /* Send 2 descriptors at one time */ 4207 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); 4208 if (ret) 4209 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); 4210 4211 return ret; 4212 } 4213 4214 static int 4215 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4216 { 4217 struct hns3_shared_buf *buf = &buf_alloc->s_buf; 4218 struct hns3_rx_com_wl *req; 4219 struct hns3_cmd_desc desc; 4220 int ret; 4221 4222 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); 4223 4224 req = (struct hns3_rx_com_wl *)desc.data; 4225 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); 4226 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4227 4228 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); 4229 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4230 4231 ret = hns3_cmd_send(hw, &desc, 1); 4232 if (ret) 4233 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); 4234 4235 return ret; 4236 } 4237 4238 int 4239 hns3_buffer_alloc(struct hns3_hw *hw) 4240 { 4241 struct hns3_pkt_buf_alloc pkt_buf; 4242 int ret; 4243 4244 memset(&pkt_buf, 0, sizeof(pkt_buf)); 4245 ret = hns3_tx_buffer_calc(hw, &pkt_buf); 4246 if (ret) { 4247 PMD_INIT_LOG(ERR, 4248 "could not calc tx buffer size for all TCs %d", 4249 ret); 4250 return ret; 4251 } 4252 4253 ret = hns3_tx_buffer_alloc(hw, &pkt_buf); 4254 if (ret) { 4255 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); 4256 return ret; 4257 } 4258 4259 ret = hns3_rx_buffer_calc(hw, &pkt_buf); 4260 if (ret) { 4261 PMD_INIT_LOG(ERR, 4262 "could not calc rx priv buffer size for all TCs %d", 4263 ret); 4264 return ret; 4265 } 4266 4267 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); 4268 if (ret) { 4269 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); 4270 return ret; 4271 } 4272 4273 if (hns3_dev_dcb_supported(hw)) { 4274 ret = hns3_rx_priv_wl_config(hw, &pkt_buf); 4275 if (ret) { 4276 PMD_INIT_LOG(ERR, 4277 "could not configure rx private waterline %d", 4278 ret); 4279 return ret; 4280 } 4281 4282 ret = hns3_common_thrd_config(hw, &pkt_buf); 4283 if (ret) { 4284 PMD_INIT_LOG(ERR, 4285 "could not configure common threshold %d", 4286 ret); 4287 return ret; 4288 } 4289 } 4290 4291 ret = hns3_common_wl_config(hw, &pkt_buf); 4292 if (ret) 4293 PMD_INIT_LOG(ERR, "could not configure common waterline %d", 4294 ret); 4295 4296 return ret; 4297 } 4298 4299 static int 4300 hns3_mac_init(struct hns3_hw *hw) 4301 { 4302 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4303 struct hns3_mac *mac = &hw->mac; 4304 struct hns3_pf *pf = &hns->pf; 4305 int ret; 4306 4307 pf->support_sfp_query = true; 4308 mac->link_duplex = ETH_LINK_FULL_DUPLEX; 4309 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); 4310 if (ret) { 4311 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); 4312 return ret; 4313 } 4314 4315 mac->link_status = ETH_LINK_DOWN; 4316 4317 return hns3_config_mtu(hw, pf->mps); 4318 } 4319 4320 static int 4321 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) 4322 { 
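	/*
	 * cmdq_resp is the command-queue level status, while resp_code is the
	 * table-operation result that the caller parses out of desc.data.
	 * Note that HNS3_ETHERTYPE_ALREADY_ADD is treated as success below,
	 * since the entry already being present is the desired end state.
	 */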
4323 #define HNS3_ETHERTYPE_SUCCESS_ADD 0 4324 #define HNS3_ETHERTYPE_ALREADY_ADD 1 4325 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 4326 #define HNS3_ETHERTYPE_KEY_CONFLICT 3 4327 int return_status; 4328 4329 if (cmdq_resp) { 4330 PMD_INIT_LOG(ERR, 4331 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", 4332 cmdq_resp); 4333 return -EIO; 4334 } 4335 4336 switch (resp_code) { 4337 case HNS3_ETHERTYPE_SUCCESS_ADD: 4338 case HNS3_ETHERTYPE_ALREADY_ADD: 4339 return_status = 0; 4340 break; 4341 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: 4342 PMD_INIT_LOG(ERR, 4343 "add mac ethertype failed for manager table overflow."); 4344 return_status = -EIO; 4345 break; 4346 case HNS3_ETHERTYPE_KEY_CONFLICT: 4347 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); 4348 return_status = -EIO; 4349 break; 4350 default: 4351 PMD_INIT_LOG(ERR, 4352 "add mac ethertype failed for undefined, code=%u.", 4353 resp_code); 4354 return_status = -EIO; 4355 break; 4356 } 4357 4358 return return_status; 4359 } 4360 4361 static int 4362 hns3_add_mgr_tbl(struct hns3_hw *hw, 4363 const struct hns3_mac_mgr_tbl_entry_cmd *req) 4364 { 4365 struct hns3_cmd_desc desc; 4366 uint8_t resp_code; 4367 uint16_t retval; 4368 int ret; 4369 4370 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); 4371 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); 4372 4373 ret = hns3_cmd_send(hw, &desc, 1); 4374 if (ret) { 4375 PMD_INIT_LOG(ERR, 4376 "add mac ethertype failed for cmd_send, ret =%d.", 4377 ret); 4378 return ret; 4379 } 4380 4381 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 4382 retval = rte_le_to_cpu_16(desc.retval); 4383 4384 return hns3_get_mac_ethertype_cmd_status(retval, resp_code); 4385 } 4386 4387 static void 4388 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, 4389 int *table_item_num) 4390 { 4391 struct hns3_mac_mgr_tbl_entry_cmd *tbl; 4392 4393 /* 4394 * In current version, we add one item in management table as below: 4395 * 0x0180C200000E -- LLDP MC address 4396 */ 4397 tbl = mgr_table; 4398 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; 4399 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); 4400 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); 4401 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); 4402 tbl->i_port_bitmap = 0x1; 4403 *table_item_num = 1; 4404 } 4405 4406 static int 4407 hns3_init_mgr_tbl(struct hns3_hw *hw) 4408 { 4409 #define HNS_MAC_MGR_TBL_MAX_SIZE 16 4410 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; 4411 int table_item_num; 4412 int ret; 4413 int i; 4414 4415 memset(mgr_table, 0, sizeof(mgr_table)); 4416 hns3_prepare_mgr_tbl(mgr_table, &table_item_num); 4417 for (i = 0; i < table_item_num; i++) { 4418 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); 4419 if (ret) { 4420 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d", 4421 ret); 4422 return ret; 4423 } 4424 } 4425 4426 return 0; 4427 } 4428 4429 static void 4430 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, 4431 bool en_mc, bool en_bc, int vport_id) 4432 { 4433 if (!param) 4434 return; 4435 4436 memset(param, 0, sizeof(struct hns3_promisc_param)); 4437 if (en_uc) 4438 param->enable = HNS3_PROMISC_EN_UC; 4439 if (en_mc) 4440 param->enable |= HNS3_PROMISC_EN_MC; 4441 if (en_bc) 4442 param->enable |= HNS3_PROMISC_EN_BC; 4443 param->vf_id = vport_id; 4444 } 4445 4446 static int 4447 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) 4448 { 4449 struct hns3_promisc_cfg_cmd 
*req; 4450 struct hns3_cmd_desc desc; 4451 int ret; 4452 4453 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false); 4454 4455 req = (struct hns3_promisc_cfg_cmd *)desc.data; 4456 req->vf_id = param->vf_id; 4457 req->flag = (param->enable << HNS3_PROMISC_EN_B) | 4458 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B; 4459 4460 ret = hns3_cmd_send(hw, &desc, 1); 4461 if (ret) 4462 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret); 4463 4464 return ret; 4465 } 4466 4467 static int 4468 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) 4469 { 4470 struct hns3_promisc_param param; 4471 bool en_bc_pmc = true; 4472 uint8_t vf_id; 4473 4474 /* 4475 * In the current version VFs are not supported when the PF is driven by 4476 * the DPDK driver, so we only need to configure parameters for the PF vport. 4477 */ 4478 vf_id = HNS3_PF_FUNC_ID; 4479 4480 hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); 4481 return hns3_cmd_set_promisc_mode(hw, &param); 4482 } 4483 4484 static int 4485 hns3_promisc_init(struct hns3_hw *hw) 4486 { 4487 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4488 struct hns3_pf *pf = &hns->pf; 4489 struct hns3_promisc_param param; 4490 uint16_t func_id; 4491 int ret; 4492 4493 ret = hns3_set_promisc_mode(hw, false, false); 4494 if (ret) { 4495 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret); 4496 return ret; 4497 } 4498 4499 /* 4500 * In the current version VFs are not supported when the PF is driven by 4501 * the DPDK driver. After the PF has been taken over by DPDK, the original 4502 * VFs become invalid, so residual entries are possible. The driver should 4503 * clear the VFs' promisc mode to avoid unnecessary bandwidth usage 4504 * during init. 4505 */ 4506 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) { 4507 hns3_promisc_param_init(&param, false, false, false, func_id); 4508 ret = hns3_cmd_set_promisc_mode(hw, &param); 4509 if (ret) { 4510 PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode," 4511 " ret = %d", func_id, ret); 4512 return ret; 4513 } 4514 } 4515 4516 return 0; 4517 } 4518 4519 static void 4520 hns3_promisc_uninit(struct hns3_hw *hw) 4521 { 4522 struct hns3_promisc_param param; 4523 uint16_t func_id; 4524 int ret; 4525 4526 func_id = HNS3_PF_FUNC_ID; 4527 4528 /* 4529 * In the current version VFs are not supported when the PF is driven by 4530 * the DPDK driver, and the VFs' promisc mode status has been cleared 4531 * during init and will not change. So just clear the PF's promisc 4532 * mode status during uninit. 4533 */ 4534 hns3_promisc_param_init(&param, false, false, false, func_id); 4535 ret = hns3_cmd_set_promisc_mode(hw, &param); 4536 if (ret) 4537 PMD_INIT_LOG(ERR, "failed to clear promisc status during" 4538 " uninit, ret = %d", ret); 4539 } 4540 4541 static int 4542 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) 4543 { 4544 bool allmulti = dev->data->all_multicast ? true : false; 4545 struct hns3_adapter *hns = dev->data->dev_private; 4546 struct hns3_hw *hw = &hns->hw; 4547 uint64_t offloads; 4548 int err; 4549 int ret; 4550 4551 rte_spinlock_lock(&hw->lock); 4552 ret = hns3_set_promisc_mode(hw, true, true); 4553 if (ret) { 4554 rte_spinlock_unlock(&hw->lock); 4555 hns3_err(hw, "failed to enable promiscuous mode, ret = %d", 4556 ret); 4557 return ret; 4558 } 4559 4560 /* 4561 * When promiscuous mode is enabled, disable the vlan filter to let 4562 * all packets in on the receiving direction.
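*
* If disabling the vlan filter fails, the promiscuous mode just enabled
* is rolled back below (preserving the all-multicast setting) so the
* hardware state stays consistent with the saved offload configuration.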
4563 */ 4564 offloads = dev->data->dev_conf.rxmode.offloads; 4565 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4566 ret = hns3_enable_vlan_filter(hns, false); 4567 if (ret) { 4568 hns3_err(hw, "failed to enable promiscuous mode due to " 4569 "failure to disable vlan filter, ret = %d", 4570 ret); 4571 err = hns3_set_promisc_mode(hw, false, allmulti); 4572 if (err) 4573 hns3_err(hw, "failed to restore promiscuous " 4574 "status after disable vlan filter " 4575 "failed during enabling promiscuous " 4576 "mode, ret = %d", ret); 4577 } 4578 } 4579 4580 rte_spinlock_unlock(&hw->lock); 4581 4582 return ret; 4583 } 4584 4585 static int 4586 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) 4587 { 4588 bool allmulti = dev->data->all_multicast ? true : false; 4589 struct hns3_adapter *hns = dev->data->dev_private; 4590 struct hns3_hw *hw = &hns->hw; 4591 uint64_t offloads; 4592 int err; 4593 int ret; 4594 4595 /* If now in all_multicast mode, must remain in all_multicast mode. */ 4596 rte_spinlock_lock(&hw->lock); 4597 ret = hns3_set_promisc_mode(hw, false, allmulti); 4598 if (ret) { 4599 rte_spinlock_unlock(&hw->lock); 4600 hns3_err(hw, "failed to disable promiscuous mode, ret = %d", 4601 ret); 4602 return ret; 4603 } 4604 /* when promiscuous mode was disabled, restore the vlan filter status */ 4605 offloads = dev->data->dev_conf.rxmode.offloads; 4606 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4607 ret = hns3_enable_vlan_filter(hns, true); 4608 if (ret) { 4609 hns3_err(hw, "failed to disable promiscuous mode due to" 4610 " failure to restore vlan filter, ret = %d", 4611 ret); 4612 err = hns3_set_promisc_mode(hw, true, true); 4613 if (err) 4614 hns3_err(hw, "failed to restore promiscuous " 4615 "status after enabling vlan filter " 4616 "failed during disabling promiscuous " 4617 "mode, ret = %d", ret); 4618 } 4619 } 4620 rte_spinlock_unlock(&hw->lock); 4621 4622 return ret; 4623 } 4624 4625 static int 4626 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) 4627 { 4628 struct hns3_adapter *hns = dev->data->dev_private; 4629 struct hns3_hw *hw = &hns->hw; 4630 int ret; 4631 4632 if (dev->data->promiscuous) 4633 return 0; 4634 4635 rte_spinlock_lock(&hw->lock); 4636 ret = hns3_set_promisc_mode(hw, false, true); 4637 rte_spinlock_unlock(&hw->lock); 4638 if (ret) 4639 hns3_err(hw, "failed to enable allmulticast mode, ret = %d", 4640 ret); 4641 4642 return ret; 4643 } 4644 4645 static int 4646 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) 4647 { 4648 struct hns3_adapter *hns = dev->data->dev_private; 4649 struct hns3_hw *hw = &hns->hw; 4650 int ret; 4651 4652 /* If now in promiscuous mode, must remain in all_multicast mode. */ 4653 if (dev->data->promiscuous) 4654 return 0; 4655 4656 rte_spinlock_lock(&hw->lock); 4657 ret = hns3_set_promisc_mode(hw, false, false); 4658 rte_spinlock_unlock(&hw->lock); 4659 if (ret) 4660 hns3_err(hw, "failed to disable allmulticast mode, ret = %d", 4661 ret); 4662 4663 return ret; 4664 } 4665 4666 static int 4667 hns3_dev_promisc_restore(struct hns3_adapter *hns) 4668 { 4669 struct hns3_hw *hw = &hns->hw; 4670 bool allmulti = hw->data->all_multicast ? 
true : false; 4671 int ret; 4672 4673 if (hw->data->promiscuous) { 4674 ret = hns3_set_promisc_mode(hw, true, true); 4675 if (ret) 4676 hns3_err(hw, "failed to restore promiscuous mode, " 4677 "ret = %d", ret); 4678 return ret; 4679 } 4680 4681 ret = hns3_set_promisc_mode(hw, false, allmulti); 4682 if (ret) 4683 hns3_err(hw, "failed to restore allmulticast mode, ret = %d", 4684 ret); 4685 return ret; 4686 } 4687 4688 static int 4689 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info) 4690 { 4691 struct hns3_sfp_info_cmd *resp; 4692 struct hns3_cmd_desc desc; 4693 int ret; 4694 4695 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true); 4696 resp = (struct hns3_sfp_info_cmd *)desc.data; 4697 resp->query_type = HNS3_ACTIVE_QUERY; 4698 4699 ret = hns3_cmd_send(hw, &desc, 1); 4700 if (ret == -EOPNOTSUPP) { 4701 hns3_warn(hw, "firmware does not support getting SFP info," 4702 " ret = %d.", ret); 4703 return ret; 4704 } else if (ret) { 4705 hns3_err(hw, "get sfp info failed, ret = %d.", ret); 4706 return ret; 4707 } 4708 4709 /* 4710 * In some cases, the speed of MAC obtained from firmware may be 0; it 4711 * shouldn't be set to mac->speed. 4712 */ 4713 if (!rte_le_to_cpu_32(resp->sfp_speed)) 4714 return 0; 4715 4716 mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed); 4717 /* 4718 * If resp->supported_speed is 0, it means the firmware is an old 4719 * version; do not update these params. 4720 */ 4721 if (resp->supported_speed) { 4722 mac_info->query_type = HNS3_ACTIVE_QUERY; 4723 mac_info->supported_speed = 4724 rte_le_to_cpu_32(resp->supported_speed); 4725 mac_info->support_autoneg = resp->autoneg_ability; 4726 mac_info->link_autoneg = (resp->autoneg == 0) ? ETH_LINK_FIXED 4727 : ETH_LINK_AUTONEG; 4728 } else { 4729 mac_info->query_type = HNS3_DEFAULT_QUERY; 4730 } 4731 4732 return 0; 4733 } 4734 4735 static uint8_t 4736 hns3_check_speed_dup(uint8_t duplex, uint32_t speed) 4737 { 4738 if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M)) 4739 duplex = ETH_LINK_FULL_DUPLEX; 4740 4741 return duplex; 4742 } 4743 4744 static int 4745 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 4746 { 4747 struct hns3_mac *mac = &hw->mac; 4748 int ret; 4749 4750 duplex = hns3_check_speed_dup(duplex, speed); 4751 if (mac->link_speed == speed && mac->link_duplex == duplex) 4752 return 0; 4753 4754 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex); 4755 if (ret) 4756 return ret; 4757 4758 ret = hns3_port_shaper_update(hw, speed); 4759 if (ret) 4760 return ret; 4761 4762 mac->link_speed = speed; 4763 mac->link_duplex = duplex; 4764 4765 return 0; 4766 } 4767 4768 static int 4769 hns3_update_fiber_link_info(struct hns3_hw *hw) 4770 { 4771 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 4772 struct hns3_mac *mac = &hw->mac; 4773 struct hns3_mac mac_info; 4774 int ret; 4775 4776 /* If the firmware does not support getting SFP/qSFP speed, return directly */ 4777 if (!pf->support_sfp_query) 4778 return 0; 4779 4780 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4781 ret = hns3_get_sfp_info(hw, &mac_info); 4782 if (ret == -EOPNOTSUPP) { 4783 pf->support_sfp_query = false; 4784 return ret; 4785 } else if (ret) 4786 return ret; 4787 4788 /* Do nothing if no SFP */ 4789 if (mac_info.link_speed == ETH_SPEED_NUM_NONE) 4790 return 0; 4791 4792 /* 4793 * If query_type is HNS3_ACTIVE_QUERY, there is no need 4794 to reconfigure the speed of the MAC.
Otherwise, it indicates 4795 that the current firmware only supports obtaining the 4796 speed of the SFP, and the speed of the MAC needs to be reconfigured. 4797 */ 4798 mac->query_type = mac_info.query_type; 4799 if (mac->query_type == HNS3_ACTIVE_QUERY) { 4800 if (mac_info.link_speed != mac->link_speed) { 4801 ret = hns3_port_shaper_update(hw, mac_info.link_speed); 4802 if (ret) 4803 return ret; 4804 } 4805 4806 mac->link_speed = mac_info.link_speed; 4807 mac->supported_speed = mac_info.supported_speed; 4808 mac->support_autoneg = mac_info.support_autoneg; 4809 mac->link_autoneg = mac_info.link_autoneg; 4810 4811 return 0; 4812 } 4813 4814 /* Config full duplex for SFP */ 4815 return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed, 4816 ETH_LINK_FULL_DUPLEX); 4817 } 4818 4819 static void 4820 hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac) 4821 { 4822 #define HNS3_PHY_SUPPORTED_SPEED_MASK 0x2f 4823 4824 struct hns3_phy_params_bd0_cmd *req; 4825 uint32_t supported; 4826 4827 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 4828 mac->link_speed = rte_le_to_cpu_32(req->speed); 4829 mac->link_duplex = hns3_get_bit(req->duplex, 4830 HNS3_PHY_DUPLEX_CFG_B); 4831 mac->link_autoneg = hns3_get_bit(req->autoneg, 4832 HNS3_PHY_AUTONEG_CFG_B); 4833 mac->advertising = rte_le_to_cpu_32(req->advertising); 4834 mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising); 4835 supported = rte_le_to_cpu_32(req->supported); 4836 mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK; 4837 mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT); 4838 } 4839 4840 static int 4841 hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac) 4842 { 4843 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 4844 uint16_t i; 4845 int ret; 4846 4847 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 4848 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 4849 true); 4850 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 4851 } 4852 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true); 4853 4854 ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 4855 if (ret) { 4856 hns3_err(hw, "get phy parameters failed, ret = %d.", ret); 4857 return ret; 4858 } 4859 4860 hns3_parse_copper_phy_params(desc, mac); 4861 4862 return 0; 4863 } 4864 4865 static int 4866 hns3_update_copper_link_info(struct hns3_hw *hw) 4867 { 4868 struct hns3_mac *mac = &hw->mac; 4869 struct hns3_mac mac_info; 4870 int ret; 4871 4872 memset(&mac_info, 0, sizeof(struct hns3_mac)); 4873 ret = hns3_get_copper_phy_params(hw, &mac_info); 4874 if (ret) 4875 return ret; 4876 4877 if (mac_info.link_speed != mac->link_speed) { 4878 ret = hns3_port_shaper_update(hw, mac_info.link_speed); 4879 if (ret) 4880 return ret; 4881 } 4882 4883 mac->link_speed = mac_info.link_speed; 4884 mac->link_duplex = mac_info.link_duplex; 4885 mac->link_autoneg = mac_info.link_autoneg; 4886 mac->supported_speed = mac_info.supported_speed; 4887 mac->advertising = mac_info.advertising; 4888 mac->lp_advertising = mac_info.lp_advertising; 4889 mac->support_autoneg = mac_info.support_autoneg; 4890 4891 return 0; 4892 } 4893 4894 static int 4895 hns3_update_link_info(struct rte_eth_dev *eth_dev) 4896 { 4897 struct hns3_adapter *hns = eth_dev->data->dev_private; 4898 struct hns3_hw *hw = &hns->hw; 4899 int ret = 0; 4900 4901 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) 4902 ret = hns3_update_copper_link_info(hw); 4903 else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) 4904 ret =
hns3_update_fiber_link_info(hw); 4905 4906 return ret; 4907 } 4908 4909 static int 4910 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) 4911 { 4912 struct hns3_config_mac_mode_cmd *req; 4913 struct hns3_cmd_desc desc; 4914 uint32_t loop_en = 0; 4915 uint8_t val = 0; 4916 int ret; 4917 4918 req = (struct hns3_config_mac_mode_cmd *)desc.data; 4919 4920 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); 4921 if (enable) 4922 val = 1; 4923 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); 4924 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); 4925 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); 4926 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); 4927 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); 4928 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); 4929 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); 4930 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); 4931 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); 4932 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); 4933 4934 /* 4935 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC 4936 * when receiving frames. Otherwise, CRC will be stripped. 4937 */ 4938 if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) 4939 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); 4940 else 4941 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); 4942 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); 4943 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); 4944 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); 4945 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); 4946 4947 ret = hns3_cmd_send(hw, &desc, 1); 4948 if (ret) 4949 PMD_INIT_LOG(ERR, "mac enable fail, ret = %d.", ret); 4950 4951 return ret; 4952 } 4953 4954 static int 4955 hns3_get_mac_link_status(struct hns3_hw *hw) 4956 { 4957 struct hns3_link_status_cmd *req; 4958 struct hns3_cmd_desc desc; 4959 int link_status; 4960 int ret; 4961 4962 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); 4963 ret = hns3_cmd_send(hw, &desc, 1); 4964 if (ret) { 4965 hns3_err(hw, "get link status cmd failed %d", ret); 4966 return ETH_LINK_DOWN; 4967 } 4968 4969 req = (struct hns3_link_status_cmd *)desc.data; 4970 link_status = req->status & HNS3_LINK_STATUS_UP_M; 4971 4972 return !!link_status; 4973 } 4974 4975 static bool 4976 hns3_update_link_status(struct hns3_hw *hw) 4977 { 4978 int state; 4979 4980 state = hns3_get_mac_link_status(hw); 4981 if (state != hw->mac.link_status) { 4982 hw->mac.link_status = state; 4983 hns3_warn(hw, "Link status changed to %s!", state ? "up" : "down"); 4984 return true; 4985 } 4986 4987 return false; 4988 } 4989 4990 void 4991 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query) 4992 { 4993 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 4994 struct rte_eth_link new_link; 4995 int ret; 4996 4997 if (query) 4998 hns3_update_port_link_info(dev); 4999 5000 memset(&new_link, 0, sizeof(new_link)); 5001 hns3_setup_linkstatus(dev, &new_link); 5002 5003 ret = rte_eth_linkstatus_set(dev, &new_link); 5004 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0) 5005 hns3_start_report_lse(dev); 5006 } 5007 5008 static void 5009 hns3_service_handler(void *param) 5010 { 5011 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 5012 struct hns3_adapter *hns = eth_dev->data->dev_private; 5013 struct hns3_hw *hw = &hns->hw; 5014 5015 if (!hns3_is_reset_pending(hns)) 5016 hns3_update_linkstatus_and_event(hw, true); 5017 else 5018 hns3_warn(hw, "Cancel the query when reset is pending"); 5019 5020 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); 5021 } 5022 5023 static int 5024 hns3_init_hardware(struct hns3_adapter *hns) 5025 { 5026 struct hns3_hw *hw = &hns->hw; 5027 int ret; 5028 5029 ret = hns3_map_tqp(hw); 5030 if (ret) { 5031 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); 5032 return ret; 5033 } 5034 5035 ret = hns3_init_umv_space(hw); 5036 if (ret) { 5037 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); 5038 return ret; 5039 } 5040 5041 ret = hns3_mac_init(hw); 5042 if (ret) { 5043 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); 5044 goto err_mac_init; 5045 } 5046 5047 ret = hns3_init_mgr_tbl(hw); 5048 if (ret) { 5049 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); 5050 goto err_mac_init; 5051 } 5052 5053 ret = hns3_promisc_init(hw); 5054 if (ret) { 5055 PMD_INIT_LOG(ERR, "Failed to init promisc: %d", 5056 ret); 5057 goto err_mac_init; 5058 } 5059 5060 ret = hns3_init_vlan_config(hns); 5061 if (ret) { 5062 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); 5063 goto err_mac_init; 5064 } 5065 5066 ret = hns3_dcb_init(hw); 5067 if (ret) { 5068 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); 5069 goto err_mac_init; 5070 } 5071 5072 ret = hns3_init_fd_config(hns); 5073 if (ret) { 5074 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); 5075 goto err_mac_init; 5076 } 5077 5078 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); 5079 if (ret) { 5080 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); 5081 goto err_mac_init; 5082 } 5083 5084 ret = hns3_config_gro(hw, false); 5085 if (ret) { 5086 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 5087 goto err_mac_init; 5088 } 5089 5090 /* 5091 * During initialization, all hardware mapping relationships between 5092 * queues and interrupt vectors need to be cleared, so that errors 5093 * caused by residual configurations, such as unexpected 5094 * interrupts, can be avoided.
5095 */ 5096 ret = hns3_init_ring_with_vector(hw); 5097 if (ret) { 5098 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); 5099 goto err_mac_init; 5100 } 5101 5102 return 0; 5103 5104 err_mac_init: 5105 hns3_uninit_umv_space(hw); 5106 return ret; 5107 } 5108 5109 static int 5110 hns3_clear_hw(struct hns3_hw *hw) 5111 { 5112 struct hns3_cmd_desc desc; 5113 int ret; 5114 5115 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false); 5116 5117 ret = hns3_cmd_send(hw, &desc, 1); 5118 if (ret && ret != -EOPNOTSUPP) 5119 return ret; 5120 5121 return 0; 5122 } 5123 5124 static void 5125 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable) 5126 { 5127 uint32_t val; 5128 5129 /* 5130 * Newer firmware supports reporting more hardware error types in 5131 * MSI-X mode. These errors are defined as RAS errors in hardware 5132 * and belong to a different type from the MSI-x errors processed 5133 * by the network driver. 5134 * 5135 * The network driver should enable this new error reporting during initialization. 5136 */ 5137 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 5138 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0); 5139 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); 5140 } 5141 5142 static uint32_t 5143 hns3_set_firber_default_support_speed(struct hns3_hw *hw) 5144 { 5145 struct hns3_mac *mac = &hw->mac; 5146 5147 switch (mac->link_speed) { 5148 case ETH_SPEED_NUM_1G: 5149 return HNS3_FIBER_LINK_SPEED_1G_BIT; 5150 case ETH_SPEED_NUM_10G: 5151 return HNS3_FIBER_LINK_SPEED_10G_BIT; 5152 case ETH_SPEED_NUM_25G: 5153 return HNS3_FIBER_LINK_SPEED_25G_BIT; 5154 case ETH_SPEED_NUM_40G: 5155 return HNS3_FIBER_LINK_SPEED_40G_BIT; 5156 case ETH_SPEED_NUM_50G: 5157 return HNS3_FIBER_LINK_SPEED_50G_BIT; 5158 case ETH_SPEED_NUM_100G: 5159 return HNS3_FIBER_LINK_SPEED_100G_BIT; 5160 case ETH_SPEED_NUM_200G: 5161 return HNS3_FIBER_LINK_SPEED_200G_BIT; 5162 default: 5163 hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed); 5164 return 0; 5165 } 5166 } 5167 5168 /* 5169 * Validity of supported_speed for fiber and copper media types can be 5170 * guaranteed by the following policy: 5171 * Copper: 5172 * Although the initialization of the phy in the firmware may not be 5173 * completed, the firmware can guarantee that the supported_speed is 5174 * a valid value. 5175 * Fiber: 5176 * If the version of firmware supports the active query mode of the 5177 * HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained 5178 * through it. If unsupported, use the SFP's speed as the value of the 5179 * supported_speed. 5180 */ 5181 static int 5182 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev) 5183 { 5184 struct hns3_adapter *hns = eth_dev->data->dev_private; 5185 struct hns3_hw *hw = &hns->hw; 5186 struct hns3_mac *mac = &hw->mac; 5187 int ret; 5188 5189 ret = hns3_update_link_info(eth_dev); 5190 if (ret) 5191 return ret; 5192 5193 if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) { 5194 /* 5195 * Some firmware does not support the report of supported_speed, 5196 * and only reports the effective speed of the SFP. In this case, it 5197 * is necessary to use the SFP's speed as the supported_speed.
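*
* For example (assumed values): if such firmware reports only an SFP
* speed of 25000 Mbps, the fallback below converts it to
* HNS3_FIBER_LINK_SPEED_25G_BIT so that later link_speeds validation
* still has a non-zero capability mask to check against.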
5198 */ 5199 if (mac->supported_speed == 0) 5200 mac->supported_speed = 5201 hns3_set_firber_default_support_speed(hw); 5202 } 5203 5204 return 0; 5205 } 5206 5207 static void 5208 hns3_get_fc_autoneg_capability(struct hns3_adapter *hns) 5209 { 5210 struct hns3_mac *mac = &hns->hw.mac; 5211 5212 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) { 5213 hns->pf.support_fc_autoneg = true; 5214 return; 5215 } 5216 5217 /* 5218 * Flow control auto-negotiation requires the cooperation of the driver 5219 * and firmware. Currently, the optical port does not support flow 5220 * control auto-negotiation. 5221 */ 5222 hns->pf.support_fc_autoneg = false; 5223 } 5224 5225 static int 5226 hns3_init_pf(struct rte_eth_dev *eth_dev) 5227 { 5228 struct rte_device *dev = eth_dev->device; 5229 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5230 struct hns3_adapter *hns = eth_dev->data->dev_private; 5231 struct hns3_hw *hw = &hns->hw; 5232 int ret; 5233 5234 PMD_INIT_FUNC_TRACE(); 5235 5236 /* Get hardware io base address from pcie BAR2 IO space */ 5237 hw->io_base = pci_dev->mem_resource[2].addr; 5238 5239 /* Firmware command queue initialize */ 5240 ret = hns3_cmd_init_queue(hw); 5241 if (ret) { 5242 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); 5243 goto err_cmd_init_queue; 5244 } 5245 5246 hns3_clear_all_event_cause(hw); 5247 5248 /* Firmware command initialize */ 5249 ret = hns3_cmd_init(hw); 5250 if (ret) { 5251 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); 5252 goto err_cmd_init; 5253 } 5254 5255 hns3_tx_push_init(eth_dev); 5256 5257 /* 5258 * To ensure that the hardware environment is clean during 5259 * initialization, the driver actively clear the hardware environment 5260 * during initialization, including PF and corresponding VFs' vlan, mac, 5261 * flow table configurations, etc. 5262 */ 5263 ret = hns3_clear_hw(hw); 5264 if (ret) { 5265 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret); 5266 goto err_cmd_init; 5267 } 5268 5269 /* Hardware statistics of imissed registers cleared. 
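* The clearing read below resets the drop counters so that statistics
* gathered after init do not include packets missed while the previous
* driver instance owned the device.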
*/ 5270 ret = hns3_update_imissed_stats(hw, true); 5271 if (ret) { 5272 hns3_err(hw, "clear imissed stats failed, ret = %d", ret); 5273 goto err_cmd_init; 5274 } 5275 5276 hns3_config_all_msix_error(hw, true); 5277 5278 ret = rte_intr_callback_register(&pci_dev->intr_handle, 5279 hns3_interrupt_handler, 5280 eth_dev); 5281 if (ret) { 5282 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); 5283 goto err_intr_callback_register; 5284 } 5285 5286 ret = hns3_ptp_init(hw); 5287 if (ret) 5288 goto err_get_config; 5289 5290 /* Enable interrupt */ 5291 rte_intr_enable(&pci_dev->intr_handle); 5292 hns3_pf_enable_irq0(hw); 5293 5294 /* Get configuration */ 5295 ret = hns3_get_configuration(hw); 5296 if (ret) { 5297 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); 5298 goto err_get_config; 5299 } 5300 5301 ret = hns3_tqp_stats_init(hw); 5302 if (ret) 5303 goto err_get_config; 5304 5305 ret = hns3_init_hardware(hns); 5306 if (ret) { 5307 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret); 5308 goto err_init_hw; 5309 } 5310 5311 /* Initialize flow director filter list & hash */ 5312 ret = hns3_fdir_filter_init(hns); 5313 if (ret) { 5314 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret); 5315 goto err_fdir; 5316 } 5317 5318 hns3_rss_set_default_args(hw); 5319 5320 ret = hns3_enable_hw_error_intr(hns, true); 5321 if (ret) { 5322 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", 5323 ret); 5324 goto err_enable_intr; 5325 } 5326 5327 ret = hns3_get_port_supported_speed(eth_dev); 5328 if (ret) { 5329 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported " 5330 "by device, ret = %d.", ret); 5331 goto err_supported_speed; 5332 } 5333 5334 hns3_get_fc_autoneg_capability(hns); 5335 5336 hns3_tm_conf_init(eth_dev); 5337 5338 return 0; 5339 5340 err_supported_speed: 5341 (void)hns3_enable_hw_error_intr(hns, false); 5342 err_enable_intr: 5343 hns3_fdir_filter_uninit(hns); 5344 err_fdir: 5345 hns3_uninit_umv_space(hw); 5346 err_init_hw: 5347 hns3_tqp_stats_uninit(hw); 5348 err_get_config: 5349 hns3_pf_disable_irq0(hw); 5350 rte_intr_disable(&pci_dev->intr_handle); 5351 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5352 eth_dev); 5353 err_intr_callback_register: 5354 err_cmd_init: 5355 hns3_cmd_uninit(hw); 5356 hns3_cmd_destroy_queue(hw); 5357 err_cmd_init_queue: 5358 hw->io_base = NULL; 5359 5360 return ret; 5361 } 5362 5363 static void 5364 hns3_uninit_pf(struct rte_eth_dev *eth_dev) 5365 { 5366 struct hns3_adapter *hns = eth_dev->data->dev_private; 5367 struct rte_device *dev = eth_dev->device; 5368 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5369 struct hns3_hw *hw = &hns->hw; 5370 5371 PMD_INIT_FUNC_TRACE(); 5372 5373 hns3_tm_conf_uninit(eth_dev); 5374 hns3_enable_hw_error_intr(hns, false); 5375 hns3_rss_uninit(hns); 5376 (void)hns3_config_gro(hw, false); 5377 hns3_promisc_uninit(hw); 5378 hns3_flow_uninit(eth_dev); 5379 hns3_fdir_filter_uninit(hns); 5380 hns3_uninit_umv_space(hw); 5381 hns3_tqp_stats_uninit(hw); 5382 hns3_config_mac_tnl_int(hw, false); 5383 hns3_pf_disable_irq0(hw); 5384 rte_intr_disable(&pci_dev->intr_handle); 5385 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5386 eth_dev); 5387 hns3_config_all_msix_error(hw, false); 5388 hns3_cmd_uninit(hw); 5389 hns3_cmd_destroy_queue(hw); 5390 hw->io_base = NULL; 5391 } 5392 5393 static uint32_t 5394 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds) 5395 { 5396 uint32_t speed_bit; 5397 5398 switch (link_speeds & ~ETH_LINK_SPEED_FIXED) { 5399 
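	/*
	 * Only a single fixed speed can be requested for a copper port: e.g.
	 * ETH_LINK_SPEED_100M maps to HNS3_PHY_LINK_SPEED_100M_BIT, while a
	 * combination of several speed bits matches no case and falls through
	 * to the default branch, yielding 0 (rejected by the caller).
	 */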
case ETH_LINK_SPEED_10M: 5400 speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT; 5401 break; 5402 case ETH_LINK_SPEED_10M_HD: 5403 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT; 5404 break; 5405 case ETH_LINK_SPEED_100M: 5406 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT; 5407 break; 5408 case ETH_LINK_SPEED_100M_HD: 5409 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT; 5410 break; 5411 case ETH_LINK_SPEED_1G: 5412 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT; 5413 break; 5414 default: 5415 speed_bit = 0; 5416 break; 5417 } 5418 5419 return speed_bit; 5420 } 5421 5422 static uint32_t 5423 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds) 5424 { 5425 uint32_t speed_bit; 5426 5427 switch (link_speeds & ~ETH_LINK_SPEED_FIXED) { 5428 case ETH_LINK_SPEED_1G: 5429 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT; 5430 break; 5431 case ETH_LINK_SPEED_10G: 5432 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT; 5433 break; 5434 case ETH_LINK_SPEED_25G: 5435 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT; 5436 break; 5437 case ETH_LINK_SPEED_40G: 5438 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT; 5439 break; 5440 case ETH_LINK_SPEED_50G: 5441 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT; 5442 break; 5443 case ETH_LINK_SPEED_100G: 5444 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT; 5445 break; 5446 case ETH_LINK_SPEED_200G: 5447 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT; 5448 break; 5449 default: 5450 speed_bit = 0; 5451 break; 5452 } 5453 5454 return speed_bit; 5455 } 5456 5457 static int 5458 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) 5459 { 5460 struct hns3_mac *mac = &hw->mac; 5461 uint32_t supported_speed = mac->supported_speed; 5462 uint32_t speed_bit = 0; 5463 5464 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 5465 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds); 5466 else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) 5467 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds); 5468 5469 if (!(speed_bit & supported_speed)) { 5470 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.", 5471 link_speeds); 5472 return -EINVAL; 5473 } 5474 5475 return 0; 5476 } 5477 5478 static inline uint32_t 5479 hns3_get_link_speed(uint32_t link_speeds) 5480 { 5481 uint32_t speed = ETH_SPEED_NUM_NONE; 5482 5483 if (link_speeds & ETH_LINK_SPEED_10M || 5484 link_speeds & ETH_LINK_SPEED_10M_HD) 5485 speed = ETH_SPEED_NUM_10M; 5486 if (link_speeds & ETH_LINK_SPEED_100M || 5487 link_speeds & ETH_LINK_SPEED_100M_HD) 5488 speed = ETH_SPEED_NUM_100M; 5489 if (link_speeds & ETH_LINK_SPEED_1G) 5490 speed = ETH_SPEED_NUM_1G; 5491 if (link_speeds & ETH_LINK_SPEED_10G) 5492 speed = ETH_SPEED_NUM_10G; 5493 if (link_speeds & ETH_LINK_SPEED_25G) 5494 speed = ETH_SPEED_NUM_25G; 5495 if (link_speeds & ETH_LINK_SPEED_40G) 5496 speed = ETH_SPEED_NUM_40G; 5497 if (link_speeds & ETH_LINK_SPEED_50G) 5498 speed = ETH_SPEED_NUM_50G; 5499 if (link_speeds & ETH_LINK_SPEED_100G) 5500 speed = ETH_SPEED_NUM_100G; 5501 if (link_speeds & ETH_LINK_SPEED_200G) 5502 speed = ETH_SPEED_NUM_200G; 5503 5504 return speed; 5505 } 5506 5507 static uint8_t 5508 hns3_get_link_duplex(uint32_t link_speeds) 5509 { 5510 if ((link_speeds & ETH_LINK_SPEED_10M_HD) || 5511 (link_speeds & ETH_LINK_SPEED_100M_HD)) 5512 return ETH_LINK_HALF_DUPLEX; 5513 else 5514 return ETH_LINK_FULL_DUPLEX; 5515 } 5516 5517 static int 5518 hns3_set_copper_port_link_speed(struct hns3_hw *hw, 5519 struct hns3_set_link_speed_cfg *cfg) 5520 { 5521 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; 5522 struct hns3_phy_params_bd0_cmd *req; 5523 
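	/*
	 * The PHY parameters span HNS3_PHY_PARAM_CFG_BD_NUM chained
	 * descriptors, but only the first buffer descriptor (bd0) carries the
	 * autoneg/speed/duplex fields filled in below.
	 */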
uint16_t i; 5524 5525 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { 5526 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, 5527 false); 5528 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 5529 } 5530 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false); 5531 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; 5532 req->autoneg = cfg->autoneg; 5533 5534 /* 5535 * The full speed capability is used to negotiate when 5536 * auto-negotiation is enabled. 5537 */ 5538 if (cfg->autoneg) { 5539 req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT | 5540 HNS3_PHY_LINK_SPEED_10M_HD_BIT | 5541 HNS3_PHY_LINK_SPEED_100M_BIT | 5542 HNS3_PHY_LINK_SPEED_100M_HD_BIT | 5543 HNS3_PHY_LINK_SPEED_1000M_BIT; 5544 } else { 5545 req->speed = cfg->speed; 5546 req->duplex = cfg->duplex; 5547 } 5548 5549 return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); 5550 } 5551 5552 static int 5553 hns3_set_autoneg(struct hns3_hw *hw, bool enable) 5554 { 5555 struct hns3_config_auto_neg_cmd *req; 5556 struct hns3_cmd_desc desc; 5557 uint32_t flag = 0; 5558 int ret; 5559 5560 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false); 5561 5562 req = (struct hns3_config_auto_neg_cmd *)desc.data; 5563 if (enable) 5564 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1); 5565 req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag); 5566 5567 ret = hns3_cmd_send(hw, &desc, 1); 5568 if (ret) 5569 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret); 5570 5571 return ret; 5572 } 5573 5574 static int 5575 hns3_set_fiber_port_link_speed(struct hns3_hw *hw, 5576 struct hns3_set_link_speed_cfg *cfg) 5577 { 5578 int ret; 5579 5580 if (hw->mac.support_autoneg) { 5581 ret = hns3_set_autoneg(hw, cfg->autoneg); 5582 if (ret) { 5583 hns3_err(hw, "failed to configure auto-negotiation."); 5584 return ret; 5585 } 5586 5587 /* 5588 * To enable auto-negotiation, we only need to turn the 5589 * auto-negotiation switch on; the firmware then advertises 5590 * all speed capabilities. 5591 */ 5592 if (cfg->autoneg) 5593 return 0; 5594 } 5595 5596 /* 5597 * Some hardware doesn't support auto-negotiation, but users may not 5598 * configure link_speeds (default 0), which means auto-negotiation. 5599 * In this case, a warning message needs to be printed, instead of 5600 * an error.
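*
* E.g. an application that leaves the default port configuration in
* place keeps link_speeds at ETH_LINK_SPEED_AUTONEG (0), so on such
* hardware the port keeps its current fixed speed and only the warning
* below is emitted.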
5601 */ 5602 if (cfg->autoneg) { 5603 hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!"); 5604 return 0; 5605 } 5606 5607 return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex); 5608 } 5609 5610 static int 5611 hns3_set_port_link_speed(struct hns3_hw *hw, 5612 struct hns3_set_link_speed_cfg *cfg) 5613 { 5614 int ret; 5615 5616 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) { 5617 #if defined(RTE_HNS3_ONLY_1630_FPGA) 5618 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 5619 if (pf->is_tmp_phy) 5620 return 0; 5621 #endif 5622 5623 ret = hns3_set_copper_port_link_speed(hw, cfg); 5624 if (ret) { 5625 hns3_err(hw, "failed to set copper port link speed, " 5626 "ret = %d.", ret); 5627 return ret; 5628 } 5629 } else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) { 5630 ret = hns3_set_fiber_port_link_speed(hw, cfg); 5631 if (ret) { 5632 hns3_err(hw, "failed to set fiber port link speed, " 5633 "ret = %d.", ret); 5634 return ret; 5635 } 5636 } 5637 5638 return 0; 5639 } 5640 5641 static int 5642 hns3_apply_link_speed(struct hns3_hw *hw) 5643 { 5644 struct rte_eth_conf *conf = &hw->data->dev_conf; 5645 struct hns3_set_link_speed_cfg cfg; 5646 5647 memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg)); 5648 cfg.autoneg = (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) ? 5649 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 5650 if (cfg.autoneg != ETH_LINK_AUTONEG) { 5651 cfg.speed = hns3_get_link_speed(conf->link_speeds); 5652 cfg.duplex = hns3_get_link_duplex(conf->link_speeds); 5653 } 5654 5655 return hns3_set_port_link_speed(hw, &cfg); 5656 } 5657 5658 static int 5659 hns3_do_start(struct hns3_adapter *hns, bool reset_queue) 5660 { 5661 struct hns3_hw *hw = &hns->hw; 5662 bool link_en; 5663 int ret; 5664 5665 ret = hns3_update_queue_map_configure(hns); 5666 if (ret) { 5667 hns3_err(hw, "failed to update queue mapping configuration, ret = %d", 5668 ret); 5669 return ret; 5670 } 5671 5672 /* Note: hns3_tm_conf_update must be called after configuring DCB. */ 5673 ret = hns3_tm_conf_update(hw); 5674 if (ret) { 5675 PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret); 5676 return ret; 5677 } 5678 5679 hns3_enable_rxd_adv_layout(hw); 5680 5681 ret = hns3_init_queues(hns, reset_queue); 5682 if (ret) { 5683 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret); 5684 return ret; 5685 } 5686 5687 link_en = hw->set_link_down ? false : true; 5688 ret = hns3_cfg_mac_mode(hw, link_en); 5689 if (ret) { 5690 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret); 5691 goto err_config_mac_mode; 5692 } 5693 5694 ret = hns3_apply_link_speed(hw); 5695 if (ret) 5696 goto err_set_link_speed; 5697 5698 return 0; 5699 5700 err_set_link_speed: 5701 (void)hns3_cfg_mac_mode(hw, false); 5702 5703 err_config_mac_mode: 5704 hns3_dev_release_mbufs(hns); 5705 /* 5706 * This is exception handling; hns3_reset_all_tqps will log the 5707 * corresponding error message itself if it fails, so it is 5708 * not necessary to check its return value here. Keep 5709 * ret as the error code that caused the exception.
5710 */ 5711 (void)hns3_reset_all_tqps(hns); 5712 return ret; 5713 } 5714 5715 static int 5716 hns3_map_rx_interrupt(struct rte_eth_dev *dev) 5717 { 5718 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5719 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5720 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5721 uint16_t base = RTE_INTR_VEC_ZERO_OFFSET; 5722 uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5723 uint32_t intr_vector; 5724 uint16_t q_id; 5725 int ret; 5726 5727 /* 5728 * hns3 needs a separate interrupt to be used as the event interrupt, 5729 * which cannot be shared with the task queue pairs, so kernel drivers 5730 * need to support multiple interrupt vectors. 5731 */ 5732 if (dev->data->dev_conf.intr_conf.rxq == 0 || 5733 !rte_intr_cap_multiple(intr_handle)) 5734 return 0; 5735 5736 rte_intr_disable(intr_handle); 5737 intr_vector = hw->used_rx_queues; 5738 /* creates an event fd for each intr vector when MSI-X is used */ 5739 if (rte_intr_efd_enable(intr_handle, intr_vector)) 5740 return -EINVAL; 5741 5742 if (intr_handle->intr_vec == NULL) { 5743 intr_handle->intr_vec = 5744 rte_zmalloc("intr_vec", 5745 hw->used_rx_queues * sizeof(int), 0); 5746 if (intr_handle->intr_vec == NULL) { 5747 hns3_err(hw, "failed to allocate %u rx_queues intr_vec", 5748 hw->used_rx_queues); 5749 ret = -ENOMEM; 5750 goto alloc_intr_vec_error; 5751 } 5752 } 5753 5754 if (rte_intr_allow_others(intr_handle)) { 5755 vec = RTE_INTR_VEC_RXTX_OFFSET; 5756 base = RTE_INTR_VEC_RXTX_OFFSET; 5757 } 5758 5759 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5760 ret = hns3_bind_ring_with_vector(hw, vec, true, 5761 HNS3_RING_TYPE_RX, q_id); 5762 if (ret) 5763 goto bind_vector_error; 5764 intr_handle->intr_vec[q_id] = vec; 5765 /* 5766 * If there are not enough efds (e.g. not enough interrupts), 5767 * the remaining queues will be bound to the last interrupt. 5768 */ 5769 if (vec < base + intr_handle->nb_efd - 1) 5770 vec++; 5771 } 5772 rte_intr_enable(intr_handle); 5773 return 0; 5774 5775 bind_vector_error: 5776 rte_free(intr_handle->intr_vec); 5777 intr_handle->intr_vec = NULL; 5778 alloc_intr_vec_error: 5779 rte_intr_efd_disable(intr_handle); 5780 return ret; 5781 } 5782 5783 static int 5784 hns3_restore_rx_interrupt(struct hns3_hw *hw) 5785 { 5786 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 5787 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5788 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5789 uint16_t q_id; 5790 int ret; 5791 5792 if (dev->data->dev_conf.intr_conf.rxq == 0) 5793 return 0; 5794 5795 if (rte_intr_dp_is_en(intr_handle)) { 5796 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5797 ret = hns3_bind_ring_with_vector(hw, 5798 intr_handle->intr_vec[q_id], true, 5799 HNS3_RING_TYPE_RX, q_id); 5800 if (ret) 5801 return ret; 5802 } 5803 } 5804 5805 return 0; 5806 } 5807 5808 static void 5809 hns3_restore_filter(struct rte_eth_dev *dev) 5810 { 5811 hns3_restore_rss_filter(dev); 5812 } 5813 5814 static int 5815 hns3_dev_start(struct rte_eth_dev *dev) 5816 { 5817 struct hns3_adapter *hns = dev->data->dev_private; 5818 struct hns3_hw *hw = &hns->hw; 5819 bool old_state = hw->set_link_down; 5820 int ret; 5821 5822 PMD_INIT_FUNC_TRACE(); 5823 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) 5824 return -EBUSY; 5825 5826 rte_spinlock_lock(&hw->lock); 5827 hw->adapter_state = HNS3_NIC_STARTING; 5828 5829 /* 5830 * If the dev_set_link_down() API has been called, the "set_link_down" 5831 * flag can be cleared by dev_start() API.
In addition, the flag should 5832 also be cleared before calling hns3_do_start() so that MAC can be 5833 enabled in dev_start stage. 5834 */ 5835 hw->set_link_down = false; 5836 ret = hns3_do_start(hns, true); 5837 if (ret) 5838 goto do_start_fail; 5839 5840 ret = hns3_map_rx_interrupt(dev); 5841 if (ret) 5842 goto map_rx_inter_err; 5843 5844 /* 5845 * There are three registers used to control the status of a TQP 5846 * (contains a pair of Tx queue and Rx queue) in the new version network 5847 * engine. One is used to control the enabling of the Tx queue, another is 5848 * used to control the enabling of the Rx queue, and the last is the master 5849 * switch used to control the enabling of the whole tqp. The Tx register and 5850 * TQP register must be enabled at the same time to enable a Tx queue. 5851 * The same applies to the Rx queue. For the older network engine, this 5852 * function only refreshes the enabled flag, and it is used to update the 5853 * status of queues in the dpdk framework. 5854 */ 5855 ret = hns3_start_all_txqs(dev); 5856 if (ret) 5857 goto map_rx_inter_err; 5858 5859 ret = hns3_start_all_rxqs(dev); 5860 if (ret) 5861 goto start_all_rxqs_fail; 5862 5863 hw->adapter_state = HNS3_NIC_STARTED; 5864 rte_spinlock_unlock(&hw->lock); 5865 5866 hns3_rx_scattered_calc(dev); 5867 hns3_set_rxtx_function(dev); 5868 hns3_mp_req_start_rxtx(dev); 5869 5870 hns3_restore_filter(dev); 5871 5872 /* Enable interrupt of all rx queues before enabling queues */ 5873 hns3_dev_all_rx_queue_intr_enable(hw, true); 5874 5875 /* 5876 * After finishing the initialization, enable tqps to receive/transmit 5877 * packets and refresh all queue status. 5878 */ 5879 hns3_start_tqps(hw); 5880 5881 hns3_tm_dev_start_proc(hw); 5882 5883 if (dev->data->dev_conf.intr_conf.lsc != 0) 5884 hns3_dev_link_update(dev, 0); 5885 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); 5886 5887 hns3_info(hw, "hns3 dev start successful!"); 5888 5889 return 0; 5890 5891 start_all_rxqs_fail: 5892 hns3_stop_all_txqs(dev); 5893 map_rx_inter_err: 5894 (void)hns3_do_stop(hns); 5895 do_start_fail: 5896 hw->set_link_down = old_state; 5897 hw->adapter_state = HNS3_NIC_CONFIGURED; 5898 rte_spinlock_unlock(&hw->lock); 5899 5900 return ret; 5901 } 5902 5903 static int 5904 hns3_do_stop(struct hns3_adapter *hns) 5905 { 5906 struct hns3_hw *hw = &hns->hw; 5907 int ret; 5908 5909 /* 5910 * The "hns3_do_stop" function will also be called by .stop_service to 5911 * prepare a reset. At the time of a global or IMP reset, the command cannot 5912 * be sent to stop the tx/rx queues. The mbufs in the Tx/Rx queues may be 5913 * accessed during the reset process. So the mbufs cannot be released 5914 * during reset and are required to be released after the reset is 5915 * completed.
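*
* Concretely, when a reset is in flight the resetting/disable_cmd checks
* below skip both the mbuf release and the TQP reset commands; the
* mbufs are then released from the reset recovery path once the reset
* completes.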
5916 */ 5917 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) 5918 hns3_dev_release_mbufs(hns); 5919 5920 ret = hns3_cfg_mac_mode(hw, false); 5921 if (ret) 5922 return ret; 5923 hw->mac.link_status = ETH_LINK_DOWN; 5924 5925 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { 5926 hns3_configure_all_mac_addr(hns, true); 5927 ret = hns3_reset_all_tqps(hns); 5928 if (ret) { 5929 hns3_err(hw, "failed to reset all queues ret = %d.", 5930 ret); 5931 return ret; 5932 } 5933 } 5934 hw->mac.default_addr_setted = false; 5935 return 0; 5936 } 5937 5938 static void 5939 hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) 5940 { 5941 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5942 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5943 struct hns3_adapter *hns = dev->data->dev_private; 5944 struct hns3_hw *hw = &hns->hw; 5945 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; 5946 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5947 uint16_t q_id; 5948 5949 if (dev->data->dev_conf.intr_conf.rxq == 0) 5950 return; 5951 5952 /* unmap the ring with vector */ 5953 if (rte_intr_allow_others(intr_handle)) { 5954 vec = RTE_INTR_VEC_RXTX_OFFSET; 5955 base = RTE_INTR_VEC_RXTX_OFFSET; 5956 } 5957 if (rte_intr_dp_is_en(intr_handle)) { 5958 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5959 (void)hns3_bind_ring_with_vector(hw, vec, false, 5960 HNS3_RING_TYPE_RX, 5961 q_id); 5962 if (vec < base + intr_handle->nb_efd - 1) 5963 vec++; 5964 } 5965 } 5966 /* Clean datapath event and queue/vec mapping */ 5967 rte_intr_efd_disable(intr_handle); 5968 if (intr_handle->intr_vec) { 5969 rte_free(intr_handle->intr_vec); 5970 intr_handle->intr_vec = NULL; 5971 } 5972 } 5973 5974 static int 5975 hns3_dev_stop(struct rte_eth_dev *dev) 5976 { 5977 struct hns3_adapter *hns = dev->data->dev_private; 5978 struct hns3_hw *hw = &hns->hw; 5979 5980 PMD_INIT_FUNC_TRACE(); 5981 dev->data->dev_started = 0; 5982 5983 hw->adapter_state = HNS3_NIC_STOPPING; 5984 hns3_set_rxtx_function(dev); 5985 rte_wmb(); 5986 /* Disable datapath on secondary process. */ 5987 hns3_mp_req_stop_rxtx(dev); 5988 /* Prevent crashes when queues are still in use. 
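	 * The delay scales with the number of configured queues: roughly one
	 * millisecond per queue is left for in-flight Rx/Tx bursts to drain
	 * before the queues are torn down.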
	 */
5989	rte_delay_ms(hw->cfg_max_queues);
5990
5991	rte_spinlock_lock(&hw->lock);
5992	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
5993		hns3_tm_dev_stop_proc(hw);
5994		hns3_config_mac_tnl_int(hw, false);
5995		hns3_stop_tqps(hw);
5996		hns3_do_stop(hns);
5997		hns3_unmap_rx_interrupt(dev);
5998		hw->adapter_state = HNS3_NIC_CONFIGURED;
5999	}
6000	hns3_rx_scattered_reset(dev);
6001	rte_eal_alarm_cancel(hns3_service_handler, dev);
6002	hns3_stop_report_lse(dev);
6003	rte_spinlock_unlock(&hw->lock);
6004
6005	return 0;
6006 }
6007
6008 static int
6009 hns3_dev_close(struct rte_eth_dev *eth_dev)
6010 {
6011	struct hns3_adapter *hns = eth_dev->data->dev_private;
6012	struct hns3_hw *hw = &hns->hw;
6013	int ret = 0;
6014
6015	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
6016		return 0;
6017
6018	if (hw->adapter_state == HNS3_NIC_STARTED)
6019		ret = hns3_dev_stop(eth_dev);
6020
6021	hw->adapter_state = HNS3_NIC_CLOSING;
6022	hns3_reset_abort(hns);
6023	hw->adapter_state = HNS3_NIC_CLOSED;
6024
6025	hns3_configure_all_mc_mac_addr(hns, true);
6026	hns3_remove_all_vlan_table(hns);
6027	hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
6028	hns3_uninit_pf(eth_dev);
6029	hns3_free_all_queues(eth_dev);
6030	rte_free(hw->reset.wait_data);
6031	hns3_mp_uninit_primary();
6032	hns3_warn(hw, "Close port %u finished", hw->data->port_id);
6033
6034	return ret;
6035 }
6036
6037 static void
6038 hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause,
6039				   bool *tx_pause)
6040 {
6041	struct hns3_mac *mac = &hw->mac;
6042	uint32_t advertising = mac->advertising;
6043	uint32_t lp_advertising = mac->lp_advertising;
6044	*rx_pause = false;
6045	*tx_pause = false;
6046
6047	if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) {
6048		*rx_pause = true;
6049		*tx_pause = true;
6050	} else if (advertising & lp_advertising &
6051		   HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) {
6052		if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
6053			*rx_pause = true;
6054		else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
6055			*tx_pause = true;
6056	}
6057 }
6058
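/*
 * Quick reference for the copper pause resolution implemented above
 * (local/partner advertisement -> resulting local pause), following the
 * usual IEEE 802.3 PAUSE/ASM_DIR rules:
 *
 *	both advertise PAUSE                  -> Rx pause + Tx pause
 *	both advertise ASM_DIR, local PAUSE   -> Rx pause only
 *	both advertise ASM_DIR, partner PAUSE -> Tx pause only
 *	anything else                         -> no pause
 */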
6059 static enum hns3_fc_mode
6060 hns3_get_autoneg_fc_mode(struct hns3_hw *hw)
6061 {
6062	enum hns3_fc_mode current_mode;
6063	bool rx_pause = false;
6064	bool tx_pause = false;
6065
6066	switch (hw->mac.media_type) {
6067	case HNS3_MEDIA_TYPE_COPPER:
6068		hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause);
6069		break;
6070
6071	/*
6072	 * Flow control auto-negotiation is not supported for fiber and
6073	 * backplane media types.
6074	 */
6075	case HNS3_MEDIA_TYPE_FIBER:
6076	case HNS3_MEDIA_TYPE_BACKPLANE:
6077		hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled.");
6078		current_mode = hw->requested_fc_mode;
6079		goto out;
6080	default:
6081		hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).",
6082			 hw->mac.media_type);
6083		current_mode = HNS3_FC_NONE;
6084		goto out;
6085	}
6086
6087	if (rx_pause && tx_pause)
6088		current_mode = HNS3_FC_FULL;
6089	else if (rx_pause)
6090		current_mode = HNS3_FC_RX_PAUSE;
6091	else if (tx_pause)
6092		current_mode = HNS3_FC_TX_PAUSE;
6093	else
6094		current_mode = HNS3_FC_NONE;
6095
6096 out:
6097	return current_mode;
6098 }
6099
6100 static enum hns3_fc_mode
6101 hns3_get_current_fc_mode(struct rte_eth_dev *dev)
6102 {
6103	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6104	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6105	struct hns3_mac *mac = &hw->mac;
6106
6107	/*
6108	 * When the flow control mode is queried, the device may not have
6109	 * completed auto-negotiation yet, so wait for link establishment.
6110	 */
6111	(void)hns3_dev_link_update(dev, 1);
6112
6113	/*
6114	 * If link auto-negotiation of the NIC is disabled, or flow control
6115	 * auto-negotiation is not supported, the forced flow control
6116	 * mode is used.
6117	 */
6118	if (mac->link_autoneg == 0 || !pf->support_fc_autoneg)
6119		return hw->requested_fc_mode;
6120
6121	return hns3_get_autoneg_fc_mode(hw);
6122 }
6123
6124 static int
6125 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
6126 {
6127	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6128	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6129	enum hns3_fc_mode current_mode;
6130
6131	current_mode = hns3_get_current_fc_mode(dev);
6132	switch (current_mode) {
6133	case HNS3_FC_FULL:
6134		fc_conf->mode = RTE_FC_FULL;
6135		break;
6136	case HNS3_FC_TX_PAUSE:
6137		fc_conf->mode = RTE_FC_TX_PAUSE;
6138		break;
6139	case HNS3_FC_RX_PAUSE:
6140		fc_conf->mode = RTE_FC_RX_PAUSE;
6141		break;
6142	case HNS3_FC_NONE:
6143	default:
6144		fc_conf->mode = RTE_FC_NONE;
6145		break;
6146	}
6147
6148	fc_conf->pause_time = pf->pause_time;
6149	fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0;
6150
6151	return 0;
6152 }
6153
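/*
 * Illustrative sketch (not part of the driver): forcing full MAC pause
 * from an application through the generic ethdev API, which is served by
 * hns3_flow_ctrl_get()/hns3_flow_ctrl_set() here. The port_id and pause
 * time are hypothetical.
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	memset(&fc_conf, 0, sizeof(fc_conf));
 *	rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 *	fc_conf.mode = RTE_FC_FULL;	// enable both Rx and Tx pause
 *	fc_conf.pause_time = 0xffff;	// must be non-zero, see check below
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */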
6154 static int
6155 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg)
6156 {
6157	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
6158
6159	if (!pf->support_fc_autoneg) {
6160		if (autoneg != 0) {
6161			hns3_err(hw, "unsupported fc auto-negotiation setting.");
6162			return -EOPNOTSUPP;
6163		}
6164
6165		/*
6166		 * Flow control auto-negotiation of the NIC is not supported,
6167		 * but other auto-negotiation features may be supported.
6168		 */
6169		if (autoneg != hw->mac.link_autoneg) {
6170			hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!");
6171			return -EOPNOTSUPP;
6172		}
6173
6174		return 0;
6175	}
6176
6177	/*
6178	 * If flow control auto-negotiation of the NIC is supported, all
6179	 * auto-negotiation features are supported.
6180	 */
6181	if (autoneg != hw->mac.link_autoneg) {
6182		hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!");
6183		return -EOPNOTSUPP;
6184	}
6185
6186	return 0;
6187 }
6188
6189 static int
6190 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
6191 {
6192	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6193	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6194	int ret;
6195
6196	if (fc_conf->high_water || fc_conf->low_water ||
6197	    fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
6198		hns3_err(hw, "Unsupported flow control settings specified, "
6199			 "high_water(%u), low_water(%u), send_xon(%u) and "
6200			 "mac_ctrl_frame_fwd(%u) must be set to '0'",
6201			 fc_conf->high_water, fc_conf->low_water,
6202			 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);
6203		return -EINVAL;
6204	}
6205
6206	ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg);
6207	if (ret)
6208		return ret;
6209
6210	if (!fc_conf->pause_time) {
6211		hns3_err(hw, "Invalid pause time %u setting.",
6212			 fc_conf->pause_time);
6213		return -EINVAL;
6214	}
6215
6216	if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
6217	      hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {
6218		hns3_err(hw, "PFC is enabled. Cannot set MAC pause. "
6219			 "current_fc_status = %d", hw->current_fc_status);
6220		return -EOPNOTSUPP;
6221	}
6222
6223	if (hw->num_tc > 1 && !pf->support_multi_tc_pause) {
6224		hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported.");
6225		return -EOPNOTSUPP;
6226	}
6227
6228	rte_spinlock_lock(&hw->lock);
6229	ret = hns3_fc_enable(dev, fc_conf);
6230	rte_spinlock_unlock(&hw->lock);
6231
6232	return ret;
6233 }
6234
6235 static int
6236 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
6237			    struct rte_eth_pfc_conf *pfc_conf)
6238 {
6239	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6240	int ret;
6241
6242	if (!hns3_dev_dcb_supported(hw)) {
6243		hns3_err(hw, "This port does not support DCB configurations.");
6244		return -EOPNOTSUPP;
6245	}
6246
6247	if (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||
6248	    pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {
6249		hns3_err(hw, "Unsupported flow control settings specified, "
6250			 "high_water(%u), low_water(%u), send_xon(%u) and "
6251			 "mac_ctrl_frame_fwd(%u) must be set to '0'",
6252			 pfc_conf->fc.high_water, pfc_conf->fc.low_water,
6253			 pfc_conf->fc.send_xon,
6254			 pfc_conf->fc.mac_ctrl_frame_fwd);
6255		return -EINVAL;
6256	}
6257	if (pfc_conf->fc.autoneg) {
6258		hns3_err(hw, "Unsupported fc auto-negotiation setting.");
6259		return -EINVAL;
6260	}
6261	if (pfc_conf->fc.pause_time == 0) {
6262		hns3_err(hw, "Invalid pause time %u setting.",
6263			 pfc_conf->fc.pause_time);
6264		return -EINVAL;
6265	}
6266
6267	if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
6268	      hw->current_fc_status == HNS3_FC_STATUS_PFC)) {
6269		hns3_err(hw, "MAC pause is enabled. Cannot set PFC. "
6270 "current_fc_status = %d", hw->current_fc_status); 6271 return -EOPNOTSUPP; 6272 } 6273 6274 rte_spinlock_lock(&hw->lock); 6275 ret = hns3_dcb_pfc_enable(dev, pfc_conf); 6276 rte_spinlock_unlock(&hw->lock); 6277 6278 return ret; 6279 } 6280 6281 static int 6282 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) 6283 { 6284 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6285 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6286 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; 6287 int i; 6288 6289 rte_spinlock_lock(&hw->lock); 6290 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) 6291 dcb_info->nb_tcs = pf->local_max_tc; 6292 else 6293 dcb_info->nb_tcs = 1; 6294 6295 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) 6296 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; 6297 for (i = 0; i < dcb_info->nb_tcs; i++) 6298 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; 6299 6300 for (i = 0; i < hw->num_tc; i++) { 6301 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; 6302 dcb_info->tc_queue.tc_txq[0][i].base = 6303 hw->tc_queue[i].tqp_offset; 6304 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; 6305 dcb_info->tc_queue.tc_txq[0][i].nb_queue = 6306 hw->tc_queue[i].tqp_count; 6307 } 6308 rte_spinlock_unlock(&hw->lock); 6309 6310 return 0; 6311 } 6312 6313 static int 6314 hns3_reinit_dev(struct hns3_adapter *hns) 6315 { 6316 struct hns3_hw *hw = &hns->hw; 6317 int ret; 6318 6319 ret = hns3_cmd_init(hw); 6320 if (ret) { 6321 hns3_err(hw, "Failed to init cmd: %d", ret); 6322 return ret; 6323 } 6324 6325 ret = hns3_reset_all_tqps(hns); 6326 if (ret) { 6327 hns3_err(hw, "Failed to reset all queues: %d", ret); 6328 return ret; 6329 } 6330 6331 ret = hns3_init_hardware(hns); 6332 if (ret) { 6333 hns3_err(hw, "Failed to init hardware: %d", ret); 6334 return ret; 6335 } 6336 6337 ret = hns3_enable_hw_error_intr(hns, true); 6338 if (ret) { 6339 hns3_err(hw, "fail to enable hw error interrupts: %d", 6340 ret); 6341 return ret; 6342 } 6343 hns3_info(hw, "Reset done, driver initialization finished."); 6344 6345 return 0; 6346 } 6347 6348 static bool 6349 is_pf_reset_done(struct hns3_hw *hw) 6350 { 6351 uint32_t val, reg, reg_bit; 6352 6353 switch (hw->reset.level) { 6354 case HNS3_IMP_RESET: 6355 reg = HNS3_GLOBAL_RESET_REG; 6356 reg_bit = HNS3_IMP_RESET_BIT; 6357 break; 6358 case HNS3_GLOBAL_RESET: 6359 reg = HNS3_GLOBAL_RESET_REG; 6360 reg_bit = HNS3_GLOBAL_RESET_BIT; 6361 break; 6362 case HNS3_FUNC_RESET: 6363 reg = HNS3_FUN_RST_ING; 6364 reg_bit = HNS3_FUN_RST_ING_B; 6365 break; 6366 case HNS3_FLR_RESET: 6367 default: 6368 hns3_err(hw, "Wait for unsupported reset level: %d", 6369 hw->reset.level); 6370 return true; 6371 } 6372 val = hns3_read_dev(hw, reg); 6373 if (hns3_get_bit(val, reg_bit)) 6374 return false; 6375 else 6376 return true; 6377 } 6378 6379 bool 6380 hns3_is_reset_pending(struct hns3_adapter *hns) 6381 { 6382 struct hns3_hw *hw = &hns->hw; 6383 enum hns3_reset_level reset; 6384 6385 hns3_check_event_cause(hns, NULL); 6386 reset = hns3_get_reset_level(hns, &hw->reset.pending); 6387 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6388 hw->reset.level < reset) { 6389 hns3_warn(hw, "High level reset %d is pending", reset); 6390 return true; 6391 } 6392 reset = hns3_get_reset_level(hns, &hw->reset.request); 6393 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6394 hw->reset.level < reset) { 6395 hns3_warn(hw, "High level reset %d is 
request", reset); 6396 return true; 6397 } 6398 return false; 6399 } 6400 6401 static int 6402 hns3_wait_hardware_ready(struct hns3_adapter *hns) 6403 { 6404 struct hns3_hw *hw = &hns->hw; 6405 struct hns3_wait_data *wait_data = hw->reset.wait_data; 6406 struct timeval tv; 6407 6408 if (wait_data->result == HNS3_WAIT_SUCCESS) 6409 return 0; 6410 else if (wait_data->result == HNS3_WAIT_TIMEOUT) { 6411 hns3_clock_gettime(&tv); 6412 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", 6413 tv.tv_sec, tv.tv_usec); 6414 return -ETIME; 6415 } else if (wait_data->result == HNS3_WAIT_REQUEST) 6416 return -EAGAIN; 6417 6418 wait_data->hns = hns; 6419 wait_data->check_completion = is_pf_reset_done; 6420 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT * 6421 HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms(); 6422 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC; 6423 wait_data->count = HNS3_RESET_WAIT_CNT; 6424 wait_data->result = HNS3_WAIT_REQUEST; 6425 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); 6426 return -EAGAIN; 6427 } 6428 6429 static int 6430 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) 6431 { 6432 struct hns3_cmd_desc desc; 6433 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data; 6434 6435 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); 6436 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1); 6437 req->fun_reset_vfid = func_id; 6438 6439 return hns3_cmd_send(hw, &desc, 1); 6440 } 6441 6442 static int 6443 hns3_imp_reset_cmd(struct hns3_hw *hw) 6444 { 6445 struct hns3_cmd_desc desc; 6446 6447 hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); 6448 desc.data[0] = 0xeedd; 6449 6450 return hns3_cmd_send(hw, &desc, 1); 6451 } 6452 6453 static void 6454 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) 6455 { 6456 struct hns3_hw *hw = &hns->hw; 6457 struct timeval tv; 6458 uint32_t val; 6459 6460 hns3_clock_gettime(&tv); 6461 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) || 6462 hns3_read_dev(hw, HNS3_FUN_RST_ING)) { 6463 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld", 6464 tv.tv_sec, tv.tv_usec); 6465 return; 6466 } 6467 6468 switch (reset_level) { 6469 case HNS3_IMP_RESET: 6470 hns3_imp_reset_cmd(hw); 6471 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", 6472 tv.tv_sec, tv.tv_usec); 6473 break; 6474 case HNS3_GLOBAL_RESET: 6475 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG); 6476 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1); 6477 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val); 6478 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld", 6479 tv.tv_sec, tv.tv_usec); 6480 break; 6481 case HNS3_FUNC_RESET: 6482 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld", 6483 tv.tv_sec, tv.tv_usec); 6484 /* schedule again to check later */ 6485 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending); 6486 hns3_schedule_reset(hns); 6487 break; 6488 default: 6489 hns3_warn(hw, "Unsupported reset level: %d", reset_level); 6490 return; 6491 } 6492 hns3_atomic_clear_bit(reset_level, &hw->reset.request); 6493 } 6494 6495 static enum hns3_reset_level 6496 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) 6497 { 6498 struct hns3_hw *hw = &hns->hw; 6499 enum hns3_reset_level reset_level = HNS3_NONE_RESET; 6500 6501 /* Return the highest priority reset level amongst all */ 6502 if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels)) 6503 reset_level = HNS3_IMP_RESET; 6504 else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels)) 6505 reset_level = 
6517 static void
6518 hns3_record_imp_error(struct hns3_adapter *hns)
6519 {
6520	struct hns3_hw *hw = &hns->hw;
6521	uint32_t reg_val;
6522
6523	reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
6524	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
6525		hns3_warn(hw, "Detected IMP RD poison!");
6526		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
6527		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
6528	}
6529
6530	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
6531		hns3_warn(hw, "Detected IMP CMDQ error!");
6532		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
6533		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
6534	}
6535 }
6536
6537 static int
6538 hns3_prepare_reset(struct hns3_adapter *hns)
6539 {
6540	struct hns3_hw *hw = &hns->hw;
6541	uint32_t reg_val;
6542	int ret;
6543
6544	switch (hw->reset.level) {
6545	case HNS3_FUNC_RESET:
6546		ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
6547		if (ret)
6548			return ret;
6549
6550		/*
6551		 * After performing the PF reset, it is not necessary to do
6552		 * any mailbox handling or send any command to the firmware,
6553		 * because mailbox handling and firmware commands are only
6554		 * valid after hns3_cmd_init is called.
6555		 */
6556		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
6557		hw->reset.stats.request_cnt++;
6558		break;
6559	case HNS3_IMP_RESET:
6560		hns3_record_imp_error(hns);
6561		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
6562		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
6563			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
6564		break;
6565	default:
6566		break;
6567	}
6568	return 0;
6569 }
6570
6571 static int
6572 hns3_set_rst_done(struct hns3_hw *hw)
6573 {
6574	struct hns3_pf_rst_done_cmd *req;
6575	struct hns3_cmd_desc desc;
6576
6577	req = (struct hns3_pf_rst_done_cmd *)desc.data;
6578	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
6579	req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
6580	return hns3_cmd_send(hw, &desc, 1);
6581 }
6582
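/*
 * hns3_set_rst_done() is the driver half of the reset handshake with the
 * firmware: for IMP and global resets, hns3_start_service() below sends
 * HNS3_OPC_PF_RST_DONE so the firmware knows the PF driver has finished
 * its recovery.
 */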
6583 static int
6584 hns3_stop_service(struct hns3_adapter *hns)
6585 {
6586	struct hns3_hw *hw = &hns->hw;
6587	struct rte_eth_dev *eth_dev;
6588
6589	eth_dev = &rte_eth_devices[hw->data->port_id];
6590	hw->mac.link_status = ETH_LINK_DOWN;
6591	if (hw->adapter_state == HNS3_NIC_STARTED) {
6592		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
6593		hns3_update_linkstatus_and_event(hw, false);
6594	}
6595
6596	hns3_set_rxtx_function(eth_dev);
6597	rte_wmb();
6598	/* Disable datapath on secondary process. */
6599	hns3_mp_req_stop_rxtx(eth_dev);
6600	rte_delay_ms(hw->cfg_max_queues);
6601
6602	rte_spinlock_lock(&hw->lock);
6603	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
6604	    hw->adapter_state == HNS3_NIC_STOPPING) {
6605		hns3_enable_all_queues(hw, false);
6606		hns3_do_stop(hns);
6607		hw->reset.mbuf_deferred_free = true;
6608	} else
6609		hw->reset.mbuf_deferred_free = false;
6610
6611	/*
6612	 * It is cumbersome for hardware to pick-and-choose entries for
6613	 * deletion from table space. Hence, for function reset, software
6614	 * intervention is required to delete the entries.
6615	 */
6616	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
6617		hns3_configure_all_mc_mac_addr(hns, true);
6618	rte_spinlock_unlock(&hw->lock);
6619
6620	return 0;
6621 }
6622
6623 static int
6624 hns3_start_service(struct hns3_adapter *hns)
6625 {
6626	struct hns3_hw *hw = &hns->hw;
6627	struct rte_eth_dev *eth_dev;
6628
6629	if (hw->reset.level == HNS3_IMP_RESET ||
6630	    hw->reset.level == HNS3_GLOBAL_RESET)
6631		hns3_set_rst_done(hw);
6632	eth_dev = &rte_eth_devices[hw->data->port_id];
6633	hns3_set_rxtx_function(eth_dev);
6634	hns3_mp_req_start_rxtx(eth_dev);
6635	if (hw->adapter_state == HNS3_NIC_STARTED) {
6636		/*
6637		 * The caller of this API already holds hns3_hw.lock.
6638		 * hns3_service_handler may report LSE; in a bonding
6639		 * application this callback may invoke driver ops that
6640		 * acquire hns3_hw.lock again, leading to deadlock, so the
6641		 * hns3_service_handler call is deferred to avoid it.
6642		 */
6643		rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
6644				  hns3_service_handler, eth_dev);
6645
6646		/* Enable interrupt of all rx queues before enabling queues */
6647		hns3_dev_all_rx_queue_intr_enable(hw, true);
6648		/*
6649		 * The enable state of each rxq and txq is lost across reset,
6650		 * so restore it before enabling all tqps.
6651		 */
6652		hns3_restore_tqp_enable_state(hw);
6653		/*
6654		 * When the initialization is finished, enable queues to
6655		 * receive and transmit packets.
6656		 */
6657		hns3_enable_all_queues(hw, true);
6658	}
6659
6660	return 0;
6661 }
6662
6663 static int
6664 hns3_restore_conf(struct hns3_adapter *hns)
6665 {
6666	struct hns3_hw *hw = &hns->hw;
6667	int ret;
6668
6669	ret = hns3_configure_all_mac_addr(hns, false);
6670	if (ret)
6671		return ret;
6672
6673	ret = hns3_configure_all_mc_mac_addr(hns, false);
6674	if (ret)
6675		goto err_mc_mac;
6676
6677	ret = hns3_dev_promisc_restore(hns);
6678	if (ret)
6679		goto err_promisc;
6680
6681	ret = hns3_restore_vlan_table(hns);
6682	if (ret)
6683		goto err_promisc;
6684
6685	ret = hns3_restore_vlan_conf(hns);
6686	if (ret)
6687		goto err_promisc;
6688
6689	ret = hns3_restore_all_fdir_filter(hns);
6690	if (ret)
6691		goto err_promisc;
6692
6693	ret = hns3_restore_ptp(hns);
6694	if (ret)
6695		goto err_promisc;
6696
6697	ret = hns3_restore_rx_interrupt(hw);
6698	if (ret)
6699		goto err_promisc;
6700
6701	ret = hns3_restore_gro_conf(hw);
6702	if (ret)
6703		goto err_promisc;
6704
6705	ret = hns3_restore_fec(hw);
6706	if (ret)
6707		goto err_promisc;
6708
6709	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
6710		ret = hns3_do_start(hns, false);
6711		if (ret)
6712			goto err_promisc;
6713		hns3_info(hw, "hns3 dev restart successful!");
6714	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
6715		hw->adapter_state = HNS3_NIC_CONFIGURED;
6716	return 0;
6717
6718 err_promisc:
6719	hns3_configure_all_mc_mac_addr(hns, true);
6720 err_mc_mac:
6721	hns3_configure_all_mac_addr(hns, true);
6722	return ret;
6723 }
6724
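/*
 * hns3_reset_service() below drives the recovery through the callbacks
 * registered in hns3_reset_ops at the end of this file, roughly in the
 * order:
 *
 *	stop_service -> prepare_reset -> wait_hardware_ready ->
 *	reinit_dev -> restore_conf -> start_service
 */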
6725 static void
6726 hns3_reset_service(void *param)
6727 {
6728	struct hns3_adapter *hns = (struct hns3_adapter *)param;
6729	struct hns3_hw *hw = &hns->hw;
6730	enum hns3_reset_level reset_level;
6731	struct timeval tv_delta;
6732	struct timeval tv_start;
6733	struct timeval tv;
6734	uint64_t msec;
6735	int ret;
6736
6737	/*
6738	 * If the interrupt was not triggered within the delay time, it may
6739	 * have been lost, so handle the interrupt now to recover from
6740	 * the error.
6741	 */
6742	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
6743	    SCHEDULE_DEFERRED) {
6744		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
6745				 __ATOMIC_RELAXED);
6746		hns3_err(hw, "Handling interrupts in delayed tasks");
6747		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
6748		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
6749		if (reset_level == HNS3_NONE_RESET) {
6750			hns3_err(hw, "No reset level is set, try IMP reset");
6751			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
6752		}
6753	}
6754	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
6755
6756	/*
6757	 * Check if there is any ongoing reset in the hardware. This status
6758	 * can be checked from reset_pending. If there is one, we need to
6759	 * wait for the hardware to complete the reset:
6760	 * a. If we are able to figure out in reasonable time that the
6761	 *    hardware has fully reset, we can proceed with the driver and
6762	 *    client reset.
6763	 * b. Else, we can come back later to check this status, so
6764	 *    reschedule now.
6765	 */
6766	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
6767	if (reset_level != HNS3_NONE_RESET) {
6768		hns3_clock_gettime(&tv_start);
6769		ret = hns3_reset_process(hns, reset_level);
6770		hns3_clock_gettime(&tv);
6771		timersub(&tv, &tv_start, &tv_delta);
6772		msec = hns3_clock_calctime_ms(&tv_delta);
6773		if (msec > HNS3_RESET_PROCESS_MS)
6774			hns3_err(hw, "reset level %d handling took a long time: %" PRIu64
6775				 " ms, time=%ld.%.6ld",
6776				 hw->reset.level, msec,
6777				 tv.tv_sec, tv.tv_usec);
6778		if (ret == -EAGAIN)
6779			return;
6780	}
6781
6782	/* Check if we got any *new* reset requests to be honored */
6783	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
6784	if (reset_level != HNS3_NONE_RESET)
6785		hns3_msix_process(hns, reset_level);
6786 }
6787
6788 static unsigned int
6789 hns3_get_speed_capa_num(uint16_t device_id)
6790 {
6791	unsigned int num;
6792
6793	switch (device_id) {
6794	case HNS3_DEV_ID_25GE:
6795	case HNS3_DEV_ID_25GE_RDMA:
6796		num = 2;
6797		break;
6798	case HNS3_DEV_ID_100G_RDMA_MACSEC:
6799	case HNS3_DEV_ID_200G_RDMA:
6800		num = 1;
6801		break;
6802	default:
6803		num = 0;
6804		break;
6805	}
6806
6807	return num;
6808 }
6809
6810 static int
6811 hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
6812			uint16_t device_id)
6813 {
6814	switch (device_id) {
6815	case HNS3_DEV_ID_25GE:
6816	/* fallthrough */
6817	case HNS3_DEV_ID_25GE_RDMA:
6818		speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
6819		speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;
6820
6821		/* In HNS3 devices, the 25G NIC is also compatible with the 10G rate */
6822		speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
6823		speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
6824		break;
6825	case HNS3_DEV_ID_100G_RDMA_MACSEC:
6826		speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
6827		speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
6828		break;
6829	case HNS3_DEV_ID_200G_RDMA:
6830		speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
6831		speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
6832		break;
6833	default:
6834		return -ENOTSUP;
6835	}
6836
6837	return 0;
6838 }
6839
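/*
 * Illustrative sketch (not part of the driver): the two-step FEC
 * capability query an application performs, served by
 * hns3_fec_get_capability() below; a first call with a too-small (or
 * NULL) buffer returns the required number of entries. The port_id is
 * hypothetical.
 *
 *	struct rte_eth_fec_capa capa[2];
 *	int num;
 *
 *	num = rte_eth_fec_get_capability(port_id, NULL, 0);
 *	if (num > 0 && num <= 2)
 *		num = rte_eth_fec_get_capability(port_id, capa, num);
 */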
6840 static int
6841 hns3_fec_get_capability(struct rte_eth_dev *dev,
6842			struct rte_eth_fec_capa *speed_fec_capa,
6843			unsigned int num)
6844 {
6845	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6846	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
6847	uint16_t device_id = pci_dev->id.device_id;
6848	unsigned int capa_num;
6849	int ret;
6850
6851	capa_num = hns3_get_speed_capa_num(device_id);
6852	if (capa_num == 0) {
6853		hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
6854			 device_id);
6855		return -ENOTSUP;
6856	}
6857
6858	if (speed_fec_capa == NULL || num < capa_num)
6859		return capa_num;
6860
6861	ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
6862	if (ret)
6863		return -ENOTSUP;
6864
6865	return capa_num;
6866 }
6867
6868 static int
6869 get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
6870 {
6871	struct hns3_config_fec_cmd *req;
6872	struct hns3_cmd_desc desc;
6873	int ret;
6874
6875	/*
6876	 * Reading the CMD(HNS3_OPC_CONFIG_FEC_MODE) command is not
6877	 * supported on devices with a link speed
6878	 * below 10 Gbps.
6879	 */
6880	if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
6881		*state = 0;
6882		return 0;
6883	}
6884
6885	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
6886	req = (struct hns3_config_fec_cmd *)desc.data;
6887	ret = hns3_cmd_send(hw, &desc, 1);
6888	if (ret) {
6889		hns3_err(hw, "get current fec auto state failed, ret = %d",
6890			 ret);
6891		return ret;
6892	}
6893
6894	*state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
6895	return 0;
6896 }
6897
6898 static int
6899 hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
6900 {
6901	struct hns3_sfp_info_cmd *resp;
6902	uint32_t tmp_fec_capa;
6903	uint8_t auto_state;
6904	struct hns3_cmd_desc desc;
6905	int ret;
6906
6907	/*
6908	 * If the link is down and AUTO is enabled, AUTO is returned;
6909	 * otherwise, the configured FEC mode is returned.
6910	 * If the link is up, the current FEC mode is returned.
6911	 */
6912	if (hw->mac.link_status == ETH_LINK_DOWN) {
6913		ret = get_current_fec_auto_state(hw, &auto_state);
6914		if (ret)
6915			return ret;
6916
6917		if (auto_state == 0x1) {
6918			*fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
6919			return 0;
6920		}
6921	}
6922
6923	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
6924	resp = (struct hns3_sfp_info_cmd *)desc.data;
6925	resp->query_type = HNS3_ACTIVE_QUERY;
6926
6927	ret = hns3_cmd_send(hw, &desc, 1);
6928	if (ret == -EOPNOTSUPP) {
6929		hns3_err(hw, "IMP does not support getting FEC, ret = %d", ret);
6930		return ret;
6931	} else if (ret) {
6932		hns3_err(hw, "get FEC failed, ret = %d", ret);
6933		return ret;
6934	}
6935
6936	/*
6937	 * The FEC mode order defined in hns3 hardware is inconsistent with
6938	 * the one defined in the ethdev library, so the sequence needs
6939	 * to be converted.
6940	 */
6941	switch (resp->active_fec) {
6942	case HNS3_HW_FEC_MODE_NOFEC:
6943		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
6944		break;
6945	case HNS3_HW_FEC_MODE_BASER:
6946		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
6947		break;
6948	case HNS3_HW_FEC_MODE_RS:
6949		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
6950		break;
6951	default:
6952		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
6953		break;
6954	}
6955
6956	*fec_capa = tmp_fec_capa;
6957	return 0;
6958 }
6959
6960 static int
6961 hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
6962 {
6963	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6964
6965	return hns3_fec_get_internal(hw, fec_capa);
6966 }
6967
6968 static int
6969 hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
6970 {
6971	struct hns3_config_fec_cmd *req;
6972	struct hns3_cmd_desc desc;
6973	int ret;
6974
6975	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);
6976
6977	req = (struct hns3_config_fec_cmd *)desc.data;
6978	switch (mode) {
6979	case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
6980		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6981			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
6982		break;
6983	case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
6984		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6985			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
6986		break;
6987	case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
6988		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6989			       HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
6990		break;
6991	case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
6992		hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
6993		break;
6994	default:
6995		return 0;
6996	}
6997	ret = hns3_cmd_send(hw, &desc, 1);
6998	if (ret)
6999		hns3_err(hw, "set fec mode failed, ret = %d", ret);
7000
7001	return ret;
7002 }
7003
7004 static uint32_t
7005 get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
7006 {
7007	struct hns3_mac *mac = &hw->mac;
7008	uint32_t cur_capa;
7009
7010	switch (mac->link_speed) {
7011	case ETH_SPEED_NUM_10G:
7012		cur_capa = fec_capa[1].capa;
7013		break;
7014	case ETH_SPEED_NUM_25G:
7015	case ETH_SPEED_NUM_100G:
7016	case ETH_SPEED_NUM_200G:
7017		cur_capa = fec_capa[0].capa;
7018		break;
7019	default:
7020		cur_capa = 0;
7021		break;
7022	}
7023
7024	return cur_capa;
7025 }
7026
7027 static bool
7028 is_fec_mode_one_bit_set(uint32_t mode)
7029 {
7030	int cnt = 0;
7031	uint8_t i;
7032
7033	for (i = 0; i < sizeof(mode) * 8; i++) /* all 32 bits, not 4 */
7034		if (mode >> i & 0x1)
7035			cnt++;
7036
7037	return cnt == 1 ? true : false;
7038 }
7039
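/*
 * Illustrative sketch (not part of the driver): forcing RS-FEC from an
 * application; exactly one mode bit must be set, as hns3_fec_set() below
 * checks. The port_id is hypothetical.
 *
 *	uint32_t mode = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
 *	int ret = rte_eth_fec_set(port_id, mode);
 *	// ret == -EINVAL: mode not supported at the current link speed
 */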
7040 static int
7041 hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
7042 {
7043 #define FEC_CAPA_NUM 2
7044	struct hns3_adapter *hns = dev->data->dev_private;
7045	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
7046	struct hns3_pf *pf = &hns->pf;
7047
7048	struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
7049	uint32_t cur_capa;
7050	uint32_t num = FEC_CAPA_NUM;
7051	int ret;
7052
7053	ret = hns3_fec_get_capability(dev, fec_capa, num);
7054	if (ret < 0)
7055		return ret;
7056
7057	/* The HNS3 PMD only supports modes with one bit set, e.g. 0x1, 0x4 */
7058	if (!is_fec_mode_one_bit_set(mode)) {
7059		hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
7060			 "the FEC mode should have only one bit set", mode);
7061		return -EINVAL;
7062	}
7063
7064	/*
7065	 * Check whether the configured mode is within the FEC capability.
7066	 * If not, the configured mode is not supported.
7067	 */
7068	cur_capa = get_current_speed_fec_cap(hw, fec_capa);
7069	if (!(cur_capa & mode)) {
7070		hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
7071		return -EINVAL;
7072	}
7073
7074	rte_spinlock_lock(&hw->lock);
7075	ret = hns3_set_fec_hw(hw, mode);
7076	if (ret) {
7077		rte_spinlock_unlock(&hw->lock);
7078		return ret;
7079	}
7080
7081	pf->fec_mode = mode;
7082	rte_spinlock_unlock(&hw->lock);
7083
7084	return 0;
7085 }
7086
7087 static int
7088 hns3_restore_fec(struct hns3_hw *hw)
7089 {
7090	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
7091	struct hns3_pf *pf = &hns->pf;
7092	uint32_t mode = pf->fec_mode;
7093	int ret;
7094
7095	ret = hns3_set_fec_hw(hw, mode);
7096	if (ret)
7097		hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
7098			 mode, ret);
7099
7100	return ret;
7101 }
7102
7103 static int
7104 hns3_query_dev_fec_info(struct hns3_hw *hw)
7105 {
7106	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
7107	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
7108	int ret;
7109
7110	ret = hns3_fec_get_internal(hw, &pf->fec_mode);
7111	if (ret)
7112		hns3_err(hw, "query device FEC info failed, ret = %d", ret);
7113
7114	return ret;
7115 }
7116
7117 static bool
7118 hns3_optical_module_existed(struct hns3_hw *hw)
7119 {
7120	struct hns3_cmd_desc desc;
7121	bool existed;
7122	int ret;
7123
7124	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
7125	ret = hns3_cmd_send(hw, &desc, 1);
7126	if (ret) {
7127		hns3_err(hw,
7128			 "failed to get optical module exist state, ret = %d.\n",
7129			 ret);
7130		return false;
7131	}
7132	existed = !!desc.data[0];
7133
7134	return existed;
7135 }
7136
7137 static int
7138 hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
7139			    uint32_t len, uint8_t *data)
7140 {
7141 #define HNS3_SFP_INFO_CMD_NUM 6
7142 #define HNS3_SFP_INFO_MAX_LEN \
7143	(HNS3_SFP_INFO_BD0_LEN + \
7144	(HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
7145	struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
7146	struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
7147	uint16_t read_len;
7148	uint16_t copy_len;
7149	int ret;
7150	int i;
7151
7152	for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
7153		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
7154					  true);
7155		if (i < HNS3_SFP_INFO_CMD_NUM - 1)
7156			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
7157	}
7158
7159	sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
7160	sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
7161	read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
7162	sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);
7163
7164	ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
7165	if (ret) {
7166		hns3_err(hw, "failed to get module EEPROM info, ret = %d.\n",
7167			 ret);
7168		return ret;
7169	}
7170
7171	/* The data format in BD0 is different from the others.
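	 * BD0 carries HNS3_SFP_INFO_BD0_LEN bytes of EEPROM data after its
	 * header fields, while each following BD carries up to
	 * HNS3_SFP_INFO_BDX_LEN bytes in its whole data area, so BD0 is
	 * copied first and the remaining BDs are walked afterwards.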
*/ 7172 copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN); 7173 memcpy(data, sfp_info_bd0->data, copy_len); 7174 read_len = copy_len; 7175 7176 for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) { 7177 if (read_len >= len) 7178 break; 7179 7180 copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN); 7181 memcpy(data + read_len, desc[i].data, copy_len); 7182 read_len += copy_len; 7183 } 7184 7185 return (int)read_len; 7186 } 7187 7188 static int 7189 hns3_get_module_eeprom(struct rte_eth_dev *dev, 7190 struct rte_dev_eeprom_info *info) 7191 { 7192 struct hns3_adapter *hns = dev->data->dev_private; 7193 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7194 uint32_t offset = info->offset; 7195 uint32_t len = info->length; 7196 uint8_t *data = info->data; 7197 uint32_t read_len = 0; 7198 7199 if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER) 7200 return -ENOTSUP; 7201 7202 if (!hns3_optical_module_existed(hw)) { 7203 hns3_err(hw, "fail to read module EEPROM: no module is connected.\n"); 7204 return -EIO; 7205 } 7206 7207 while (read_len < len) { 7208 int ret; 7209 ret = hns3_get_module_eeprom_data(hw, offset + read_len, 7210 len - read_len, 7211 data + read_len); 7212 if (ret < 0) 7213 return -EIO; 7214 read_len += ret; 7215 } 7216 7217 return 0; 7218 } 7219 7220 static int 7221 hns3_get_module_info(struct rte_eth_dev *dev, 7222 struct rte_eth_dev_module_info *modinfo) 7223 { 7224 #define HNS3_SFF8024_ID_SFP 0x03 7225 #define HNS3_SFF8024_ID_QSFP_8438 0x0c 7226 #define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d 7227 #define HNS3_SFF8024_ID_QSFP28_8636 0x11 7228 #define HNS3_SFF_8636_V1_3 0x03 7229 struct hns3_adapter *hns = dev->data->dev_private; 7230 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7231 struct rte_dev_eeprom_info info; 7232 struct hns3_sfp_type sfp_type; 7233 int ret; 7234 7235 memset(&sfp_type, 0, sizeof(sfp_type)); 7236 memset(&info, 0, sizeof(info)); 7237 info.data = (uint8_t *)&sfp_type; 7238 info.length = sizeof(sfp_type); 7239 ret = hns3_get_module_eeprom(dev, &info); 7240 if (ret) 7241 return ret; 7242 7243 switch (sfp_type.type) { 7244 case HNS3_SFF8024_ID_SFP: 7245 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7246 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7247 break; 7248 case HNS3_SFF8024_ID_QSFP_8438: 7249 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7250 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7251 break; 7252 case HNS3_SFF8024_ID_QSFP_8436_8636: 7253 if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) { 7254 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7255 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7256 } else { 7257 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7258 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7259 } 7260 break; 7261 case HNS3_SFF8024_ID_QSFP28_8636: 7262 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7263 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7264 break; 7265 default: 7266 hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n", 7267 sfp_type.type, sfp_type.ext_type); 7268 return -EINVAL; 7269 } 7270 7271 return 0; 7272 } 7273 7274 void 7275 hns3_clock_gettime(struct timeval *tv) 7276 { 7277 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 7278 #define CLOCK_TYPE CLOCK_MONOTONIC_RAW 7279 #else 7280 #define CLOCK_TYPE CLOCK_MONOTONIC 7281 #endif 7282 #define NSEC_TO_USEC_DIV 1000 7283 7284 struct timespec spec; 7285 (void)clock_gettime(CLOCK_TYPE, &spec); 7286 7287 tv->tv_sec = spec.tv_sec; 7288 tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV; 7289 } 7290 7291 uint64_t 7292 hns3_clock_calctime_ms(struct 
timeval *tv) 7293 { 7294 return (uint64_t)tv->tv_sec * MSEC_PER_SEC + 7295 tv->tv_usec / USEC_PER_MSEC; 7296 } 7297 7298 uint64_t 7299 hns3_clock_gettime_ms(void) 7300 { 7301 struct timeval tv; 7302 7303 hns3_clock_gettime(&tv); 7304 return hns3_clock_calctime_ms(&tv); 7305 } 7306 7307 static int 7308 hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) 7309 { 7310 uint32_t hint = HNS3_IO_FUNC_HINT_NONE; 7311 7312 RTE_SET_USED(key); 7313 7314 if (strcmp(value, "vec") == 0) 7315 hint = HNS3_IO_FUNC_HINT_VEC; 7316 else if (strcmp(value, "sve") == 0) 7317 hint = HNS3_IO_FUNC_HINT_SVE; 7318 else if (strcmp(value, "simple") == 0) 7319 hint = HNS3_IO_FUNC_HINT_SIMPLE; 7320 else if (strcmp(value, "common") == 0) 7321 hint = HNS3_IO_FUNC_HINT_COMMON; 7322 7323 /* If the hint is valid then update output parameters */ 7324 if (hint != HNS3_IO_FUNC_HINT_NONE) 7325 *(uint32_t *)extra_args = hint; 7326 7327 return 0; 7328 } 7329 7330 static const char * 7331 hns3_get_io_hint_func_name(uint32_t hint) 7332 { 7333 switch (hint) { 7334 case HNS3_IO_FUNC_HINT_VEC: 7335 return "vec"; 7336 case HNS3_IO_FUNC_HINT_SVE: 7337 return "sve"; 7338 case HNS3_IO_FUNC_HINT_SIMPLE: 7339 return "simple"; 7340 case HNS3_IO_FUNC_HINT_COMMON: 7341 return "common"; 7342 default: 7343 return "none"; 7344 } 7345 } 7346 7347 static int 7348 hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) 7349 { 7350 uint64_t val; 7351 7352 RTE_SET_USED(key); 7353 7354 val = strtoull(value, NULL, 16); 7355 *(uint64_t *)extra_args = val; 7356 7357 return 0; 7358 } 7359 7360 void 7361 hns3_parse_devargs(struct rte_eth_dev *dev) 7362 { 7363 struct hns3_adapter *hns = dev->data->dev_private; 7364 uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7365 uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7366 struct hns3_hw *hw = &hns->hw; 7367 uint64_t dev_caps_mask = 0; 7368 struct rte_kvargs *kvlist; 7369 7370 if (dev->device->devargs == NULL) 7371 return; 7372 7373 kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL); 7374 if (!kvlist) 7375 return; 7376 7377 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT, 7378 &hns3_parse_io_hint_func, &rx_func_hint); 7379 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT, 7380 &hns3_parse_io_hint_func, &tx_func_hint); 7381 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK, 7382 &hns3_parse_dev_caps_mask, &dev_caps_mask); 7383 rte_kvargs_free(kvlist); 7384 7385 if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7386 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT, 7387 hns3_get_io_hint_func_name(rx_func_hint)); 7388 hns->rx_func_hint = rx_func_hint; 7389 if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7390 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT, 7391 hns3_get_io_hint_func_name(tx_func_hint)); 7392 hns->tx_func_hint = tx_func_hint; 7393 7394 if (dev_caps_mask != 0) 7395 hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".", 7396 HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask); 7397 hns->dev_caps_mask = dev_caps_mask; 7398 } 7399 7400 static const struct eth_dev_ops hns3_eth_dev_ops = { 7401 .dev_configure = hns3_dev_configure, 7402 .dev_start = hns3_dev_start, 7403 .dev_stop = hns3_dev_stop, 7404 .dev_close = hns3_dev_close, 7405 .promiscuous_enable = hns3_dev_promiscuous_enable, 7406 .promiscuous_disable = hns3_dev_promiscuous_disable, 7407 .allmulticast_enable = hns3_dev_allmulticast_enable, 7408 .allmulticast_disable = hns3_dev_allmulticast_disable, 7409 .mtu_set = hns3_dev_mtu_set, 7410 .stats_get = hns3_stats_get, 7411 
.stats_reset = hns3_stats_reset, 7412 .xstats_get = hns3_dev_xstats_get, 7413 .xstats_get_names = hns3_dev_xstats_get_names, 7414 .xstats_reset = hns3_dev_xstats_reset, 7415 .xstats_get_by_id = hns3_dev_xstats_get_by_id, 7416 .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, 7417 .dev_infos_get = hns3_dev_infos_get, 7418 .fw_version_get = hns3_fw_version_get, 7419 .rx_queue_setup = hns3_rx_queue_setup, 7420 .tx_queue_setup = hns3_tx_queue_setup, 7421 .rx_queue_release = hns3_dev_rx_queue_release, 7422 .tx_queue_release = hns3_dev_tx_queue_release, 7423 .rx_queue_start = hns3_dev_rx_queue_start, 7424 .rx_queue_stop = hns3_dev_rx_queue_stop, 7425 .tx_queue_start = hns3_dev_tx_queue_start, 7426 .tx_queue_stop = hns3_dev_tx_queue_stop, 7427 .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, 7428 .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, 7429 .rxq_info_get = hns3_rxq_info_get, 7430 .txq_info_get = hns3_txq_info_get, 7431 .rx_burst_mode_get = hns3_rx_burst_mode_get, 7432 .tx_burst_mode_get = hns3_tx_burst_mode_get, 7433 .flow_ctrl_get = hns3_flow_ctrl_get, 7434 .flow_ctrl_set = hns3_flow_ctrl_set, 7435 .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set, 7436 .mac_addr_add = hns3_add_mac_addr, 7437 .mac_addr_remove = hns3_remove_mac_addr, 7438 .mac_addr_set = hns3_set_default_mac_addr, 7439 .set_mc_addr_list = hns3_set_mc_mac_addr_list, 7440 .link_update = hns3_dev_link_update, 7441 .dev_set_link_up = hns3_dev_set_link_up, 7442 .dev_set_link_down = hns3_dev_set_link_down, 7443 .rss_hash_update = hns3_dev_rss_hash_update, 7444 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, 7445 .reta_update = hns3_dev_rss_reta_update, 7446 .reta_query = hns3_dev_rss_reta_query, 7447 .flow_ops_get = hns3_dev_flow_ops_get, 7448 .vlan_filter_set = hns3_vlan_filter_set, 7449 .vlan_tpid_set = hns3_vlan_tpid_set, 7450 .vlan_offload_set = hns3_vlan_offload_set, 7451 .vlan_pvid_set = hns3_vlan_pvid_set, 7452 .get_reg = hns3_get_regs, 7453 .get_module_info = hns3_get_module_info, 7454 .get_module_eeprom = hns3_get_module_eeprom, 7455 .get_dcb_info = hns3_get_dcb_info, 7456 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, 7457 .fec_get_capability = hns3_fec_get_capability, 7458 .fec_get = hns3_fec_get, 7459 .fec_set = hns3_fec_set, 7460 .tm_ops_get = hns3_tm_ops_get, 7461 .tx_done_cleanup = hns3_tx_done_cleanup, 7462 .timesync_enable = hns3_timesync_enable, 7463 .timesync_disable = hns3_timesync_disable, 7464 .timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp, 7465 .timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp, 7466 .timesync_adjust_time = hns3_timesync_adjust_time, 7467 .timesync_read_time = hns3_timesync_read_time, 7468 .timesync_write_time = hns3_timesync_write_time, 7469 }; 7470 7471 static const struct hns3_reset_ops hns3_reset_ops = { 7472 .reset_service = hns3_reset_service, 7473 .stop_service = hns3_stop_service, 7474 .prepare_reset = hns3_prepare_reset, 7475 .wait_hardware_ready = hns3_wait_hardware_ready, 7476 .reinit_dev = hns3_reinit_dev, 7477 .restore_conf = hns3_restore_conf, 7478 .start_service = hns3_start_service, 7479 }; 7480 7481 static int 7482 hns3_dev_init(struct rte_eth_dev *eth_dev) 7483 { 7484 struct hns3_adapter *hns = eth_dev->data->dev_private; 7485 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 7486 struct rte_ether_addr *eth_addr; 7487 struct hns3_hw *hw = &hns->hw; 7488 int ret; 7489 7490 PMD_INIT_FUNC_TRACE(); 7491 7492 hns3_flow_init(eth_dev); 7493 7494 hns3_set_rxtx_function(eth_dev); 7495 eth_dev->dev_ops = &hns3_eth_dev_ops; 
7496	eth_dev->rx_queue_count = hns3_rx_queue_count;
7497	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
7498		ret = hns3_mp_init_secondary();
7499		if (ret) {
7500			PMD_INIT_LOG(ERR, "Failed to init for secondary "
7501				     "process, ret = %d", ret);
7502			goto err_mp_init_secondary;
7503		}
7504		hw->secondary_cnt++;
7505		hns3_tx_push_init(eth_dev);
7506		return 0;
7507	}
7508
7509	ret = hns3_mp_init_primary();
7510	if (ret) {
7511		PMD_INIT_LOG(ERR,
7512			     "Failed to init for primary process, ret = %d",
7513			     ret);
7514		goto err_mp_init_primary;
7515	}
7516
7517	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
7518	hns->is_vf = false;
7519	hw->data = eth_dev->data;
7520	hns3_parse_devargs(eth_dev);
7521
7522	/*
7523	 * Set the default max packet size according to the MTU
7524	 * default value in the DPDK framework.
7525	 */
7526	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;
7527
7528	ret = hns3_reset_init(hw);
7529	if (ret)
7530		goto err_init_reset;
7531	hw->reset.ops = &hns3_reset_ops;
7532
7533	ret = hns3_init_pf(eth_dev);
7534	if (ret) {
7535		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
7536		goto err_init_pf;
7537	}
7538
7539	/* Allocate memory for storing MAC addresses */
7540	eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac",
7541					       sizeof(struct rte_ether_addr) *
7542					       HNS3_UC_MACADDR_NUM, 0);
7543	if (eth_dev->data->mac_addrs == NULL) {
7544		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
7545			     "to store MAC addresses",
7546			     sizeof(struct rte_ether_addr) *
7547			     HNS3_UC_MACADDR_NUM);
7548		ret = -ENOMEM;
7549		goto err_rte_zmalloc;
7550	}
7551
7552	eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
7553	if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
7554		rte_eth_random_addr(hw->mac.mac_addr);
7555		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
7556				       (struct rte_ether_addr *)hw->mac.mac_addr);
7557		hns3_warn(hw, "default mac_addr from firmware is an invalid "
7558			  "unicast address, using random MAC address %s",
7559			  mac_str);
7560	}
7561	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
7562			    &eth_dev->data->mac_addrs[0]);
7563
7564	hw->adapter_state = HNS3_NIC_INITIALIZED;
7565
7566	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
7567	    SCHEDULE_PENDING) {
7568		hns3_err(hw, "Reschedule reset service after dev_init");
7569		hns3_schedule_reset(hns);
7570	} else {
7571		/* IMP will wait for the ready flag before reset */
7572		hns3_notify_reset_ready(hw, false);
7573	}
7574
7575	hns3_info(hw, "hns3 dev initialization successful!");
7576	return 0;
7577
7578 err_rte_zmalloc:
7579	hns3_uninit_pf(eth_dev);
7580
7581 err_init_pf:
7582	rte_free(hw->reset.wait_data);
7583
7584 err_init_reset:
7585	hns3_mp_uninit_primary();
7586
7587 err_mp_init_primary:
7588 err_mp_init_secondary:
7589	eth_dev->dev_ops = NULL;
7590	eth_dev->rx_pkt_burst = NULL;
7591	eth_dev->rx_descriptor_status = NULL;
7592	eth_dev->tx_pkt_burst = NULL;
7593	eth_dev->tx_pkt_prepare = NULL;
7594	eth_dev->tx_descriptor_status = NULL;
7595	return ret;
7596 }
7597
7598 static int
7599 hns3_dev_uninit(struct rte_eth_dev *eth_dev)
7600 {
7601	struct hns3_adapter *hns = eth_dev->data->dev_private;
7602	struct hns3_hw *hw = &hns->hw;
7603
7604	PMD_INIT_FUNC_TRACE();
7605
7606	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
7607		return 0;
7608
7609	if (hw->adapter_state < HNS3_NIC_CLOSING)
7610		hns3_dev_close(eth_dev);
7611
7612	hw->adapter_state = HNS3_NIC_REMOVED;
7613	return 0;
7614 }
7615
7616 static int
7617 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
7618		   struct rte_pci_device *pci_dev)
7619 {
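	/*
	 * The generic probe helper allocates an ethdev with a
	 * hns3_adapter-sized private data area and then invokes
	 * hns3_dev_init() on it.
	 */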
return rte_eth_dev_pci_generic_probe(pci_dev, 7621 sizeof(struct hns3_adapter), 7622 hns3_dev_init); 7623 } 7624 7625 static int 7626 eth_hns3_pci_remove(struct rte_pci_device *pci_dev) 7627 { 7628 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit); 7629 } 7630 7631 static const struct rte_pci_id pci_id_hns3_map[] = { 7632 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) }, 7633 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) }, 7634 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) }, 7635 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, 7636 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, 7637 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, 7638 { .vendor_id = 0, }, /* sentinel */ 7639 }; 7640 7641 static struct rte_pci_driver rte_hns3_pmd = { 7642 .id_table = pci_id_hns3_map, 7643 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 7644 .probe = eth_hns3_pci_probe, 7645 .remove = eth_hns3_pci_remove, 7646 }; 7647 7648 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd); 7649 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map); 7650 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); 7651 RTE_PMD_REGISTER_PARAM_STRING(net_hns3, 7652 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " 7653 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common " 7654 HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "); 7655 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE); 7656 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE); 7657
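/*
 * Illustrative usage of the runtime options registered above (the PCI
 * address is hypothetical):
 *
 *	dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common ...
 *
 * The hints are parsed in hns3_parse_devargs() and are taken into account
 * when hns3_set_rxtx_function() selects the Rx/Tx burst functions.
 */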