/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_kvargs.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_dcb.h"
#include "hns3_mp.h"

#define HNS3_SERVICE_INTERVAL		1000000 /* us */
#define HNS3_SERVICE_QUICK_INTERVAL	10
#define HNS3_INVALID_PVID		0xFFFF

#define HNS3_FILTER_TYPE_VF		0
#define HNS3_FILTER_TYPE_PORT		1
#define HNS3_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HNS3_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HNS3_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HNS3_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HNS3_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HNS3_FILTER_FE_EGRESS		(HNS3_FILTER_FE_NIC_EGRESS_B \
					| HNS3_FILTER_FE_ROCE_EGRESS_B)
#define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
					| HNS3_FILTER_FE_ROCE_INGRESS_B)

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

#define HNS3_VECTOR0_IMP_RESET_INT_B	1
#define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
#define HNS3_VECTOR0_IMP_RD_POISON_B	5U
#define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U

#define HNS3_RESET_WAIT_MS	100
#define HNS3_RESET_WAIT_CNT	200

/* FEC mode order defined in HNS3 hardware */
#define HNS3_HW_FEC_MODE_NOFEC	0
#define HNS3_HW_FEC_MODE_BASER	1
#define HNS3_HW_FEC_MODE_RS	2

enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,
	HNS3_VECTOR0_EVENT_MBX,
	HNS3_VECTOR0_EVENT_ERR,
	HNS3_VECTOR0_EVENT_PTP,
	HNS3_VECTOR0_EVENT_OTHER,
};

static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
				 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
				  RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};

static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
						  uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
				    int on);
static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
static bool hns3_update_link_status(struct hns3_hw *hw);

static int hns3_add_mc_addr(struct hns3_hw *hw,
			    struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_addr(struct hns3_hw *hw,
			       struct rte_ether_addr *mac_addr);
static int hns3_restore_fec(struct hns3_hw *hw);
static int hns3_query_dev_fec_info(struct hns3_hw *hw);
static int hns3_do_stop(struct hns3_adapter *hns);
static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);
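/*
 * Format a MAC address for logging. Bytes 1-3 are masked out ("**") so
 * that log output does not expose the full address.
 */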
void hns3_ether_format_addr(char *buf, uint16_t size,
			    const struct rte_ether_addr *ether_addr)
{
	snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
		 ether_addr->addr_bytes[0],
		 ether_addr->addr_bytes[4],
		 ether_addr->addr_bytes[5]);
}

static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3_evt_cause
hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
			  uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.imp_cnt++;
		hns3_warn(hw, "IMP reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw, "IMP reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}

static enum hns3_evt_cause
hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
			     uint32_t *vec_val)
{
	struct hns3_hw *hw = &hns->hw;

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
	if (!is_delay) {
		hw->reset.stats.global_cnt++;
		hns3_warn(hw, "Global reset detected, clear reset status");
	} else {
		hns3_schedule_delayed_reset(hns);
		hns3_warn(hw,
			  "Global reset detected, don't clear reset status");
	}

	return HNS3_VECTOR0_EVENT_RST;
}
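/*
 * Vector 0 multiplexes several event sources. hns3_check_event_cause()
 * below demultiplexes them in a fixed priority order: IMP reset, then
 * global reset, then 1588 (PTP), then MSI-X/RAS errors, then mailbox,
 * and finally "other".
 */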
static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t vector0_int_stats;
	uint32_t cmdq_src_val;
	uint32_t hw_err_src_reg;
	uint32_t val;
	enum hns3_evt_cause ret;
	bool is_delay;

	/* fetch the events from their corresponding regs */
	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);

	is_delay = clearval == NULL ? true : false;
	/*
	 * Assumption: if reset and mailbox events happen to be reported
	 * together, only the reset event is processed and the mailbox
	 * events are deferred. Since the RX CMDQ event is not cleared this
	 * time, the hardware will raise another interrupt just for the
	 * mailbox.
	 */
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
		ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Global reset */
	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
		ret = hns3_proc_global_reset_event(hns, is_delay, &val);
		goto out;
	}

	/* Check for vector0 1588 event source */
	if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
		val = BIT(HNS3_VECTOR0_1588_INT_B);
		ret = HNS3_VECTOR0_EVENT_PTP;
		goto out;
	}

	/* check for vector0 msix event source */
	if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
		val = vector0_int_stats | hw_err_src_reg;
		ret = HNS3_VECTOR0_EVENT_ERR;
		goto out;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
		cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		val = cmdq_src_val;
		ret = HNS3_VECTOR0_EVENT_MBX;
		goto out;
	}

	val = vector0_int_stats;
	ret = HNS3_VECTOR0_EVENT_OTHER;
out:
	if (clearval)
		*clearval = val;
	return ret;
}

static bool
hns3_is_1588_event_type(uint32_t event_type)
{
	return (event_type == HNS3_VECTOR0_EVENT_PTP);
}

static void
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
{
	if (event_type == HNS3_VECTOR0_EVENT_RST ||
	    hns3_is_1588_event_type(event_type))
		hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
	else if (event_type == HNS3_VECTOR0_EVENT_MBX)
		hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3_clear_all_event_cause(struct hns3_hw *hw)
{
	uint32_t vector0_int_stats;

	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during IMP reset interrupt");

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during Global reset interrupt");

	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
			       BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
			       BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
			       BIT(HNS3_VECTOR0_CORERESET_INT_B));
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
			       BIT(HNS3_VECTOR0_1588_INT_B));
}

static void
hns3_handle_mac_tnl(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	uint32_t status;
	int ret;

	/* query and clear mac tnl interrupt */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
		return;
	}

	status = rte_le_to_cpu_32(desc.data[0]);
	if (status) {
		hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
					  false);
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret)
			hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
				 ret);
	}
}

static void
hns3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_evt_cause event_cause;
	uint32_t clearval = 0;
	uint32_t vector0_int;
	uint32_t ras_int;
	uint32_t cmdq_int;

	/* Disable interrupt */
	hns3_pf_disable_irq0(hw);

	event_cause = hns3_check_event_cause(hns, &clearval);
	vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hns3_clear_event_cause(hw, event_cause, clearval);
	/* vector 0 interrupt is shared with reset and mailbox source events. */
	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
		hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
		hns3_handle_mac_tnl(hw);
		hns3_handle_error(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
		hns3_warn(hw, "received reset interrupt");
		hns3_schedule_reset(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
		hns3_dev_handle_mbx_msg(hw);
	} else {
		hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
			  "ras_int_stat:0x%x cmdq_int_stat:0x%x",
			  vector0_int, ras_int, cmdq_int);
	}

	/* Enable interrupt if it is not caused by reset */
	hns3_pf_enable_irq0(hw);
}
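/*
 * The PF VLAN filter table is programmed in blocks of 160 VLAN IDs:
 * vlan_offset selects the block and each ID maps to a single bit of the
 * block's bitmap. As an illustrative example, vlan_id 1000 yields
 * offset_base = 1000 / 160 = 6, byte = (1000 % 160) / 8 = 5 and
 * bit = 1000 % 8 = 0.
 */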
static int
hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3_VLAN_ID_OFFSET_STEP	160
#define HNS3_VLAN_BYTE_SIZE		8
	struct hns3_vlan_filter_pf_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	uint8_t vlan_offset_byte_val;
	struct hns3_cmd_desc desc;
	uint8_t vlan_offset_byte;
	uint8_t vlan_offset_base;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
			   HNS3_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);

	req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_base;
	req->vlan_cfg = on ? 0 : 1;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
			 vlan_id, ret);

	return ret;
}
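/*
 * pf->vlan_list shadows the hardware VLAN filter table so that entries
 * can be replayed after a reset or after port-based VLAN is toggled.
 * hd_tbl_status records whether an entry is currently written to the
 * hardware table.
 */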
static void
hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id) {
			if (vlan_entry->hd_tbl_status)
				hns3_set_port_vlan_filter(hns, vlan_id, 0);
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			break;
		}
	}
}

static void
hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
			bool writen_to_tbl)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id)
			return;
	}

	vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
	if (vlan_entry == NULL) {
		hns3_err(hw, "Failed to malloc hns3 vlan table");
		return;
	}

	vlan_entry->hd_tbl_status = writen_to_tbl;
	vlan_entry->vlan_id = vlan_id;

	LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
}

static int
hns3_restore_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;
	uint16_t vlan_id;
	int ret = 0;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
		return hns3_vlan_pvid_configure(hns,
						hw->port_base_vlan_cfg.pvid, 1);

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			vlan_id = vlan_entry->vlan_id;
			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
			if (ret)
				break;
		}
	}

	return ret;
}
static int
hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
	struct hns3_hw *hw = &hns->hw;
	bool writen_to_tbl = false;
	int ret = 0;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. So, to receive packets without vlan, vlan id
	 * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
	 */
	if (on == 0 && vlan_id == 0)
		return 0;

	/*
	 * When port-based vlan is enabled, it is used as the vlan filter
	 * condition. In this case the vlan filter table is not updated when
	 * the user adds a new vlan or removes an existing one; only the vlan
	 * list is updated. The vlan ids in the list are written to the vlan
	 * filter table once port-based vlan is disabled.
	 */
	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
		writen_to_tbl = true;
	}

	if (ret == 0) {
		if (on)
			hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
		else
			hns3_rm_dev_vlan_table(hns, vlan_id);
	}
	return ret;
}

static int
hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
			 uint16_t tpid)
{
	struct hns3_rx_vlan_type_cfg_cmd *rx_req;
	struct hns3_tx_vlan_type_cfg_cmd *tx_req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_VLAN) {
		hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
			 ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
	tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
			 ret);
	return ret;
}

static int
hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
	rte_spinlock_unlock(&hw->lock);
	return ret;
}

static int
hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_rx_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
		     vcfg->strip_tag1_discard_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
		     vcfg->strip_tag2_discard_en ? 1 : 0);
	/*
	 * The current version does not support VF when the PF is driven by
	 * the DPDK driver, so only the PF vport needs to be configured.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
	return ret;
}
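/*
 * Rx VLAN strip behaviour depends on the port-based VLAN state (see
 * hns3_en_hw_strip_rxvtag() below): with port-based VLAN disabled only
 * tag2 strip follows the user request, while with a PVID enabled tag2
 * is always stripped and discarded and tag1 strip follows the request.
 */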
static void
hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_rx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
}

static void
hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
}

static int
hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
{
	struct hns3_rx_vtag_cfg rxvlan_cfg;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		rxvlan_cfg.strip_tag1_en = false;
		rxvlan_cfg.strip_tag2_en = enable;
		rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		rxvlan_cfg.strip_tag1_en = enable;
		rxvlan_cfg.strip_tag2_en = true;
		rxvlan_cfg.strip_tag2_discard_en = true;
	}

	rxvlan_cfg.strip_tag1_discard_en = false;
	rxvlan_cfg.vlan1_vlan_prionly = false;
	rxvlan_cfg.vlan2_vlan_prionly = false;
	rxvlan_cfg.rx_vlan_offload_en = enable;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
	if (ret) {
		hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
			 enable ? "enable" : "disable", ret);
		return ret;
	}

	hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);

	return ret;
}

static int
hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
			  uint8_t fe_type, bool filter_en, uint8_t vf_id)
{
	struct hns3_vlan_filter_ctrl_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set vlan filter fail, ret =%d", ret);

	return ret;
}
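/*
 * At init time both filter engines are switched off: the VF (egress)
 * filter stays disabled, and the port (ingress) filter is later toggled
 * to follow the RTE_ETH_RX_OFFLOAD_VLAN_FILTER offload and the
 * promiscuous state.
 */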
static int
hns3_vlan_filter_init(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
					HNS3_FILTER_FE_EGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret) {
		hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, false,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);

	return ret;
}

static int
hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, enable,
					HNS3_PF_FUNC_ID);
	if (ret)
		hns3_err(hw, "failed to %s port vlan filter, ret = %d",
			 enable ? "enable" : "disable", ret);

	return ret;
}

static int
hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_rxmode *rxmode;
	unsigned int tmp_mask;
	bool enable;
	int ret = 0;

	rte_spinlock_lock(&hw->lock);
	rxmode = &dev->data->dev_conf.rxmode;
	tmp_mask = (unsigned int)mask;
	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
		/* ignore vlan filter configuration during promiscuous mode */
		if (!dev->data->promiscuous) {
			/* Enable or disable VLAN filter */
			enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
				 true : false;

			ret = hns3_enable_vlan_filter(hns, enable);
			if (ret) {
				rte_spinlock_unlock(&hw->lock);
				hns3_err(hw, "failed to %s rx filter, ret = %d",
					 enable ? "enable" : "disable", ret);
				return ret;
			}
		}
	}

	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
			 true : false;

		ret = hns3_en_hw_strip_rxvtag(hns, enable);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			hns3_err(hw, "failed to %s rx strip, ret = %d",
				 enable ? "enable" : "disable", ret);
			return ret;
		}
	}

	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_tx_cfg_cmd *req;
	struct hns3_cmd_desc desc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = vcfg->default_tag1;
	req->def_vlan_tag2 = vcfg->default_tag2;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
		     vcfg->accept_tag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
		     vcfg->accept_untag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
		     vcfg->accept_tag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
		     vcfg->accept_untag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
		     vcfg->insert_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
		     vcfg->insert_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
		     vcfg->tag_shift_mode_en ? 1 : 0);

	/*
	 * The current version does not support VF when the PF is driven by
	 * the DPDK driver, so only the PF vport needs to be configured.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);

	return ret;
}
static int
hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
		     uint16_t pvid)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_vtag_cfg txvlan_cfg;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
		txvlan_cfg.accept_tag1 = true;
		txvlan_cfg.insert_tag1_en = false;
		txvlan_cfg.default_tag1 = 0;
	} else {
		txvlan_cfg.accept_tag1 =
			hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
		txvlan_cfg.insert_tag1_en = true;
		txvlan_cfg.default_tag1 = pvid;
	}

	txvlan_cfg.accept_untag1 = true;
	txvlan_cfg.accept_tag2 = true;
	txvlan_cfg.accept_untag2 = true;
	txvlan_cfg.insert_tag2_en = false;
	txvlan_cfg.default_tag2 = 0;
	txvlan_cfg.tag_shift_mode_en = true;

	ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
	if (ret) {
		hns3_err(hw, "pf vlan set pvid failed, pvid = %u, ret = %d",
			 pvid, ret);
		return ret;
	}

	hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
	return ret;
}

static void
hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
			vlan_entry->hd_tbl_status = false;
		}
	}

	if (is_del_list) {
		vlan_entry = LIST_FIRST(&pf->vlan_list);
		while (vlan_entry) {
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);
			vlan_entry = LIST_FIRST(&pf->vlan_list);
		}
	}
}

static void
hns3_add_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (!vlan_entry->hd_tbl_status) {
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
			vlan_entry->hd_tbl_status = true;
		}
	}
}

static void
hns3_remove_all_vlan_table(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	hns3_rm_all_vlan_table(hns, true);
	if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
		ret = hns3_set_port_vlan_filter(hns,
						hw->port_base_vlan_cfg.pvid, 0);
		if (ret) {
			hns3_err(hw, "Failed to remove all vlan table, ret =%d",
				 ret);
			return;
		}
	}
}
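/*
 * While a PVID is enabled it is the only VLAN programmed in the hardware
 * filter table: the old PVID and all user VLANs are removed from hardware
 * (their list entries are kept) and the new PVID is added. Disabling the
 * PVID removes it and replays the user VLAN list.
 */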
static int
hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
				uint16_t port_base_vlan_state,
				uint16_t new_pvid)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t old_pvid;
	int ret;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
		old_pvid = hw->port_base_vlan_cfg.pvid;
		if (old_pvid != HNS3_INVALID_PVID) {
			ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
			if (ret) {
				hns3_err(hw, "failed to remove old pvid %u, "
					 "ret = %d", old_pvid, ret);
				return ret;
			}
		}

		hns3_rm_all_vlan_table(hns, false);
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
		if (ret) {
			hns3_err(hw, "failed to add new pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}
	} else {
		ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
		if (ret) {
			hns3_err(hw, "failed to remove pvid %u, ret = %d",
				 new_pvid, ret);
			return ret;
		}

		hns3_add_all_vlan_table(hns);
	}
	return 0;
}

static int
hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
{
	struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
	struct hns3_rx_vtag_cfg rx_vlan_cfg;
	bool rx_strip_en;
	int ret;

	rx_strip_en = old_cfg->rx_vlan_offload_en;
	if (on) {
		rx_vlan_cfg.strip_tag1_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_en = true;
		rx_vlan_cfg.strip_tag2_discard_en = true;
	} else {
		rx_vlan_cfg.strip_tag1_en = false;
		rx_vlan_cfg.strip_tag2_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_discard_en = false;
	}
	rx_vlan_cfg.strip_tag1_discard_en = false;
	rx_vlan_cfg.vlan1_vlan_prionly = false;
	rx_vlan_cfg.vlan2_vlan_prionly = false;
	rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
	if (ret)
		return ret;

	hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
	return ret;
}
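/*
 * PVID (re)configuration below touches three things in order: Tx vtag
 * insertion, Rx strip and the VLAN filter table. On failure the steps
 * already applied are rolled back in reverse through the error labels.
 */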
static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t port_base_vlan_state;
	int ret, err;

	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
		if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
			hns3_warn(hw, "Invalid operation! As current pvid set "
				  "is %u, disable pvid %u is invalid",
				  hw->port_base_vlan_cfg.pvid, pvid);
		return 0;
	}

	port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
				    HNS3_PORT_BASE_VLAN_DISABLE;
	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
			 ret);
		return ret;
	}

	ret = hns3_en_pvid_strip(hns, on);
	if (ret) {
		hns3_err(hw, "failed to config rx vlan strip for pvid, "
			 "ret = %d", ret);
		goto pvid_vlan_strip_fail;
	}

	if (pvid == HNS3_INVALID_PVID)
		goto out;
	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to update vlan filter entries, ret = %d",
			 ret);
		goto vlan_filter_set_fail;
	}

out:
	hw->port_base_vlan_cfg.state = port_base_vlan_state;
	hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
	return ret;

vlan_filter_set_fail:
	err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
				 HNS3_PORT_BASE_VLAN_ENABLE);
	if (err)
		hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
	err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
				   hw->port_base_vlan_cfg.pvid);
	if (err)
		hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

	return ret;
}

static int
hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool pvid_en_state_change;
	uint16_t pvid_state;
	int ret;

	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
		hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
			 RTE_ETHER_MAX_VLAN_ID);
		return -EINVAL;
	}

	/*
	 * If the PVID enable state changes, the PVID state cached in
	 * struct hns3_tx_queue/hns3_rx_queue must be refreshed as well.
	 */
	pvid_state = hw->port_base_vlan_cfg.state;
	if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
	    (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
		pvid_en_state_change = false;
	else
		pvid_en_state_change = true;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_pvid_configure(hns, pvid, on);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		return ret;
	/*
	 * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE do the PVID related
	 * operations in Tx/Rx need to be processed by the PMD.
	 */
	if (pvid_en_state_change &&
	    hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
		hns3_update_all_queues_pvid_proc_en(hw);

	return 0;
}

static int
hns3_default_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
	 * table, packets without vlan won't be received. So, add vlan 0 as
	 * the default vlan.
	 */
	ret = hns3_vlan_filter_configure(hns, 0, 1);
	if (ret)
		hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
	return ret;
}
static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * This function can be called in both the initialization and the
	 * reset process. In the reset process it means that the hardware
	 * has been reset successfully and the configuration needs to be
	 * restored so that it remains unchanged before and after the reset.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
	}

	ret = hns3_vlan_filter_init(hns);
	if (ret) {
		hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
		return ret;
	}

	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
				       RTE_ETHER_TYPE_VLAN);
	if (ret) {
		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
		return ret;
	}

	/*
	 * In the reinit-dev stage of the reset process the following
	 * vlan-related configurations may differ from those at
	 * initialization; they are restored to hardware later in
	 * hns3_restore_vlan_table and hns3_restore_vlan_conf.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
		if (ret) {
			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
			return ret;
		}

		ret = hns3_en_hw_strip_rxvtag(hns, false);
		if (ret) {
			hns3_err(hw, "rx strip configure fail in pf, ret =%d",
				 ret);
			return ret;
		}
	}

	return hns3_default_vlan_config(hns);
}

static int
hns3_restore_vlan_conf(struct hns3_adapter *hns)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	bool enable;
	int ret;

	if (!hw->data->promiscuous) {
		/* restore vlan filter states */
		offloads = hw->data->dev_conf.rxmode.offloads;
		enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
		ret = hns3_enable_vlan_filter(hns, enable);
		if (ret) {
			hns3_err(hw, "failed to restore vlan rx filter conf, "
				 "ret = %d", ret);
			return ret;
		}
	}

	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
	if (ret) {
		hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
		return ret;
	}

	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
	if (ret)
		hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);

	return ret;
}
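/*
 * Device-level VLAN configuration applies the strip/filter offloads from
 * rxmode first and then, only if requested in txmode, the PVID insert
 * setting.
 */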
static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_txmode *txmode;
	struct hns3_hw *hw = &hns->hw;
	int mask;
	int ret;

	txmode = &data->dev_conf.txmode;
	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
		hns3_warn(hw,
			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
			  "configuration is not supported! Ignore these two "
			  "parameters: hw_vlan_reject_tagged(%u), "
			  "hw_vlan_reject_untagged(%u)",
			  txmode->hw_vlan_reject_tagged,
			  txmode->hw_vlan_reject_untagged);

	/* Apply vlan offload setting */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
	ret = hns3_vlan_offload_set(dev, mask);
	if (ret) {
		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * If no pvid config is set in rte_eth_conf, the driver need not
	 * write any VLAN pvid related configuration to hardware.
	 */
	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
		return 0;

	/* Apply pvid setting */
	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
				 txmode->hw_vlan_insert_pvid);
	if (ret)
		hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
			 txmode->pvid, ret);

	return ret;
}

static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
		unsigned int tso_mss_max)
{
	struct hns3_cfg_tso_status_cmd *req;
	struct hns3_cmd_desc desc;
	uint16_t tso_mss;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hns3_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_min);
	req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_max);
	req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
		   uint16_t *allocated_size, bool is_alloc)
{
	struct hns3_umv_spc_alc_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_umv_spc_alc_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
	hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
	req->space_size = rte_cpu_to_le_32(space_size);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
			     is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = rte_le_to_cpu_32(desc.data[1]);

	return 0;
}

static int
hns3_init_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t allocated_size = 0;
	int ret;

	ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
				 true);
	if (ret)
		return ret;

	if (allocated_size < pf->wanted_umv_size)
		PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
			     pf->wanted_umv_size, allocated_size);

	pf->max_umv_size = (!!allocated_size) ? allocated_size :
						pf->wanted_umv_size;
	pf->used_umv_size = 0;
	return 0;
}
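/*
 * UMV (unicast MAC-VLAN) table space is requested from firmware at init
 * time; firmware may grant less than pf->wanted_umv_size, in which case
 * the granted size becomes the effective limit (pf->max_umv_size, set
 * above) that used_umv_size is tracked against by the helpers below.
 */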
static int
hns3_uninit_umv_space(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	if (pf->max_umv_size == 0)
		return 0;

	ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
	if (ret)
		return ret;

	pf->max_umv_size = 0;

	return 0;
}

static bool
hns3_is_umv_space_full(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	bool is_full;

	is_full = (pf->used_umv_size >= pf->max_umv_size);

	return is_full;
}

static void
hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (is_free) {
		if (pf->used_umv_size > 0)
			pf->used_umv_size--;
	} else
		pf->used_umv_size++;
}

static void
hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
		      const uint8_t *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
			    ((uint32_t)mac_addr[2] << 16) |
			    ((uint32_t)mac_addr[1] << 8) |
			    (uint32_t)mac_addr[0];
	uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];

	hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
		hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
	new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
}

static int
hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
			     uint8_t resp_code,
			     enum hns3_mac_vlan_tbl_opcode op)
{
	if (cmdq_resp) {
		hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
			 cmdq_resp);
		return -EIO;
	}

	if (op == HNS3_MAC_VLAN_ADD) {
		if (resp_code == 0 || resp_code == 1) {
			return 0;
		} else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for uc_overflow");
			return -ENOSPC;
		} else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for mc_overflow");
			return -ENOSPC;
		}

		hns3_err(hw, "add mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_REMOVE) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "remove mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "remove mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	} else if (op == HNS3_MAC_VLAN_LKUP) {
		if (resp_code == 0) {
			return 0;
		} else if (resp_code == 1) {
			hns3_dbg(hw, "lookup mac addr failed for miss");
			return -ENOENT;
		}

		hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
			 resp_code);
		return -EIO;
	}

	hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
		 op);

	return -EINVAL;
}
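/*
 * MAC-VLAN table operations use one command descriptor for unicast
 * entries and HNS3_MC_MAC_VLAN_OPS_DESC_NUM chained descriptors for
 * multicast entries, since multicast entries additionally carry the
 * per-VF bitmap (see hns3_update_desc_vfid() below).
 */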
static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req,
			 struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int ret;
	int i;

	if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) {
		for (i = 0; i < desc_num - 1; i++) {
			hns3_cmd_setup_basic_desc(&desc[i],
						  HNS3_OPC_MAC_VLAN_ADD, true);
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
			if (i == 0)
				memcpy(desc[i].data, req,
				       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		}
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD,
					  true);
	} else {
		hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
	}
	ret = hns3_cmd_send(hw, desc, desc_num);
	if (ret) {
		hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
			 ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc[0].retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_LKUP);
}

static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
		      struct hns3_mac_vlan_tbl_entry_cmd *req,
		      struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int cfg_status;
	int ret;
	int i;

	if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) {
		hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false);
		memcpy(desc->data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc->retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	} else {
		for (i = 0; i < desc_num; i++) {
			hns3_cmd_reuse_desc(&desc[i], false);
			if (i == desc_num - 1)
				desc[i].flag &=
					rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
			else
				desc[i].flag |=
					rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		}
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		desc[0].retval = 0;
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc[0].retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	}

	if (ret) {
		hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}

	return cfg_status;
}

static int
hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req)
{
	struct hns3_cmd_desc desc;
	uint8_t resp_code;
	uint16_t retval;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}
	resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc.retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_REMOVE);
}
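/*
 * Adding a unicast address first looks it up in the MAC-VLAN table:
 * -ENOENT means the entry is new and is added if UMV space remains,
 * while 0 means the address is already present and the add is a no-op.
 */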
static int
hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cmd_desc desc;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint16_t egress_port = 0;
	uint8_t vf_id;
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	/*
	 * The current version does not support VF when the PF is driven by
	 * the DPDK driver, so only the PF vport needs to be configured.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
		       HNS3_MAC_EPORT_VFID_S, vf_id);

	req.egress_port = rte_cpu_to_le_16(egress_port);

	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

	/*
	 * Look up the mac address in the mac_vlan table and add it if the
	 * entry is nonexistent. Duplicate unicast entries are not allowed
	 * in the mac vlan table.
	 */
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc,
				       HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
	if (ret == -ENOENT) {
		if (!hns3_is_umv_space_full(hw)) {
			ret = hns3_add_mac_vlan_tbl(hw, &req, &desc,
						    HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
			if (!ret)
				hns3_update_umv_space(hw, false);
			return ret;
		}

		hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

		return -ENOSPC;
	}

	hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

	/* check if we just hit the duplicate */
	if (ret == 0) {
		hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
		return 0;
	}

	hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
		 mac_str);

	return ret;
}

static int
hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int ret;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses */
		if (rte_is_same_ether_addr(addr, mac_addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to add mc mac addr, same addrs"
				 "(%s) is added by the set_mc_mac_addr_list "
				 "API", mac_str);
			return -EINVAL;
		}
	}

	ret = hns3_add_mc_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
			 mac_str, ret);
	}
	return ret;
}

static int
hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	ret = hns3_remove_mc_addr(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d",
			 mac_str, ret);
	}
	return ret;
}
static int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * The hns3 network engine adds UC and MC mac addresses with
	 * different firmware commands, so the input address must be
	 * classified as UC or MC to pick the right command. It is
	 * recommended to set MC mac addresses through the
	 * rte_eth_dev_set_mc_addr_list API, because adding them through
	 * rte_eth_dev_mac_addr_add may reduce the number of UC mac
	 * addresses that can be configured.
	 */
	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hns3_add_mc_addr_common(hw, mac_addr);
	else
		ret = hns3_add_uc_addr_common(hw, mac_addr);

	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
		return ret;
	}

	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
	ret = hns3_remove_mac_vlan_tbl(hw, &req);
	if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */
		return 0;
	else if (ret == 0)
		hns3_update_umv_space(hw, true);

	return ret;
}

static void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hns3_remove_mc_addr_common(hw, mac_addr);
	else
		ret = hns3_remove_uc_addr_common(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}
}
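/*
 * Replacing the default MAC address is a remove-then-add sequence plus a
 * pause-address update. Each failure path restores the previous state:
 * a failed pause-address update removes the newly added address, and a
 * failed add re-installs the old one.
 */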
static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mac_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *oaddr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret, ret_val;

	rte_spinlock_lock(&hw->lock);
	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
	ret = hns3_remove_uc_addr_common(hw, oaddr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       oaddr);
		hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
			  mac_str, ret);

		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	ret = hns3_add_uc_addr_common(hw, mac_addr);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
		goto err_add_uc_addr;
	}

	ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
	if (ret) {
		hns3_err(hw, "Failed to configure mac pause address: %d", ret);
		goto err_pause_addr_cfg;
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	rte_spinlock_unlock(&hw->lock);

	return 0;

err_pause_addr_cfg:
	ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_warn(hw,
			  "Failed to roll back by removing the set mac addr(%s): %d",
			  mac_str, ret_val);
	}

err_add_uc_addr:
	ret_val = hns3_add_uc_addr_common(hw, oaddr);
	if (ret_val) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
		hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
			  mac_str, ret_val);
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	int err = 0;
	int ret;
	int i;

	for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) {
		addr = &hw->data->mac_addrs[i];
		if (rte_is_zero_ether_addr(addr))
			continue;
		if (rte_is_multicast_ether_addr(addr))
			ret = del ? hns3_remove_mc_addr(hw, addr) :
			      hns3_add_mc_addr(hw, addr);
		else
			ret = del ? hns3_remove_uc_addr_common(hw, addr) :
			      hns3_add_uc_addr_common(hw, addr);

		if (ret) {
			err = ret;
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to %s mac addr(%s) index:%d "
				 "ret = %d.", del ? "remove" : "restore",
				 mac_str, i, ret);
		}
	}
	return err;
}
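/*
 * A multicast entry carries a per-VF bitmap spread over the trailing
 * command descriptors: desc[1] holds the first 192 VF bits (six 32-bit
 * data words) and desc[2] the remainder. As an illustrative example,
 * vfid 200 lands in desc[2], word (200 - 192) / 32 = 0, bit 200 % 32 = 8.
 */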
static void
hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
{
#define HNS3_VF_NUM_IN_FIRST_DESC 192
	uint8_t word_num;
	uint8_t bit_num;

	if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &=
			    rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[1].data[word_num] |=
			    rte_cpu_to_le_32(1UL << bit_num);
	} else {
		word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &=
			    rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[2].data[word_num] |=
			    rte_cpu_to_le_32(1UL << bit_num);
	}
}

static int
hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint8_t vf_id;
	int ret;

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
				       HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}

	/*
	 * The current version does not support VF when the PF is driven by
	 * the DPDK driver, so only the PF vport needs to be configured.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_update_desc_vfid(desc, vf_id, false);
	ret = hns3_add_mac_vlan_tbl(hw, &req, desc,
				    HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
	if (ret) {
		if (ret == -ENOSPC)
			hns3_err(hw, "mc mac vlan table is full");
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
	}

	return ret;
}
1907 */ 1908 vf_id = HNS3_PF_FUNC_ID; 1909 hns3_update_desc_vfid(desc, vf_id, false); 1910 ret = hns3_add_mac_vlan_tbl(hw, &req, desc, 1911 HNS3_MC_MAC_VLAN_OPS_DESC_NUM); 1912 if (ret) { 1913 if (ret == -ENOSPC) 1914 hns3_err(hw, "mc mac vlan table is full"); 1915 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1916 mac_addr); 1917 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret); 1918 } 1919 1920 return ret; 1921 } 1922 1923 static int 1924 hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1925 { 1926 struct hns3_mac_vlan_tbl_entry_cmd req; 1927 struct hns3_cmd_desc desc[3]; 1928 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1929 uint8_t vf_id; 1930 int ret; 1931 1932 /* Check if mac addr is valid */ 1933 if (!rte_is_multicast_ether_addr(mac_addr)) { 1934 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1935 mac_addr); 1936 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid", 1937 mac_str); 1938 return -EINVAL; 1939 } 1940 1941 memset(&req, 0, sizeof(req)); 1942 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1943 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1944 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, 1945 HNS3_MC_MAC_VLAN_OPS_DESC_NUM); 1946 if (ret == 0) { 1947 /* 1948 * This mac addr exist, remove this handle's VFID for it. 1949 * In current version VF is not supported when PF is driven by 1950 * DPDK driver, just need to configure parameters for PF vport. 1951 */ 1952 vf_id = HNS3_PF_FUNC_ID; 1953 hns3_update_desc_vfid(desc, vf_id, true); 1954 1955 /* All the vfid is zero, so need to delete this entry */ 1956 ret = hns3_remove_mac_vlan_tbl(hw, &req); 1957 } else if (ret == -ENOENT) { 1958 /* This mac addr doesn't exist. */ 1959 return 0; 1960 } 1961 1962 if (ret) { 1963 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1964 mac_addr); 1965 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret); 1966 } 1967 1968 return ret; 1969 } 1970 1971 static int 1972 hns3_set_mc_addr_chk_param(struct hns3_hw *hw, 1973 struct rte_ether_addr *mc_addr_set, 1974 uint32_t nb_mc_addr) 1975 { 1976 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1977 struct rte_ether_addr *addr; 1978 uint32_t i; 1979 uint32_t j; 1980 1981 if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { 1982 hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " 1983 "invalid. valid range: 0~%d", 1984 nb_mc_addr, HNS3_MC_MACADDR_NUM); 1985 return -EINVAL; 1986 } 1987 1988 /* Check if input mac addresses are valid */ 1989 for (i = 0; i < nb_mc_addr; i++) { 1990 addr = &mc_addr_set[i]; 1991 if (!rte_is_multicast_ether_addr(addr)) { 1992 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1993 addr); 1994 hns3_err(hw, 1995 "failed to set mc mac addr, addr(%s) invalid.", 1996 mac_str); 1997 return -EINVAL; 1998 } 1999 2000 /* Check if there are duplicate addresses */ 2001 for (j = i + 1; j < nb_mc_addr; j++) { 2002 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 2003 hns3_ether_format_addr(mac_str, 2004 RTE_ETHER_ADDR_FMT_SIZE, 2005 addr); 2006 hns3_err(hw, "failed to set mc mac addr, " 2007 "addrs invalid. two same addrs(%s).", 2008 mac_str); 2009 return -EINVAL; 2010 } 2011 } 2012 2013 /* 2014 * Check if there are duplicate addresses between mac_addrs 2015 * and mc_addr_set 2016 */ 2017 for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) { 2018 if (rte_is_same_ether_addr(addr, 2019 &hw->data->mac_addrs[j])) { 2020 hns3_ether_format_addr(mac_str, 2021 RTE_ETHER_ADDR_FMT_SIZE, 2022 addr); 2023 hns3_err(hw, "failed to set mc mac addr, " 2024 "addrs invalid. 
addrs(%s) has already been "
					 "configured through the mac_addr "
					 "add API", mac_str);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void
hns3_set_mc_addr_calc_addr(struct hns3_hw *hw,
			   struct rte_ether_addr *mc_addr_set,
			   int mc_addr_num,
			   struct rte_ether_addr *reserved_addr_list,
			   int *reserved_addr_num,
			   struct rte_ether_addr *add_addr_list,
			   int *add_addr_num,
			   struct rte_ether_addr *rm_addr_list,
			   int *rm_addr_num)
{
	struct rte_ether_addr *addr;
	int current_addr_num;
	int reserved_num = 0;
	int add_num = 0;
	int rm_num = 0;
	int num;
	int i;
	int j;
	bool same_addr;

	/* Calculate the mc mac address list that should be removed */
	current_addr_num = hw->mc_addrs_num;
	for (i = 0; i < current_addr_num; i++) {
		addr = &hw->mc_addrs[i];
		same_addr = false;
		for (j = 0; j < mc_addr_num; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
				same_addr = true;
				break;
			}
		}

		if (!same_addr) {
			rte_ether_addr_copy(addr, &rm_addr_list[rm_num]);
			rm_num++;
		} else {
			rte_ether_addr_copy(addr,
					    &reserved_addr_list[reserved_num]);
			reserved_num++;
		}
	}

	/* Calculate the mc mac address list that should be added */
	for (i = 0; i < mc_addr_num; i++) {
		addr = &mc_addr_set[i];
		same_addr = false;
		for (j = 0; j < current_addr_num; j++) {
			if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) {
				same_addr = true;
				break;
			}
		}

		if (!same_addr) {
			rte_ether_addr_copy(addr, &add_addr_list[add_num]);
			add_num++;
		}
	}

	/* Reorder the mc mac address list maintained by driver */
	for (i = 0; i < reserved_num; i++)
		rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]);

	for (i = 0; i < rm_num; i++) {
		num = reserved_num + i;
		rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]);
	}

	*reserved_addr_num = reserved_num;
	*add_addr_num = add_num;
	*rm_addr_num = rm_num;
}

static int
hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM];
	struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM];
	struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM];
	struct rte_ether_addr *addr;
	int reserved_addr_num;
	int add_addr_num;
	int rm_addr_num;
	int mc_addr_num;
	int num;
	int ret;
	int i;

	/* Check if input parameters are valid */
	ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
	if (ret)
		return ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * Calculate the mc mac address lists to be removed and to be added,
	 * then reorder the mc mac address list maintained by the driver.
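	 * For example, if the driver currently holds {A, B} and the new set
	 * is {B, C}, then reserved = {B}, rm = {A} and add = {C}: hw->mc_addrs
	 * is first reordered to {B, A}, then A is removed and C is appended,
	 * leaving {B, C}.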
2137 */ 2138 mc_addr_num = (int)nb_mc_addr; 2139 hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num, 2140 reserved_addr_list, &reserved_addr_num, 2141 add_addr_list, &add_addr_num, 2142 rm_addr_list, &rm_addr_num); 2143 2144 /* Remove mc mac addresses */ 2145 for (i = 0; i < rm_addr_num; i++) { 2146 num = rm_addr_num - i - 1; 2147 addr = &rm_addr_list[num]; 2148 ret = hns3_remove_mc_addr(hw, addr); 2149 if (ret) { 2150 rte_spinlock_unlock(&hw->lock); 2151 return ret; 2152 } 2153 hw->mc_addrs_num--; 2154 } 2155 2156 /* Add mc mac addresses */ 2157 for (i = 0; i < add_addr_num; i++) { 2158 addr = &add_addr_list[i]; 2159 ret = hns3_add_mc_addr(hw, addr); 2160 if (ret) { 2161 rte_spinlock_unlock(&hw->lock); 2162 return ret; 2163 } 2164 2165 num = reserved_addr_num + i; 2166 rte_ether_addr_copy(addr, &hw->mc_addrs[num]); 2167 hw->mc_addrs_num++; 2168 } 2169 rte_spinlock_unlock(&hw->lock); 2170 2171 return 0; 2172 } 2173 2174 static int 2175 hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) 2176 { 2177 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 2178 struct hns3_hw *hw = &hns->hw; 2179 struct rte_ether_addr *addr; 2180 int err = 0; 2181 int ret; 2182 int i; 2183 2184 for (i = 0; i < hw->mc_addrs_num; i++) { 2185 addr = &hw->mc_addrs[i]; 2186 if (!rte_is_multicast_ether_addr(addr)) 2187 continue; 2188 if (del) 2189 ret = hns3_remove_mc_addr(hw, addr); 2190 else 2191 ret = hns3_add_mc_addr(hw, addr); 2192 if (ret) { 2193 err = ret; 2194 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2195 addr); 2196 hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d", 2197 del ? "Remove" : "Restore", mac_str, ret); 2198 } 2199 } 2200 return err; 2201 } 2202 2203 static int 2204 hns3_check_mq_mode(struct rte_eth_dev *dev) 2205 { 2206 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 2207 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 2208 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2209 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2210 struct rte_eth_dcb_rx_conf *dcb_rx_conf; 2211 struct rte_eth_dcb_tx_conf *dcb_tx_conf; 2212 uint8_t num_tc; 2213 int max_tc = 0; 2214 int i; 2215 2216 if ((rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) || 2217 (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB || 2218 tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) { 2219 hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.", 2220 rx_mq_mode, tx_mq_mode); 2221 return -EOPNOTSUPP; 2222 } 2223 2224 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 2225 dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; 2226 if (rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) { 2227 if (dcb_rx_conf->nb_tcs > pf->tc_max) { 2228 hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.", 2229 dcb_rx_conf->nb_tcs, pf->tc_max); 2230 return -EINVAL; 2231 } 2232 2233 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS || 2234 dcb_rx_conf->nb_tcs == HNS3_8_TCS)) { 2235 hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, " 2236 "nb_tcs(%d) != %d or %d in rx direction.", 2237 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS); 2238 return -EINVAL; 2239 } 2240 2241 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) { 2242 hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)", 2243 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs); 2244 return -EINVAL; 2245 } 2246 2247 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) { 2248 if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) { 2249 hns3_err(hw, "dcb_tc[%d] = %u in rx direction, " 2250 "is not equal to one in tx direction.", 2251 
					 i, dcb_rx_conf->dcb_tc[i]);
				return -EINVAL;
			}
			if (dcb_rx_conf->dcb_tc[i] > max_tc)
				max_tc = dcb_rx_conf->dcb_tc[i];
		}

		num_tc = max_tc + 1;
		if (num_tc > dcb_rx_conf->nb_tcs) {
			hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
				 num_tc, dcb_rx_conf->nb_tcs);
			return -EINVAL;
		}
	}

	return 0;
}

static int
hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en,
			   enum hns3_ring_type queue_type, uint16_t queue_id)
{
	struct hns3_cmd_desc desc;
	struct hns3_ctrl_vector_chain_cmd *req =
		(struct hns3_ctrl_vector_chain_cmd *)desc.data;
	enum hns3_opcode_type op;
	uint16_t tqp_type_and_id = 0;
	uint16_t type;
	uint16_t gl;
	int ret;

	op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
	hns3_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M,
					    HNS3_TQP_INT_ID_L_S);
	req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M,
					      HNS3_TQP_INT_ID_H_S);

	if (queue_type == HNS3_RING_TYPE_RX)
		gl = HNS3_RING_GL_RX;
	else
		gl = HNS3_RING_GL_TX;

	type = queue_type;

	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
		       type);
	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
		       gl);
	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
	req->int_cause_num = 1;
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.",
			 en ? "Map" : "Unmap", queue_id, vector_id, ret);
		return ret;
	}

	return 0;
}

static int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
	uint16_t vec;
	int ret;
	int i;

	/*
	 * In hns3 network engine, vector 0 is always the misc interrupt of this
	 * function, vector 1~N can be used respectively for the queues of the
	 * function. Tx and Rx queues with the same number share the interrupt
	 * vector. During initialization, all the hardware mapping relationships
	 * between queues and interrupt vectors must be cleared, so that errors
	 * caused by residual configurations, such as an unexpected Tx
	 * interrupt, can be avoided.
	 */
	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1; /* the last interrupt is reserved */
	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/*
		 * Set gap limiter/rate limiter/quantity limiter algorithm
		 * configuration for interrupt coalesce of queue's interrupt.
		 */
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
		/*
		 * QL(quantity limiter) is not used currently, just set 0 to
		 * close it.
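		 * (HNS3_TQP_INTR_QL_DEFAULT is therefore expected to be 0
		 * here.)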
		 */
		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);

		ret = hns3_bind_ring_with_vector(hw, vec, false,
						 HNS3_RING_TYPE_TX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "PF failed to unbind TX ring(%d) with "
					  "vector: %u, ret=%d", i, vec, ret);
			return ret;
		}

		ret = hns3_bind_ring_with_vector(hw, vec, false,
						 HNS3_RING_TYPE_RX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "PF failed to unbind RX ring(%d) with "
					  "vector: %u, ret=%d", i, vec, ret);
			return ret;
		}
	}

	return 0;
}

static int
hns3_setup_dcb(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (!hns3_dev_get_support(hw, DCB)) {
		hns3_err(hw, "this port does not support dcb configurations.");
		return -EOPNOTSUPP;
	}

	if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
		hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
		return -EOPNOTSUPP;
	}

	ret = hns3_dcb_configure(hns);
	if (ret)
		hns3_err(hw, "failed to config dcb: %d", ret);

	return ret;
}

static int
hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
{
	int ret;

	/*
	 * Some hardware doesn't support auto-negotiation, but users may not
	 * configure link_speeds (default 0), which means auto-negotiation.
	 * In this case, it should return success.
	 */
	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
	    hw->mac.support_autoneg == 0)
		return 0;

	if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
		ret = hns3_check_port_speed(hw, link_speeds);
		if (ret)
			return ret;
	}

	return 0;
}

static int
hns3_check_dev_conf(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	int ret;

	ret = hns3_check_mq_mode(dev);
	if (ret)
		return ret;

	return hns3_check_link_speed(hw, conf->link_speeds);
}

static int
hns3_dev_configure(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;
	struct rte_eth_rss_conf rss_conf;
	bool gro_en;
	int ret;

	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);

	/*
	 * Some versions of the hardware network engine do not support
	 * individually enabling/disabling/resetting the Tx or Rx queue. These
	 * devices must enable/disable/reset Tx and Rx queues at the same time.
	 * When the number of Tx queues allocated by upper applications is not
	 * equal to the number of Rx queues, the driver needs to set up fake Tx
	 * or Rx queues to adjust the numbers. Otherwise, the network engine
	 * cannot work as usual. These fake queues are imperceptible to, and
	 * cannot be used by, upper applications.
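	 * For example, if an application configures 8 Rx queues but only
	 * 4 Tx queues on such a device, the driver sets up 4 fake Tx queues
	 * so that both directions expose 8 queues to the hardware.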
2454 */ 2455 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); 2456 if (ret) { 2457 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret); 2458 hw->cfg_max_queues = 0; 2459 return ret; 2460 } 2461 2462 hw->adapter_state = HNS3_NIC_CONFIGURING; 2463 ret = hns3_check_dev_conf(dev); 2464 if (ret) 2465 goto cfg_err; 2466 2467 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) { 2468 ret = hns3_setup_dcb(dev); 2469 if (ret) 2470 goto cfg_err; 2471 } 2472 2473 /* When RSS is not configured, redirect the packet queue 0 */ 2474 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 2475 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2476 rss_conf = conf->rx_adv_conf.rss_conf; 2477 hw->rss_dis_flag = false; 2478 ret = hns3_dev_rss_hash_update(dev, &rss_conf); 2479 if (ret) 2480 goto cfg_err; 2481 } 2482 2483 ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu); 2484 if (ret != 0) 2485 goto cfg_err; 2486 2487 ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf); 2488 if (ret) 2489 goto cfg_err; 2490 2491 ret = hns3_dev_configure_vlan(dev); 2492 if (ret) 2493 goto cfg_err; 2494 2495 /* config hardware GRO */ 2496 gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false; 2497 ret = hns3_config_gro(hw, gro_en); 2498 if (ret) 2499 goto cfg_err; 2500 2501 hns3_init_rx_ptype_tble(dev); 2502 hw->adapter_state = HNS3_NIC_CONFIGURED; 2503 2504 return 0; 2505 2506 cfg_err: 2507 hw->cfg_max_queues = 0; 2508 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); 2509 hw->adapter_state = HNS3_NIC_INITIALIZED; 2510 2511 return ret; 2512 } 2513 2514 static int 2515 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) 2516 { 2517 struct hns3_config_max_frm_size_cmd *req; 2518 struct hns3_cmd_desc desc; 2519 2520 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false); 2521 2522 req = (struct hns3_config_max_frm_size_cmd *)desc.data; 2523 req->max_frm_size = rte_cpu_to_le_16(new_mps); 2524 req->min_frm_size = RTE_ETHER_MIN_LEN; 2525 2526 return hns3_cmd_send(hw, &desc, 1); 2527 } 2528 2529 static int 2530 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) 2531 { 2532 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2533 uint16_t original_mps = hns->pf.mps; 2534 int err; 2535 int ret; 2536 2537 ret = hns3_set_mac_mtu(hw, mps); 2538 if (ret) { 2539 hns3_err(hw, "failed to set mtu, ret = %d", ret); 2540 return ret; 2541 } 2542 2543 hns->pf.mps = mps; 2544 ret = hns3_buffer_alloc(hw); 2545 if (ret) { 2546 hns3_err(hw, "failed to allocate buffer, ret = %d", ret); 2547 goto rollback; 2548 } 2549 2550 return 0; 2551 2552 rollback: 2553 err = hns3_set_mac_mtu(hw, original_mps); 2554 if (err) { 2555 hns3_err(hw, "fail to rollback MTU, err = %d", err); 2556 return ret; 2557 } 2558 hns->pf.mps = original_mps; 2559 2560 return ret; 2561 } 2562 2563 static int 2564 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 2565 { 2566 struct hns3_adapter *hns = dev->data->dev_private; 2567 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; 2568 struct hns3_hw *hw = &hns->hw; 2569 int ret; 2570 2571 if (dev->data->dev_started) { 2572 hns3_err(hw, "Failed to set mtu, port %u must be stopped " 2573 "before configuration", dev->data->port_id); 2574 return -EBUSY; 2575 } 2576 2577 rte_spinlock_lock(&hw->lock); 2578 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN); 2579 2580 /* 2581 * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely 2582 * assign to "uint16_t" type variable. 
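	 * (frame_size here is mtu + HNS3_ETH_OVERHEAD, raised above to at
	 * least HNS3_DEFAULT_FRAME_LEN.)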
	 */
	ret = hns3_config_mtu(hw, (uint16_t)frame_size);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
			 dev->data->port_id, mtu, ret);
		return ret;
	}

	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static uint32_t
hns3_get_copper_port_speed_capa(uint32_t supported_speed)
{
	uint32_t speed_capa = 0;

	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
	if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_10M;
	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
	if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_100M;
	if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_1G;

	return speed_capa;
}

static uint32_t
hns3_get_fiber_port_speed_capa(uint32_t supported_speed)
{
	uint32_t speed_capa = 0;

	if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_1G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_10G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_25G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_40G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_50G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_100G;
	if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
		speed_capa |= RTE_ETH_LINK_SPEED_200G;

	return speed_capa;
}

static uint32_t
hns3_get_speed_capa(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;
	uint32_t speed_capa;

	if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
		speed_capa =
			hns3_get_copper_port_speed_capa(mac->supported_speed);
	else
		speed_capa =
			hns3_get_fiber_port_speed_capa(mac->supported_speed);

	if (mac->support_autoneg == 0)
		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;

	return speed_capa;
}

int
hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint16_t queue_num = hw->tqps_num;

	/*
	 * In interrupt mode, 'max_rx_queues' is set based on the number of
	 * MSI-X interrupt resources of the hardware.
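	 * (hw->intr_tqps_num was computed from the available vectors in
	 * hns3_init_ring_with_vector().)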
2668 */ 2669 if (hw->data->dev_conf.intr_conf.rxq == 1) 2670 queue_num = hw->intr_tqps_num; 2671 2672 info->max_rx_queues = queue_num; 2673 info->max_tx_queues = hw->tqps_num; 2674 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ 2675 info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; 2676 info->max_mac_addrs = HNS3_UC_MACADDR_NUM; 2677 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; 2678 info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; 2679 info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 2680 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 2681 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 2682 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | 2683 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 2684 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | 2685 RTE_ETH_RX_OFFLOAD_KEEP_CRC | 2686 RTE_ETH_RX_OFFLOAD_SCATTER | 2687 RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 2688 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | 2689 RTE_ETH_RX_OFFLOAD_RSS_HASH | 2690 RTE_ETH_RX_OFFLOAD_TCP_LRO); 2691 info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | 2692 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | 2693 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | 2694 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | 2695 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | 2696 RTE_ETH_TX_OFFLOAD_MULTI_SEGS | 2697 RTE_ETH_TX_OFFLOAD_TCP_TSO | 2698 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | 2699 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | 2700 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | 2701 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | 2702 hns3_txvlan_cap_get(hw)); 2703 2704 if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) 2705 info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; 2706 2707 if (hns3_dev_get_support(hw, INDEP_TXRX)) 2708 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 2709 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 2710 2711 if (hns3_dev_get_support(hw, PTP)) 2712 info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP; 2713 2714 info->rx_desc_lim = (struct rte_eth_desc_lim) { 2715 .nb_max = HNS3_MAX_RING_DESC, 2716 .nb_min = HNS3_MIN_RING_DESC, 2717 .nb_align = HNS3_ALIGN_RING_DESC, 2718 }; 2719 2720 info->tx_desc_lim = (struct rte_eth_desc_lim) { 2721 .nb_max = HNS3_MAX_RING_DESC, 2722 .nb_min = HNS3_MIN_RING_DESC, 2723 .nb_align = HNS3_ALIGN_RING_DESC, 2724 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, 2725 .nb_mtu_seg_max = hw->max_non_tso_bd_num, 2726 }; 2727 2728 info->speed_capa = hns3_get_speed_capa(hw); 2729 info->default_rxconf = (struct rte_eth_rxconf) { 2730 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, 2731 /* 2732 * If there are no available Rx buffer descriptors, incoming 2733 * packets are always dropped by hardware based on hns3 network 2734 * engine. 
2735 */ 2736 .rx_drop_en = 1, 2737 .offloads = 0, 2738 }; 2739 info->default_txconf = (struct rte_eth_txconf) { 2740 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, 2741 .offloads = 0, 2742 }; 2743 2744 info->reta_size = hw->rss_ind_tbl_size; 2745 info->hash_key_size = HNS3_RSS_KEY_SIZE; 2746 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; 2747 2748 info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2749 info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2750 info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2751 info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2752 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2753 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2754 2755 return 0; 2756 } 2757 2758 static int 2759 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, 2760 size_t fw_size) 2761 { 2762 struct hns3_adapter *hns = eth_dev->data->dev_private; 2763 struct hns3_hw *hw = &hns->hw; 2764 uint32_t version = hw->fw_version; 2765 int ret; 2766 2767 ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", 2768 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, 2769 HNS3_FW_VERSION_BYTE3_S), 2770 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, 2771 HNS3_FW_VERSION_BYTE2_S), 2772 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, 2773 HNS3_FW_VERSION_BYTE1_S), 2774 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, 2775 HNS3_FW_VERSION_BYTE0_S)); 2776 if (ret < 0) 2777 return -EINVAL; 2778 2779 ret += 1; /* add the size of '\0' */ 2780 if (fw_size < (size_t)ret) 2781 return ret; 2782 else 2783 return 0; 2784 } 2785 2786 static int 2787 hns3_update_port_link_info(struct rte_eth_dev *eth_dev) 2788 { 2789 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2790 int ret; 2791 2792 (void)hns3_update_link_status(hw); 2793 2794 ret = hns3_update_link_info(eth_dev); 2795 if (ret) 2796 hw->mac.link_status = RTE_ETH_LINK_DOWN; 2797 2798 return ret; 2799 } 2800 2801 static void 2802 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev, 2803 struct rte_eth_link *new_link) 2804 { 2805 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2806 struct hns3_mac *mac = &hw->mac; 2807 2808 switch (mac->link_speed) { 2809 case RTE_ETH_SPEED_NUM_10M: 2810 case RTE_ETH_SPEED_NUM_100M: 2811 case RTE_ETH_SPEED_NUM_1G: 2812 case RTE_ETH_SPEED_NUM_10G: 2813 case RTE_ETH_SPEED_NUM_25G: 2814 case RTE_ETH_SPEED_NUM_40G: 2815 case RTE_ETH_SPEED_NUM_50G: 2816 case RTE_ETH_SPEED_NUM_100G: 2817 case RTE_ETH_SPEED_NUM_200G: 2818 if (mac->link_status) 2819 new_link->link_speed = mac->link_speed; 2820 break; 2821 default: 2822 if (mac->link_status) 2823 new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; 2824 break; 2825 } 2826 2827 if (!mac->link_status) 2828 new_link->link_speed = RTE_ETH_SPEED_NUM_NONE; 2829 2830 new_link->link_duplex = mac->link_duplex; 2831 new_link->link_status = mac->link_status ? 
RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; 2832 new_link->link_autoneg = mac->link_autoneg; 2833 } 2834 2835 static int 2836 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) 2837 { 2838 #define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */ 2839 #define HNS3_MAX_LINK_CHECK_TIMES 20 /* 2s (100 * 20ms) in total */ 2840 2841 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 2842 uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES; 2843 struct hns3_mac *mac = &hw->mac; 2844 struct rte_eth_link new_link; 2845 int ret; 2846 2847 /* When port is stopped, report link down. */ 2848 if (eth_dev->data->dev_started == 0) { 2849 new_link.link_autoneg = mac->link_autoneg; 2850 new_link.link_duplex = mac->link_duplex; 2851 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE; 2852 new_link.link_status = RTE_ETH_LINK_DOWN; 2853 goto out; 2854 } 2855 2856 do { 2857 ret = hns3_update_port_link_info(eth_dev); 2858 if (ret) { 2859 hns3_err(hw, "failed to get port link info, ret = %d.", 2860 ret); 2861 break; 2862 } 2863 2864 if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP) 2865 break; 2866 2867 rte_delay_ms(HNS3_LINK_CHECK_INTERVAL); 2868 } while (retry_cnt--); 2869 2870 memset(&new_link, 0, sizeof(new_link)); 2871 hns3_setup_linkstatus(eth_dev, &new_link); 2872 2873 out: 2874 return rte_eth_linkstatus_set(eth_dev, &new_link); 2875 } 2876 2877 static int 2878 hns3_dev_set_link_up(struct rte_eth_dev *dev) 2879 { 2880 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2881 int ret; 2882 2883 /* 2884 * The "tx_pkt_burst" will be restored. But the secondary process does 2885 * not support the mechanism for notifying the primary process. 2886 */ 2887 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2888 hns3_err(hw, "secondary process does not support to set link up."); 2889 return -ENOTSUP; 2890 } 2891 2892 /* 2893 * If device isn't started Rx/Tx function is still disabled, setting 2894 * link up is not allowed. But it is probably better to return success 2895 * to reduce the impact on the upper layer. 2896 */ 2897 if (hw->adapter_state != HNS3_NIC_STARTED) { 2898 hns3_info(hw, "device isn't started, can't set link up."); 2899 return 0; 2900 } 2901 2902 if (!hw->set_link_down) 2903 return 0; 2904 2905 rte_spinlock_lock(&hw->lock); 2906 ret = hns3_cfg_mac_mode(hw, true); 2907 if (ret) { 2908 rte_spinlock_unlock(&hw->lock); 2909 hns3_err(hw, "failed to set link up, ret = %d", ret); 2910 return ret; 2911 } 2912 2913 hw->set_link_down = false; 2914 hns3_start_tx_datapath(dev); 2915 rte_spinlock_unlock(&hw->lock); 2916 2917 return 0; 2918 } 2919 2920 static int 2921 hns3_dev_set_link_down(struct rte_eth_dev *dev) 2922 { 2923 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2924 int ret; 2925 2926 /* 2927 * The "tx_pkt_burst" will be set to dummy function. But the secondary 2928 * process does not support the mechanism for notifying the primary 2929 * process. 2930 */ 2931 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2932 hns3_err(hw, "secondary process does not support to set link down."); 2933 return -ENOTSUP; 2934 } 2935 2936 /* 2937 * If device isn't started or the API has been called, link status is 2938 * down, return success. 
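	 * (hw->set_link_down records whether a link down has already been
	 * requested through this API.)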
	 */
	if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down)
		return 0;

	rte_spinlock_lock(&hw->lock);
	hns3_stop_tx_datapath(dev);
	ret = hns3_cfg_mac_mode(hw, false);
	if (ret) {
		hns3_start_tx_datapath(dev);
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "failed to set link down, ret = %d", ret);
		return ret;
	}

	hw->set_link_down = true;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (!(status->pf_state & HNS3_PF_STATE_DONE))
		return -EINVAL;

	pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;

	return 0;
}

static int
hns3_query_function_status(struct hns3_hw *hw)
{
#define HNS3_QUERY_MAX_CNT		10
#define HNS3_QUERY_SLEEP_MSECOND	1
	struct hns3_func_status_cmd *req;
	struct hns3_cmd_desc desc;
	int timeout = 0;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hns3_func_status_cmd *)desc.data;

	do {
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			PMD_INIT_LOG(ERR, "query function status failed %d",
				     ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;

		rte_delay_ms(HNS3_QUERY_SLEEP_MSECOND);
	} while (timeout++ < HNS3_QUERY_MAX_CNT);

	return hns3_parse_func_status(hw, req);
}

static int
hns3_get_pf_max_tqp_num(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) {
		/*
		 * The total_tqps_num obtained from firmware is the maximum tqp
		 * number of this port, which should be shared by the PF and
		 * its VFs. In most cases there is no need for the PF to own so
		 * many tqps. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, coming from
		 * the config file, is the maximum queue number assigned by the
		 * user to the PF of this port. So users can adapt the maximum
		 * queue number of the PF to their own application scenarios,
		 * which is more flexible. In addition, much memory can be
		 * saved because room for queue statistics is allocated
		 * according to the actual number of queues required. The
		 * maximum queue number of the PF for network engines with
		 * revision_id greater than 0x30 is assigned by the config
		 * file.
		 */
		if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) {
			hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) "
				 "must be greater than 0.",
				 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF);
			return -EINVAL;
		}

		hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
				       hw->total_tqps_num);
	} else {
		/*
		 * Due to the limitation on the number of PF interrupts
		 * available, the maximum queue number assigned to PF on
		 * the network engine with revision_id 0x21 is 64.
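		 * (HNS3_MAX_TQP_NUM_HIP08_PF used below reflects this limit.)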
3039 */ 3040 hw->tqps_num = RTE_MIN(hw->total_tqps_num, 3041 HNS3_MAX_TQP_NUM_HIP08_PF); 3042 } 3043 3044 return 0; 3045 } 3046 3047 static int 3048 hns3_query_pf_resource(struct hns3_hw *hw) 3049 { 3050 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3051 struct hns3_pf *pf = &hns->pf; 3052 struct hns3_pf_res_cmd *req; 3053 struct hns3_cmd_desc desc; 3054 int ret; 3055 3056 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); 3057 ret = hns3_cmd_send(hw, &desc, 1); 3058 if (ret) { 3059 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); 3060 return ret; 3061 } 3062 3063 req = (struct hns3_pf_res_cmd *)desc.data; 3064 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) + 3065 rte_le_to_cpu_16(req->ext_tqp_num); 3066 ret = hns3_get_pf_max_tqp_num(hw); 3067 if (ret) 3068 return ret; 3069 3070 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; 3071 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); 3072 3073 if (req->tx_buf_size) 3074 pf->tx_buf_size = 3075 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; 3076 else 3077 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; 3078 3079 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); 3080 3081 if (req->dv_buf_size) 3082 pf->dv_buf_size = 3083 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; 3084 else 3085 pf->dv_buf_size = HNS3_DEFAULT_DV; 3086 3087 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); 3088 3089 hw->num_msi = 3090 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), 3091 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); 3092 3093 return 0; 3094 } 3095 3096 static void 3097 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) 3098 { 3099 struct hns3_cfg_param_cmd *req; 3100 uint64_t mac_addr_tmp_high; 3101 uint8_t ext_rss_size_max; 3102 uint64_t mac_addr_tmp; 3103 uint32_t i; 3104 3105 req = (struct hns3_cfg_param_cmd *)desc[0].data; 3106 3107 /* get the configuration */ 3108 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3109 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); 3110 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 3111 HNS3_CFG_TQP_DESC_N_M, 3112 HNS3_CFG_TQP_DESC_N_S); 3113 3114 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3115 HNS3_CFG_PHY_ADDR_M, 3116 HNS3_CFG_PHY_ADDR_S); 3117 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3118 HNS3_CFG_MEDIA_TP_M, 3119 HNS3_CFG_MEDIA_TP_S); 3120 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3121 HNS3_CFG_RX_BUF_LEN_M, 3122 HNS3_CFG_RX_BUF_LEN_S); 3123 /* get mac address */ 3124 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); 3125 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3126 HNS3_CFG_MAC_ADDR_H_M, 3127 HNS3_CFG_MAC_ADDR_H_S); 3128 3129 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 3130 3131 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3132 HNS3_CFG_DEFAULT_SPEED_M, 3133 HNS3_CFG_DEFAULT_SPEED_S); 3134 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 3135 HNS3_CFG_RSS_SIZE_M, 3136 HNS3_CFG_RSS_SIZE_S); 3137 3138 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 3139 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 3140 3141 req = (struct hns3_cfg_param_cmd *)desc[1].data; 3142 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); 3143 3144 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3145 HNS3_CFG_SPEED_ABILITY_M, 3146 HNS3_CFG_SPEED_ABILITY_S); 3147 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 3148 
HNS3_CFG_UMV_TBL_SPACE_M, 3149 HNS3_CFG_UMV_TBL_SPACE_S); 3150 if (!cfg->umv_space) 3151 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF; 3152 3153 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]), 3154 HNS3_CFG_EXT_RSS_SIZE_M, 3155 HNS3_CFG_EXT_RSS_SIZE_S); 3156 /* 3157 * Field ext_rss_size_max obtained from firmware will be more flexible 3158 * for future changes and expansions, which is an exponent of 2, instead 3159 * of reading out directly. If this field is not zero, hns3 PF PMD 3160 * driver uses it as rss_size_max under one TC. Device, whose revision 3161 * id is greater than or equal to PCI_REVISION_ID_HIP09_A, obtains the 3162 * maximum number of queues supported under a TC through this field. 3163 */ 3164 if (ext_rss_size_max) 3165 cfg->rss_size_max = 1U << ext_rss_size_max; 3166 } 3167 3168 /* hns3_get_board_cfg: query the static parameter from NCL_config file in flash 3169 * @hw: pointer to struct hns3_hw 3170 * @hcfg: the config structure to be getted 3171 */ 3172 static int 3173 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg) 3174 { 3175 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM]; 3176 struct hns3_cfg_param_cmd *req; 3177 uint32_t offset; 3178 uint32_t i; 3179 int ret; 3180 3181 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) { 3182 offset = 0; 3183 req = (struct hns3_cfg_param_cmd *)desc[i].data; 3184 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM, 3185 true); 3186 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S, 3187 i * HNS3_CFG_RD_LEN_BYTES); 3188 /* Len should be divided by 4 when send to hardware */ 3189 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S, 3190 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT); 3191 req->offset = rte_cpu_to_le_32(offset); 3192 } 3193 3194 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM); 3195 if (ret) { 3196 PMD_INIT_LOG(ERR, "get config failed %d.", ret); 3197 return ret; 3198 } 3199 3200 hns3_parse_cfg(hcfg, desc); 3201 3202 return 0; 3203 } 3204 3205 static int 3206 hns3_parse_speed(int speed_cmd, uint32_t *speed) 3207 { 3208 switch (speed_cmd) { 3209 case HNS3_CFG_SPEED_10M: 3210 *speed = RTE_ETH_SPEED_NUM_10M; 3211 break; 3212 case HNS3_CFG_SPEED_100M: 3213 *speed = RTE_ETH_SPEED_NUM_100M; 3214 break; 3215 case HNS3_CFG_SPEED_1G: 3216 *speed = RTE_ETH_SPEED_NUM_1G; 3217 break; 3218 case HNS3_CFG_SPEED_10G: 3219 *speed = RTE_ETH_SPEED_NUM_10G; 3220 break; 3221 case HNS3_CFG_SPEED_25G: 3222 *speed = RTE_ETH_SPEED_NUM_25G; 3223 break; 3224 case HNS3_CFG_SPEED_40G: 3225 *speed = RTE_ETH_SPEED_NUM_40G; 3226 break; 3227 case HNS3_CFG_SPEED_50G: 3228 *speed = RTE_ETH_SPEED_NUM_50G; 3229 break; 3230 case HNS3_CFG_SPEED_100G: 3231 *speed = RTE_ETH_SPEED_NUM_100G; 3232 break; 3233 case HNS3_CFG_SPEED_200G: 3234 *speed = RTE_ETH_SPEED_NUM_200G; 3235 break; 3236 default: 3237 return -EINVAL; 3238 } 3239 3240 return 0; 3241 } 3242 3243 static void 3244 hns3_set_default_dev_specifications(struct hns3_hw *hw) 3245 { 3246 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; 3247 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; 3248 hw->rss_key_size = HNS3_RSS_KEY_SIZE; 3249 hw->max_tm_rate = HNS3_ETHER_MAX_RATE; 3250 hw->intr.int_ql_max = HNS3_INTR_QL_NONE; 3251 } 3252 3253 static void 3254 hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) 3255 { 3256 struct hns3_dev_specs_0_cmd *req0; 3257 3258 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; 3259 3260 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; 3261 hw->rss_ind_tbl_size = 
rte_le_to_cpu_16(req0->rss_ind_tbl_size); 3262 hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); 3263 hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); 3264 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); 3265 } 3266 3267 static int 3268 hns3_check_dev_specifications(struct hns3_hw *hw) 3269 { 3270 if (hw->rss_ind_tbl_size == 0 || 3271 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { 3272 hns3_err(hw, "the size of hash lookup table configured (%u)" 3273 " exceeds the maximum(%u)", hw->rss_ind_tbl_size, 3274 HNS3_RSS_IND_TBL_SIZE_MAX); 3275 return -EINVAL; 3276 } 3277 3278 return 0; 3279 } 3280 3281 static int 3282 hns3_query_dev_specifications(struct hns3_hw *hw) 3283 { 3284 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; 3285 int ret; 3286 int i; 3287 3288 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3289 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, 3290 true); 3291 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3292 } 3293 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); 3294 3295 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); 3296 if (ret) 3297 return ret; 3298 3299 hns3_parse_dev_specifications(hw, desc); 3300 3301 return hns3_check_dev_specifications(hw); 3302 } 3303 3304 static int 3305 hns3_get_capability(struct hns3_hw *hw) 3306 { 3307 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3308 struct rte_pci_device *pci_dev; 3309 struct hns3_pf *pf = &hns->pf; 3310 struct rte_eth_dev *eth_dev; 3311 uint16_t device_id; 3312 uint8_t revision; 3313 int ret; 3314 3315 eth_dev = &rte_eth_devices[hw->data->port_id]; 3316 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3317 device_id = pci_dev->id.device_id; 3318 3319 if (device_id == HNS3_DEV_ID_25GE_RDMA || 3320 device_id == HNS3_DEV_ID_50GE_RDMA || 3321 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || 3322 device_id == HNS3_DEV_ID_200G_RDMA) 3323 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); 3324 3325 /* Get PCI revision id */ 3326 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, 3327 HNS3_PCI_REVISION_ID); 3328 if (ret != HNS3_PCI_REVISION_ID_LEN) { 3329 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", 3330 ret); 3331 return -EIO; 3332 } 3333 hw->revision = revision; 3334 3335 if (revision < PCI_REVISION_ID_HIP09_A) { 3336 hns3_set_default_dev_specifications(hw); 3337 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; 3338 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; 3339 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; 3340 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE; 3341 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1; 3342 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; 3343 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE; 3344 hw->rss_info.ipv6_sctp_offload_supported = false; 3345 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; 3346 pf->support_multi_tc_pause = false; 3347 return 0; 3348 } 3349 3350 ret = hns3_query_dev_specifications(hw); 3351 if (ret) { 3352 PMD_INIT_LOG(ERR, 3353 "failed to query dev specifications, ret = %d", 3354 ret); 3355 return ret; 3356 } 3357 3358 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 3359 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 3360 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 3361 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; 3362 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; 3363 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; 3364 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; 3365 
hw->rss_info.ipv6_sctp_offload_supported = true; 3366 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; 3367 pf->support_multi_tc_pause = true; 3368 3369 return 0; 3370 } 3371 3372 static int 3373 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) 3374 { 3375 int ret; 3376 3377 switch (media_type) { 3378 case HNS3_MEDIA_TYPE_COPPER: 3379 if (!hns3_dev_get_support(hw, COPPER)) { 3380 PMD_INIT_LOG(ERR, 3381 "Media type is copper, not supported."); 3382 ret = -EOPNOTSUPP; 3383 } else { 3384 ret = 0; 3385 } 3386 break; 3387 case HNS3_MEDIA_TYPE_FIBER: 3388 ret = 0; 3389 break; 3390 case HNS3_MEDIA_TYPE_BACKPLANE: 3391 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported."); 3392 ret = -EOPNOTSUPP; 3393 break; 3394 default: 3395 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); 3396 ret = -EINVAL; 3397 break; 3398 } 3399 3400 return ret; 3401 } 3402 3403 static int 3404 hns3_get_board_configuration(struct hns3_hw *hw) 3405 { 3406 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3407 struct hns3_pf *pf = &hns->pf; 3408 struct hns3_cfg cfg; 3409 int ret; 3410 3411 ret = hns3_get_board_cfg(hw, &cfg); 3412 if (ret) { 3413 PMD_INIT_LOG(ERR, "get board config failed %d", ret); 3414 return ret; 3415 } 3416 3417 ret = hns3_check_media_type(hw, cfg.media_type); 3418 if (ret) 3419 return ret; 3420 3421 hw->mac.media_type = cfg.media_type; 3422 hw->rss_size_max = cfg.rss_size_max; 3423 hw->rss_dis_flag = false; 3424 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); 3425 hw->mac.phy_addr = cfg.phy_addr; 3426 hw->num_tx_desc = cfg.tqp_desc_num; 3427 hw->num_rx_desc = cfg.tqp_desc_num; 3428 hw->dcb_info.num_pg = 1; 3429 hw->dcb_info.hw_pfc_map = 0; 3430 3431 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); 3432 if (ret) { 3433 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d", 3434 cfg.default_speed, ret); 3435 return ret; 3436 } 3437 3438 pf->tc_max = cfg.tc_num; 3439 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { 3440 PMD_INIT_LOG(WARNING, 3441 "Get TC num(%u) from flash, set TC num to 1", 3442 pf->tc_max); 3443 pf->tc_max = 1; 3444 } 3445 3446 /* Dev does not support DCB */ 3447 if (!hns3_dev_get_support(hw, DCB)) { 3448 pf->tc_max = 1; 3449 pf->pfc_max = 0; 3450 } else 3451 pf->pfc_max = pf->tc_max; 3452 3453 hw->dcb_info.num_tc = 1; 3454 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, 3455 hw->tqps_num / hw->dcb_info.num_tc); 3456 hns3_set_bit(hw->hw_tc_map, 0, 1); 3457 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; 3458 3459 pf->wanted_umv_size = cfg.umv_space; 3460 3461 return ret; 3462 } 3463 3464 static int 3465 hns3_get_configuration(struct hns3_hw *hw) 3466 { 3467 int ret; 3468 3469 ret = hns3_query_function_status(hw); 3470 if (ret) { 3471 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); 3472 return ret; 3473 } 3474 3475 /* Get device capability */ 3476 ret = hns3_get_capability(hw); 3477 if (ret) { 3478 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 3479 return ret; 3480 } 3481 3482 /* Get pf resource */ 3483 ret = hns3_query_pf_resource(hw); 3484 if (ret) { 3485 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); 3486 return ret; 3487 } 3488 3489 ret = hns3_get_board_configuration(hw); 3490 if (ret) { 3491 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); 3492 return ret; 3493 } 3494 3495 ret = hns3_query_dev_fec_info(hw); 3496 if (ret) 3497 PMD_INIT_LOG(ERR, 3498 "failed to query FEC information, ret = %d", ret); 3499 3500 return ret; 3501 } 3502 3503 static int 3504 
hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid, 3505 uint16_t tqp_vid, bool is_pf) 3506 { 3507 struct hns3_tqp_map_cmd *req; 3508 struct hns3_cmd_desc desc; 3509 int ret; 3510 3511 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); 3512 3513 req = (struct hns3_tqp_map_cmd *)desc.data; 3514 req->tqp_id = rte_cpu_to_le_16(tqp_pid); 3515 req->tqp_vf = func_id; 3516 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; 3517 if (!is_pf) 3518 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); 3519 req->tqp_vid = rte_cpu_to_le_16(tqp_vid); 3520 3521 ret = hns3_cmd_send(hw, &desc, 1); 3522 if (ret) 3523 PMD_INIT_LOG(ERR, "TQP map failed %d", ret); 3524 3525 return ret; 3526 } 3527 3528 static int 3529 hns3_map_tqp(struct hns3_hw *hw) 3530 { 3531 int ret; 3532 int i; 3533 3534 /* 3535 * In current version, VF is not supported when PF is driven by DPDK 3536 * driver, so we assign total tqps_num tqps allocated to this port 3537 * to PF. 3538 */ 3539 for (i = 0; i < hw->total_tqps_num; i++) { 3540 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true); 3541 if (ret) 3542 return ret; 3543 } 3544 3545 return 0; 3546 } 3547 3548 static int 3549 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 3550 { 3551 struct hns3_config_mac_speed_dup_cmd *req; 3552 struct hns3_cmd_desc desc; 3553 int ret; 3554 3555 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; 3556 3557 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); 3558 3559 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0); 3560 3561 switch (speed) { 3562 case RTE_ETH_SPEED_NUM_10M: 3563 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3564 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); 3565 break; 3566 case RTE_ETH_SPEED_NUM_100M: 3567 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3568 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); 3569 break; 3570 case RTE_ETH_SPEED_NUM_1G: 3571 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3572 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); 3573 break; 3574 case RTE_ETH_SPEED_NUM_10G: 3575 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3576 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); 3577 break; 3578 case RTE_ETH_SPEED_NUM_25G: 3579 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3580 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); 3581 break; 3582 case RTE_ETH_SPEED_NUM_40G: 3583 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3584 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); 3585 break; 3586 case RTE_ETH_SPEED_NUM_50G: 3587 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3588 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); 3589 break; 3590 case RTE_ETH_SPEED_NUM_100G: 3591 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3592 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); 3593 break; 3594 case RTE_ETH_SPEED_NUM_200G: 3595 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3596 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); 3597 break; 3598 default: 3599 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); 3600 return -EINVAL; 3601 } 3602 3603 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); 3604 3605 ret = hns3_cmd_send(hw, &desc, 1); 3606 if (ret) 3607 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); 3608 3609 return ret; 3610 } 3611 3612 static int 3613 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3614 { 3615 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3616 struct hns3_pf *pf = &hns->pf; 3617 struct hns3_priv_buf *priv; 3618 uint32_t i, total_size; 3619 3620 total_size = pf->pkt_buf_size; 3621 3622 /* 
alloc tx buffer for all enabled tc */ 3623 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3624 priv = &buf_alloc->priv_buf[i]; 3625 3626 if (hw->hw_tc_map & BIT(i)) { 3627 if (total_size < pf->tx_buf_size) 3628 return -ENOMEM; 3629 3630 priv->tx_buf_size = pf->tx_buf_size; 3631 } else 3632 priv->tx_buf_size = 0; 3633 3634 total_size -= priv->tx_buf_size; 3635 } 3636 3637 return 0; 3638 } 3639 3640 static int 3641 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3642 { 3643 /* TX buffer size is unit by 128 byte */ 3644 #define HNS3_BUF_SIZE_UNIT_SHIFT 7 3645 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) 3646 struct hns3_tx_buff_alloc_cmd *req; 3647 struct hns3_cmd_desc desc; 3648 uint32_t buf_size; 3649 uint32_t i; 3650 int ret; 3651 3652 req = (struct hns3_tx_buff_alloc_cmd *)desc.data; 3653 3654 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); 3655 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3656 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 3657 3658 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; 3659 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | 3660 HNS3_BUF_SIZE_UPDATE_EN_MSK); 3661 } 3662 3663 ret = hns3_cmd_send(hw, &desc, 1); 3664 if (ret) 3665 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); 3666 3667 return ret; 3668 } 3669 3670 static int 3671 hns3_get_tc_num(struct hns3_hw *hw) 3672 { 3673 int cnt = 0; 3674 uint8_t i; 3675 3676 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3677 if (hw->hw_tc_map & BIT(i)) 3678 cnt++; 3679 return cnt; 3680 } 3681 3682 static uint32_t 3683 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3684 { 3685 struct hns3_priv_buf *priv; 3686 uint32_t rx_priv = 0; 3687 int i; 3688 3689 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3690 priv = &buf_alloc->priv_buf[i]; 3691 if (priv->enable) 3692 rx_priv += priv->buf_size; 3693 } 3694 return rx_priv; 3695 } 3696 3697 static uint32_t 3698 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3699 { 3700 uint32_t total_tx_size = 0; 3701 uint32_t i; 3702 3703 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3704 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 3705 3706 return total_tx_size; 3707 } 3708 3709 /* Get the number of pfc enabled TCs, which have private buffer */ 3710 static int 3711 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3712 { 3713 struct hns3_priv_buf *priv; 3714 int cnt = 0; 3715 uint8_t i; 3716 3717 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3718 priv = &buf_alloc->priv_buf[i]; 3719 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3720 cnt++; 3721 } 3722 3723 return cnt; 3724 } 3725 3726 /* Get the number of pfc disabled TCs, which have private buffer */ 3727 static int 3728 hns3_get_no_pfc_priv_num(struct hns3_hw *hw, 3729 struct hns3_pkt_buf_alloc *buf_alloc) 3730 { 3731 struct hns3_priv_buf *priv; 3732 int cnt = 0; 3733 uint8_t i; 3734 3735 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3736 priv = &buf_alloc->priv_buf[i]; 3737 if (hw->hw_tc_map & BIT(i) && 3738 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3739 cnt++; 3740 } 3741 3742 return cnt; 3743 } 3744 3745 static bool 3746 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, 3747 uint32_t rx_all) 3748 { 3749 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 3750 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3751 struct hns3_pf *pf = &hns->pf; 3752 uint32_t shared_buf, aligned_mps; 3753 uint32_t rx_priv; 3754 uint8_t tc_num; 3755 uint8_t i; 3756 3757 tc_num = hns3_get_tc_num(hw); 3758 
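	/* Round the maximum packet size up to the buffer allocation unit. */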
aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3759 3760 if (hns3_dev_get_support(hw, DCB)) 3761 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + 3762 pf->dv_buf_size; 3763 else 3764 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF 3765 + pf->dv_buf_size; 3766 3767 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 3768 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), 3769 HNS3_BUF_SIZE_UNIT); 3770 3771 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); 3772 if (rx_all < rx_priv + shared_std) 3773 return false; 3774 3775 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); 3776 buf_alloc->s_buf.buf_size = shared_buf; 3777 if (hns3_dev_get_support(hw, DCB)) { 3778 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; 3779 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 3780 - roundup(aligned_mps / HNS3_BUF_DIV_BY, 3781 HNS3_BUF_SIZE_UNIT); 3782 } else { 3783 buf_alloc->s_buf.self.high = 3784 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3785 buf_alloc->s_buf.self.low = aligned_mps; 3786 } 3787 3788 if (hns3_dev_get_support(hw, DCB)) { 3789 hi_thrd = shared_buf - pf->dv_buf_size; 3790 3791 if (tc_num <= NEED_RESERVE_TC_NUM) 3792 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT / 3793 BUF_MAX_PERCENT; 3794 3795 if (tc_num) 3796 hi_thrd = hi_thrd / tc_num; 3797 3798 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); 3799 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); 3800 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; 3801 } else { 3802 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3803 lo_thrd = aligned_mps; 3804 } 3805 3806 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3807 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 3808 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 3809 } 3810 3811 return true; 3812 } 3813 3814 static bool 3815 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, 3816 struct hns3_pkt_buf_alloc *buf_alloc) 3817 { 3818 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3819 struct hns3_pf *pf = &hns->pf; 3820 struct hns3_priv_buf *priv; 3821 uint32_t aligned_mps; 3822 uint32_t rx_all; 3823 uint8_t i; 3824 3825 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3826 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3827 3828 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3829 priv = &buf_alloc->priv_buf[i]; 3830 3831 priv->enable = 0; 3832 priv->wl.low = 0; 3833 priv->wl.high = 0; 3834 priv->buf_size = 0; 3835 3836 if (!(hw->hw_tc_map & BIT(i))) 3837 continue; 3838 3839 priv->enable = 1; 3840 if (hw->dcb_info.hw_pfc_map & BIT(i)) { 3841 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT; 3842 priv->wl.high = roundup(priv->wl.low + aligned_mps, 3843 HNS3_BUF_SIZE_UNIT); 3844 } else { 3845 priv->wl.low = 0; 3846 priv->wl.high = max ? 
				(aligned_mps * HNS3_BUF_MUL_BY) :
				aligned_mps;
		}

		priv->buf_size = priv->wl.high + pf->dv_buf_size;
	}

	return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
}

static bool
hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
			     struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_priv_buf *priv;
	int no_pfc_priv_num;
	uint32_t rx_all;
	uint8_t mask;
	int i;

	rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);

	/* Clear from the last TC first */
	for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];
		mask = BIT((uint8_t)i);
		if (hw->hw_tc_map & mask &&
		    !(hw->dcb_info.hw_pfc_map & mask)) {
			/* Clear the private buffer of this non-PFC TC */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
}

static bool
hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
			   struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_priv_buf *priv;
	uint32_t rx_all;
	int pfc_priv_num;
	uint8_t mask;
	int i;

	rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);

	/* Clear from the last TC first */
	for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];
		mask = BIT((uint8_t)i);
		if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) {
			/* Reduce the number of PFC TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}
		if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
}

static bool
hns3_only_alloc_priv_buff(struct hns3_hw *hw,
			  struct hns3_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP	0x1800
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t tc_num = hns3_get_tc_num(hw);
	uint32_t half_mps = pf->mps >> 1;
	struct hns3_priv_buf *priv;
	uint32_t min_rx_priv;
	uint32_t rx_priv;
	uint8_t i;

	rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	/*
	 * The minimum rx private buffer size (min_rx_priv) is
	 * "DV + 2.5 * MPS + 15KB". The driver only allocates the rx private
	 * buffer when rx_priv is not less than min_rx_priv.
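	 * In the code below the 15KB term corresponds to COMPENSATE_BUFFER
	 * (0x3C00 bytes) and the 2.5 * MPS term is accumulated as
	 * COMPENSATE_HALF_MPS_NUM (5) halves of MPS; with MPS = 1500, for
	 * example, that contributes 5 * 750 = 3750 bytes.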
	 */
	min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
	rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hw->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - pf->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/*
 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hw: pointer to struct hns3_hw
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0 on successful calculation, negative value on failure
 */
static int
hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hns3_dev_get_support(hw, DCB)) {
		struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
		struct hns3_pf *pf = &hns->pf;
		uint32_t rx_all = pf->pkt_buf_size;

		rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
		if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/*
	 * Try to allocate a private packet buffer for all TCs without a
	 * shared buffer.
	 */
	if (hns3_only_alloc_priv_buff(hw, buf_alloc))
		return 0;

	/*
	 * Try to allocate a private packet buffer for all TCs with a
	 * shared buffer.
	 */
	if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
		return 0;

	/*
	 * The enabled port number, TC number and number of no-drop TCs vary
	 * with the application scenario. To obtain better performance, the
	 * driver shrinks the private buffer size and reconfigures the
	 * waterline in the following order: valid TCs, PFC-disabled TCs,
	 * then PFC-enabled TCs.
	 */
	if (hns3_rx_buf_calc_all(hw, false, buf_alloc))
		return 0;

	if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
		return 0;

	if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int
hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
	struct hns3_rx_priv_buff_cmd *req;
	struct hns3_cmd_desc desc;
	uint32_t buf_size;
	int ret;
	int i;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hns3_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
		req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
	}

	buf_size = buf_alloc->s_buf.buf_size;
	req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
					   (1 << HNS3_TC0_PRI_BUF_EN_B));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);

	return ret;
}

static int
hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
#define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM	2
	struct hns3_rx_priv_wl_buf *req;
	struct hns3_priv_buf *priv;
	struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
	int i, j;
	int ret;

	for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
					  false);
		req = (struct hns3_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);

		for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
			uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
							      HNS3_BUF_UNIT_S);
			req->tc_wl[j].high |=
				rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
			req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
							     HNS3_BUF_UNIT_S);
			req->tc_wl[j].low |=
				rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
		}
	}

	/* Send two descriptors at a time */
	ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
	if (ret)
		PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
			     ret);
	return ret;
}

static int
hns3_common_thrd_config(struct hns3_hw *hw,
			struct hns3_pkt_buf_alloc *buf_alloc)
{
#define HNS3_RX_COM_THRD_ALLOC_DESC_NUM	2
	struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hns3_rx_com_thrd *req;
	struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
	struct hns3_tc_thrd *tc;
	int tc_idx;
	int i, j;
	int ret;

	for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
					  false);
		req = (struct hns3_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);

		for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
			tc_idx
= i * HNS3_TC_NUM_ONE_DESC + j; 4142 tc = &s_buf->tc_thrd[tc_idx]; 4143 4144 req->com_thrd[j].high = 4145 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); 4146 req->com_thrd[j].high |= 4147 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4148 req->com_thrd[j].low = 4149 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); 4150 req->com_thrd[j].low |= 4151 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4152 } 4153 } 4154 4155 /* Send 2 descriptors at one time */ 4156 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); 4157 if (ret) 4158 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); 4159 4160 return ret; 4161 } 4162 4163 static int 4164 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 4165 { 4166 struct hns3_shared_buf *buf = &buf_alloc->s_buf; 4167 struct hns3_rx_com_wl *req; 4168 struct hns3_cmd_desc desc; 4169 int ret; 4170 4171 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); 4172 4173 req = (struct hns3_rx_com_wl *)desc.data; 4174 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); 4175 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4176 4177 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); 4178 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 4179 4180 ret = hns3_cmd_send(hw, &desc, 1); 4181 if (ret) 4182 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); 4183 4184 return ret; 4185 } 4186 4187 int 4188 hns3_buffer_alloc(struct hns3_hw *hw) 4189 { 4190 struct hns3_pkt_buf_alloc pkt_buf; 4191 int ret; 4192 4193 memset(&pkt_buf, 0, sizeof(pkt_buf)); 4194 ret = hns3_tx_buffer_calc(hw, &pkt_buf); 4195 if (ret) { 4196 PMD_INIT_LOG(ERR, 4197 "could not calc tx buffer size for all TCs %d", 4198 ret); 4199 return ret; 4200 } 4201 4202 ret = hns3_tx_buffer_alloc(hw, &pkt_buf); 4203 if (ret) { 4204 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); 4205 return ret; 4206 } 4207 4208 ret = hns3_rx_buffer_calc(hw, &pkt_buf); 4209 if (ret) { 4210 PMD_INIT_LOG(ERR, 4211 "could not calc rx priv buffer size for all TCs %d", 4212 ret); 4213 return ret; 4214 } 4215 4216 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); 4217 if (ret) { 4218 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); 4219 return ret; 4220 } 4221 4222 if (hns3_dev_get_support(hw, DCB)) { 4223 ret = hns3_rx_priv_wl_config(hw, &pkt_buf); 4224 if (ret) { 4225 PMD_INIT_LOG(ERR, 4226 "could not configure rx private waterline %d", 4227 ret); 4228 return ret; 4229 } 4230 4231 ret = hns3_common_thrd_config(hw, &pkt_buf); 4232 if (ret) { 4233 PMD_INIT_LOG(ERR, 4234 "could not configure common threshold %d", 4235 ret); 4236 return ret; 4237 } 4238 } 4239 4240 ret = hns3_common_wl_config(hw, &pkt_buf); 4241 if (ret) 4242 PMD_INIT_LOG(ERR, "could not configure common waterline %d", 4243 ret); 4244 4245 return ret; 4246 } 4247 4248 static int 4249 hns3_mac_init(struct hns3_hw *hw) 4250 { 4251 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4252 struct hns3_mac *mac = &hw->mac; 4253 struct hns3_pf *pf = &hns->pf; 4254 int ret; 4255 4256 pf->support_sfp_query = true; 4257 mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 4258 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); 4259 if (ret) { 4260 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); 4261 return ret; 4262 } 4263 4264 mac->link_status = RTE_ETH_LINK_DOWN; 4265 4266 return hns3_config_mtu(hw, pf->mps); 4267 } 4268 4269 static int 4270 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t 
resp_code) 4271 { 4272 #define HNS3_ETHERTYPE_SUCCESS_ADD 0 4273 #define HNS3_ETHERTYPE_ALREADY_ADD 1 4274 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 4275 #define HNS3_ETHERTYPE_KEY_CONFLICT 3 4276 int return_status; 4277 4278 if (cmdq_resp) { 4279 PMD_INIT_LOG(ERR, 4280 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", 4281 cmdq_resp); 4282 return -EIO; 4283 } 4284 4285 switch (resp_code) { 4286 case HNS3_ETHERTYPE_SUCCESS_ADD: 4287 case HNS3_ETHERTYPE_ALREADY_ADD: 4288 return_status = 0; 4289 break; 4290 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: 4291 PMD_INIT_LOG(ERR, 4292 "add mac ethertype failed for manager table overflow."); 4293 return_status = -EIO; 4294 break; 4295 case HNS3_ETHERTYPE_KEY_CONFLICT: 4296 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); 4297 return_status = -EIO; 4298 break; 4299 default: 4300 PMD_INIT_LOG(ERR, 4301 "add mac ethertype failed for undefined, code=%u.", 4302 resp_code); 4303 return_status = -EIO; 4304 break; 4305 } 4306 4307 return return_status; 4308 } 4309 4310 static int 4311 hns3_add_mgr_tbl(struct hns3_hw *hw, 4312 const struct hns3_mac_mgr_tbl_entry_cmd *req) 4313 { 4314 struct hns3_cmd_desc desc; 4315 uint8_t resp_code; 4316 uint16_t retval; 4317 int ret; 4318 4319 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); 4320 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); 4321 4322 ret = hns3_cmd_send(hw, &desc, 1); 4323 if (ret) { 4324 PMD_INIT_LOG(ERR, 4325 "add mac ethertype failed for cmd_send, ret =%d.", 4326 ret); 4327 return ret; 4328 } 4329 4330 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 4331 retval = rte_le_to_cpu_16(desc.retval); 4332 4333 return hns3_get_mac_ethertype_cmd_status(retval, resp_code); 4334 } 4335 4336 static void 4337 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, 4338 int *table_item_num) 4339 { 4340 struct hns3_mac_mgr_tbl_entry_cmd *tbl; 4341 4342 /* 4343 * In current version, we add one item in management table as below: 4344 * 0x0180C200000E -- LLDP MC address 4345 */ 4346 tbl = mgr_table; 4347 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; 4348 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); 4349 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); 4350 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); 4351 tbl->i_port_bitmap = 0x1; 4352 *table_item_num = 1; 4353 } 4354 4355 static int 4356 hns3_init_mgr_tbl(struct hns3_hw *hw) 4357 { 4358 #define HNS_MAC_MGR_TBL_MAX_SIZE 16 4359 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; 4360 int table_item_num; 4361 int ret; 4362 int i; 4363 4364 memset(mgr_table, 0, sizeof(mgr_table)); 4365 hns3_prepare_mgr_tbl(mgr_table, &table_item_num); 4366 for (i = 0; i < table_item_num; i++) { 4367 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); 4368 if (ret) { 4369 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d", 4370 ret); 4371 return ret; 4372 } 4373 } 4374 4375 return 0; 4376 } 4377 4378 static void 4379 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, 4380 bool en_mc, bool en_bc, int vport_id) 4381 { 4382 if (!param) 4383 return; 4384 4385 memset(param, 0, sizeof(struct hns3_promisc_param)); 4386 if (en_uc) 4387 param->enable = HNS3_PROMISC_EN_UC; 4388 if (en_mc) 4389 param->enable |= HNS3_PROMISC_EN_MC; 4390 if (en_bc) 4391 param->enable |= HNS3_PROMISC_EN_BC; 4392 param->vf_id = vport_id; 4393 } 4394 4395 static int 4396 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) 4397 { 4398 struct 
hns3_promisc_cfg_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);

	req = (struct hns3_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;
	req->flag = (param->enable << HNS3_PROMISC_EN_B) |
		    HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);

	return ret;
}

static int
hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
{
	struct hns3_promisc_param param;
	bool en_bc_pmc = true;
	uint8_t vf_id;

	/*
	 * In the current version VF is not supported when PF is driven by
	 * the DPDK driver, so only the parameters of the PF vport need to be
	 * configured.
	 */
	vf_id = HNS3_PF_FUNC_ID;

	hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
	return hns3_cmd_set_promisc_mode(hw, &param);
}

static int
hns3_promisc_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_promisc_param param;
	uint16_t func_id;
	int ret;

	ret = hns3_set_promisc_mode(hw, false, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret);
		return ret;
	}

	/*
	 * In the current version VFs are not supported when PF is driven by
	 * the DPDK driver. After PF has been taken over by DPDK, the original
	 * VFs become invalid, so residual entries are possible. Clear the
	 * VFs' promisc mode to avoid unnecessary bandwidth usage during init.
	 */
	for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) {
		hns3_promisc_param_init(&param, false, false, false, func_id);
		ret = hns3_cmd_set_promisc_mode(hw, &param);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode,"
				     " ret = %d", func_id, ret);
			return ret;
		}
	}

	return 0;
}

static void
hns3_promisc_uninit(struct hns3_hw *hw)
{
	struct hns3_promisc_param param;
	uint16_t func_id;
	int ret;

	func_id = HNS3_PF_FUNC_ID;

	/*
	 * In the current version VFs are not supported when PF is driven by
	 * the DPDK driver, and the VFs' promisc mode status has been cleared
	 * during init and will not change. So just clear the PF's promisc
	 * mode status during uninit.
	 */
	hns3_promisc_param_init(&param, false, false, false, func_id);
	ret = hns3_cmd_set_promisc_mode(hw, &param);
	if (ret)
		PMD_INIT_LOG(ERR, "failed to clear promisc status during"
			     " uninit, ret = %d", ret);
}

static int
hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	bool allmulti = dev->data->all_multicast ? true : false;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	int err;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_promisc_mode(hw, true, true);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "failed to enable promiscuous mode, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * When promiscuous mode is enabled, disable the vlan filter so that
	 * all incoming packets are received.
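	 * The filter state is restored when promiscuous mode is disabled
	 * again; see hns3_dev_promiscuous_disable() below.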
	 */
	offloads = dev->data->dev_conf.rxmode.offloads;
	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
		ret = hns3_enable_vlan_filter(hns, false);
		if (ret) {
			hns3_err(hw, "failed to enable promiscuous mode due to "
				 "failure to disable vlan filter, ret = %d",
				 ret);
			err = hns3_set_promisc_mode(hw, false, allmulti);
			if (err)
				hns3_err(hw, "failed to restore promiscuous "
					 "status after disabling vlan filter "
					 "failed during enabling promiscuous "
					 "mode, ret = %d", ret);
		}
	}

	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	bool allmulti = dev->data->all_multicast ? true : false;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	int err;
	int ret;

	/* If now in all_multicast mode, must remain in all_multicast mode. */
	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_promisc_mode(hw, false, allmulti);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "failed to disable promiscuous mode, ret = %d",
			 ret);
		return ret;
	}
	/* When promiscuous mode is disabled, restore the vlan filter status */
	offloads = dev->data->dev_conf.rxmode.offloads;
	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
		ret = hns3_enable_vlan_filter(hns, true);
		if (ret) {
			hns3_err(hw, "failed to disable promiscuous mode due to"
				 " failure to restore vlan filter, ret = %d",
				 ret);
			err = hns3_set_promisc_mode(hw, true, true);
			if (err)
				hns3_err(hw, "failed to restore promiscuous "
					 "status after enabling vlan filter "
					 "failed during disabling promiscuous "
					 "mode, ret = %d", ret);
		}
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (dev->data->promiscuous)
		return 0;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_promisc_mode(hw, false, true);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		hns3_err(hw, "failed to enable allmulticast mode, ret = %d",
			 ret);

	return ret;
}

static int
hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/* If now in promiscuous mode, must remain in all_multicast mode. */
	if (dev->data->promiscuous)
		return 0;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_promisc_mode(hw, false, false);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		hns3_err(hw, "failed to disable allmulticast mode, ret = %d",
			 ret);

	return ret;
}

static int
hns3_dev_promisc_restore(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	bool allmulti = hw->data->all_multicast ?
			true : false;
	int ret;

	if (hw->data->promiscuous) {
		ret = hns3_set_promisc_mode(hw, true, true);
		if (ret)
			hns3_err(hw, "failed to restore promiscuous mode, "
				 "ret = %d", ret);
		return ret;
	}

	ret = hns3_set_promisc_mode(hw, false, allmulti);
	if (ret)
		hns3_err(hw, "failed to restore allmulticast mode, ret = %d",
			 ret);
	return ret;
}

static int
hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
{
	struct hns3_sfp_info_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
	resp = (struct hns3_sfp_info_cmd *)desc.data;
	resp->query_type = HNS3_ACTIVE_QUERY;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		hns3_warn(hw, "firmware does not support getting SFP info,"
			  " ret = %d.", ret);
		return ret;
	} else if (ret) {
		hns3_err(hw, "get sfp info failed, ret = %d.", ret);
		return ret;
	}

	/*
	 * In some cases the MAC speed obtained from the firmware may be 0;
	 * it must not be written into mac->speed.
	 */
	if (!rte_le_to_cpu_32(resp->sfp_speed))
		return 0;

	mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed);
	/*
	 * If resp->supported_speed is 0, the firmware is an old version
	 * that does not report these fields, so do not update them.
	 */
	if (resp->supported_speed) {
		mac_info->query_type = HNS3_ACTIVE_QUERY;
		mac_info->supported_speed =
					rte_le_to_cpu_32(resp->supported_speed);
		mac_info->support_autoneg = resp->autoneg_ability;
		mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
					: RTE_ETH_LINK_AUTONEG;
	} else {
		mac_info->query_type = HNS3_DEFAULT_QUERY;
	}

	return 0;
}

static uint8_t
hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
{
	if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
		duplex = RTE_ETH_LINK_FULL_DUPLEX;

	return duplex;
}

static int
hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
{
	struct hns3_mac *mac = &hw->mac;
	int ret;

	duplex = hns3_check_speed_dup(duplex, speed);
	if (mac->link_speed == speed && mac->link_duplex == duplex)
		return 0;

	ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
	if (ret)
		return ret;

	ret = hns3_port_shaper_update(hw, speed);
	if (ret)
		return ret;

	mac->link_speed = speed;
	mac->link_duplex = duplex;

	return 0;
}

static int
hns3_update_fiber_link_info(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_mac *mac = &hw->mac;
	struct hns3_mac mac_info;
	int ret;

	/* If the firmware does not support getting SFP/qSFP speed, return */
	if (!pf->support_sfp_query)
		return 0;

	memset(&mac_info, 0, sizeof(struct hns3_mac));
	ret = hns3_get_sfp_info(hw, &mac_info);
	if (ret == -EOPNOTSUPP) {
		pf->support_sfp_query = false;
		return ret;
	} else if (ret)
		return ret;

	/* Do nothing if there is no SFP */
	if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
		return 0;

	/*
	 * If query_type is HNS3_ACTIVE_QUERY, there is no need to
	 * reconfigure the MAC speed.
	 * Otherwise, the current firmware only supports obtaining the SFP
	 * speed, and the MAC speed needs to be reconfigured accordingly.
	 */
	mac->query_type = mac_info.query_type;
	if (mac->query_type == HNS3_ACTIVE_QUERY) {
		if (mac_info.link_speed != mac->link_speed) {
			ret = hns3_port_shaper_update(hw, mac_info.link_speed);
			if (ret)
				return ret;
		}

		mac->link_speed = mac_info.link_speed;
		mac->supported_speed = mac_info.supported_speed;
		mac->support_autoneg = mac_info.support_autoneg;
		mac->link_autoneg = mac_info.link_autoneg;

		return 0;
	}

	/* Config full duplex for SFP */
	return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
				      RTE_ETH_LINK_FULL_DUPLEX);
}

static void
hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
{
#define HNS3_PHY_SUPPORTED_SPEED_MASK	0x2f

	struct hns3_phy_params_bd0_cmd *req;
	uint32_t supported;

	req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
	mac->link_speed = rte_le_to_cpu_32(req->speed);
	mac->link_duplex = hns3_get_bit(req->duplex,
					HNS3_PHY_DUPLEX_CFG_B);
	mac->link_autoneg = hns3_get_bit(req->autoneg,
					 HNS3_PHY_AUTONEG_CFG_B);
	mac->advertising = rte_le_to_cpu_32(req->advertising);
	mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
	supported = rte_le_to_cpu_32(req->supported);
	mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK;
	mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT);
}

static int
hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
{
	struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
	uint16_t i;
	int ret;

	for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
					  true);
		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}
	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);

	ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
	if (ret) {
		hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
		return ret;
	}

	hns3_parse_copper_phy_params(desc, mac);

	return 0;
}

static int
hns3_update_copper_link_info(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;
	struct hns3_mac mac_info;
	int ret;

	memset(&mac_info, 0, sizeof(struct hns3_mac));
	ret = hns3_get_copper_phy_params(hw, &mac_info);
	if (ret)
		return ret;

	if (mac_info.link_speed != mac->link_speed) {
		ret = hns3_port_shaper_update(hw, mac_info.link_speed);
		if (ret)
			return ret;
	}

	mac->link_speed = mac_info.link_speed;
	mac->link_duplex = mac_info.link_duplex;
	mac->link_autoneg = mac_info.link_autoneg;
	mac->supported_speed = mac_info.supported_speed;
	mac->advertising = mac_info.advertising;
	mac->lp_advertising = mac_info.lp_advertising;
	mac->support_autoneg = mac_info.support_autoneg;

	return 0;
}

static int
hns3_update_link_info(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret = 0;

	if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
		ret = hns3_update_copper_link_info(hw);
	else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
		ret =
hns3_update_fiber_link_info(hw); 4854 4855 return ret; 4856 } 4857 4858 static int 4859 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) 4860 { 4861 struct hns3_config_mac_mode_cmd *req; 4862 struct hns3_cmd_desc desc; 4863 uint32_t loop_en = 0; 4864 uint8_t val = 0; 4865 int ret; 4866 4867 req = (struct hns3_config_mac_mode_cmd *)desc.data; 4868 4869 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); 4870 if (enable) 4871 val = 1; 4872 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); 4873 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); 4874 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); 4875 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); 4876 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); 4877 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); 4878 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); 4879 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); 4880 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); 4881 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); 4882 4883 /* 4884 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC 4885 * when receiving frames. Otherwise, CRC will be stripped. 4886 */ 4887 if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) 4888 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); 4889 else 4890 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); 4891 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); 4892 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); 4893 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); 4894 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); 4895 4896 ret = hns3_cmd_send(hw, &desc, 1); 4897 if (ret) 4898 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret); 4899 4900 return ret; 4901 } 4902 4903 static int 4904 hns3_get_mac_link_status(struct hns3_hw *hw) 4905 { 4906 struct hns3_link_status_cmd *req; 4907 struct hns3_cmd_desc desc; 4908 int link_status; 4909 int ret; 4910 4911 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); 4912 ret = hns3_cmd_send(hw, &desc, 1); 4913 if (ret) { 4914 hns3_err(hw, "get link status cmd failed %d", ret); 4915 return RTE_ETH_LINK_DOWN; 4916 } 4917 4918 req = (struct hns3_link_status_cmd *)desc.data; 4919 link_status = req->status & HNS3_LINK_STATUS_UP_M; 4920 4921 return !!link_status; 4922 } 4923 4924 static bool 4925 hns3_update_link_status(struct hns3_hw *hw) 4926 { 4927 int state; 4928 4929 state = hns3_get_mac_link_status(hw); 4930 if (state != hw->mac.link_status) { 4931 hw->mac.link_status = state; 4932 hns3_warn(hw, "Link status change to %s!", state ? 
"up" : "down"); 4933 return true; 4934 } 4935 4936 return false; 4937 } 4938 4939 void 4940 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query) 4941 { 4942 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 4943 struct rte_eth_link new_link; 4944 int ret; 4945 4946 if (query) 4947 hns3_update_port_link_info(dev); 4948 4949 memset(&new_link, 0, sizeof(new_link)); 4950 hns3_setup_linkstatus(dev, &new_link); 4951 4952 ret = rte_eth_linkstatus_set(dev, &new_link); 4953 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0) 4954 hns3_start_report_lse(dev); 4955 } 4956 4957 static void 4958 hns3_service_handler(void *param) 4959 { 4960 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 4961 struct hns3_adapter *hns = eth_dev->data->dev_private; 4962 struct hns3_hw *hw = &hns->hw; 4963 4964 if (!hns3_is_reset_pending(hns)) 4965 hns3_update_linkstatus_and_event(hw, true); 4966 else 4967 hns3_warn(hw, "Cancel the query when reset is pending"); 4968 4969 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); 4970 } 4971 4972 static int 4973 hns3_init_hardware(struct hns3_adapter *hns) 4974 { 4975 struct hns3_hw *hw = &hns->hw; 4976 int ret; 4977 4978 ret = hns3_map_tqp(hw); 4979 if (ret) { 4980 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); 4981 return ret; 4982 } 4983 4984 ret = hns3_init_umv_space(hw); 4985 if (ret) { 4986 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); 4987 return ret; 4988 } 4989 4990 ret = hns3_mac_init(hw); 4991 if (ret) { 4992 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); 4993 goto err_mac_init; 4994 } 4995 4996 ret = hns3_init_mgr_tbl(hw); 4997 if (ret) { 4998 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); 4999 goto err_mac_init; 5000 } 5001 5002 ret = hns3_promisc_init(hw); 5003 if (ret) { 5004 PMD_INIT_LOG(ERR, "Failed to init promisc: %d", 5005 ret); 5006 goto err_mac_init; 5007 } 5008 5009 ret = hns3_init_vlan_config(hns); 5010 if (ret) { 5011 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); 5012 goto err_mac_init; 5013 } 5014 5015 ret = hns3_dcb_init(hw); 5016 if (ret) { 5017 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); 5018 goto err_mac_init; 5019 } 5020 5021 ret = hns3_init_fd_config(hns); 5022 if (ret) { 5023 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); 5024 goto err_mac_init; 5025 } 5026 5027 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); 5028 if (ret) { 5029 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); 5030 goto err_mac_init; 5031 } 5032 5033 ret = hns3_config_gro(hw, false); 5034 if (ret) { 5035 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 5036 goto err_mac_init; 5037 } 5038 5039 /* 5040 * In the initialization clearing the all hardware mapping relationship 5041 * configurations between queues and interrupt vectors is needed, so 5042 * some error caused by the residual configurations, such as the 5043 * unexpected interrupt, can be avoid. 
	 */
	ret = hns3_init_ring_with_vector(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
		goto err_mac_init;
	}

	return 0;

err_mac_init:
	hns3_uninit_umv_space(hw);
	return ret;
}

static int
hns3_clear_hw(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	return 0;
}

static void
hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)
{
	uint32_t val;

	/*
	 * New firmware supports reporting more hardware error types through
	 * MSI-X mode. These errors are defined as RAS errors in hardware
	 * and belong to a different type from the MSI-X errors processed
	 * by the network driver.
	 *
	 * The network driver should enable this new error reporting during
	 * initialization.
	 */
	val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
	hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0);
	hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
}

static uint32_t
hns3_set_fiber_default_support_speed(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;

	switch (mac->link_speed) {
	case RTE_ETH_SPEED_NUM_1G:
		return HNS3_FIBER_LINK_SPEED_1G_BIT;
	case RTE_ETH_SPEED_NUM_10G:
		return HNS3_FIBER_LINK_SPEED_10G_BIT;
	case RTE_ETH_SPEED_NUM_25G:
		return HNS3_FIBER_LINK_SPEED_25G_BIT;
	case RTE_ETH_SPEED_NUM_40G:
		return HNS3_FIBER_LINK_SPEED_40G_BIT;
	case RTE_ETH_SPEED_NUM_50G:
		return HNS3_FIBER_LINK_SPEED_50G_BIT;
	case RTE_ETH_SPEED_NUM_100G:
		return HNS3_FIBER_LINK_SPEED_100G_BIT;
	case RTE_ETH_SPEED_NUM_200G:
		return HNS3_FIBER_LINK_SPEED_200G_BIT;
	default:
		hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
		return 0;
	}
}

/*
 * The validity of supported_speed for fiber and copper media types can be
 * guaranteed by the following policy:
 * Copper:
 *    Although the initialization of the phy in the firmware may not be
 *    completed, the firmware can guarantee that supported_speed is a
 *    valid value.
 * Fiber:
 *    If the firmware version supports the active query of the
 *    HNS3_OPC_GET_SFP_INFO opcode, supported_speed can be obtained through
 *    it. If not, use the SFP's speed as the value of supported_speed.
 */
static int
hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_mac *mac = &hw->mac;
	int ret;

	ret = hns3_update_link_info(eth_dev);
	if (ret)
		return ret;

	if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) {
		/*
		 * Some firmware does not support reporting supported_speed
		 * and only reports the effective speed of the SFP. In this
		 * case the SFP's speed must be used as supported_speed.
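		 * The fallback derives a single-speed bitmap from the
		 * current SFP speed via
		 * hns3_set_fiber_default_support_speed().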
		 */
		if (mac->supported_speed == 0)
			mac->supported_speed =
				hns3_set_fiber_default_support_speed(hw);
	}

	return 0;
}

static void
hns3_get_fc_autoneg_capability(struct hns3_adapter *hns)
{
	struct hns3_mac *mac = &hns->hw.mac;

	if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) {
		hns->pf.support_fc_autoneg = true;
		return;
	}

	/*
	 * Flow control auto-negotiation requires the cooperation of the
	 * driver and firmware. Currently, the optical port does not support
	 * flow control auto-negotiation.
	 */
	hns->pf.support_fc_autoneg = false;
}

static int
hns3_init_pf(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Get hardware io base address from pcie BAR2 IO space */
	hw->io_base = pci_dev->mem_resource[2].addr;

	/* Initialize the firmware command queue */
	ret = hns3_cmd_init_queue(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
		goto err_cmd_init_queue;
	}

	hns3_clear_all_event_cause(hw);

	/* Initialize the firmware command interface */
	ret = hns3_cmd_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
		goto err_cmd_init;
	}

	hns3_tx_push_init(eth_dev);

	/*
	 * To ensure that the hardware environment is clean during
	 * initialization, the driver actively clears the hardware
	 * environment, including the PF and corresponding VFs' vlan, mac,
	 * flow table configurations, etc.
	 */
	ret = hns3_clear_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret);
		goto err_cmd_init;
	}

	/*
	 * Clear the hardware statistics held in the imissed registers.
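	 * This is done at initialization time so that packets dropped before
	 * the PMD took over the device are not counted against the port.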
*/ 5219 ret = hns3_update_imissed_stats(hw, true); 5220 if (ret) { 5221 hns3_err(hw, "clear imissed stats failed, ret = %d", ret); 5222 goto err_cmd_init; 5223 } 5224 5225 hns3_config_all_msix_error(hw, true); 5226 5227 ret = rte_intr_callback_register(&pci_dev->intr_handle, 5228 hns3_interrupt_handler, 5229 eth_dev); 5230 if (ret) { 5231 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); 5232 goto err_intr_callback_register; 5233 } 5234 5235 ret = hns3_ptp_init(hw); 5236 if (ret) 5237 goto err_get_config; 5238 5239 /* Enable interrupt */ 5240 rte_intr_enable(&pci_dev->intr_handle); 5241 hns3_pf_enable_irq0(hw); 5242 5243 /* Get configuration */ 5244 ret = hns3_get_configuration(hw); 5245 if (ret) { 5246 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); 5247 goto err_get_config; 5248 } 5249 5250 ret = hns3_tqp_stats_init(hw); 5251 if (ret) 5252 goto err_get_config; 5253 5254 ret = hns3_init_hardware(hns); 5255 if (ret) { 5256 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret); 5257 goto err_init_hw; 5258 } 5259 5260 /* Initialize flow director filter list & hash */ 5261 ret = hns3_fdir_filter_init(hns); 5262 if (ret) { 5263 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret); 5264 goto err_fdir; 5265 } 5266 5267 hns3_rss_set_default_args(hw); 5268 5269 ret = hns3_enable_hw_error_intr(hns, true); 5270 if (ret) { 5271 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", 5272 ret); 5273 goto err_enable_intr; 5274 } 5275 5276 ret = hns3_get_port_supported_speed(eth_dev); 5277 if (ret) { 5278 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported " 5279 "by device, ret = %d.", ret); 5280 goto err_supported_speed; 5281 } 5282 5283 hns3_get_fc_autoneg_capability(hns); 5284 5285 hns3_tm_conf_init(eth_dev); 5286 5287 return 0; 5288 5289 err_supported_speed: 5290 (void)hns3_enable_hw_error_intr(hns, false); 5291 err_enable_intr: 5292 hns3_fdir_filter_uninit(hns); 5293 err_fdir: 5294 hns3_uninit_umv_space(hw); 5295 err_init_hw: 5296 hns3_tqp_stats_uninit(hw); 5297 err_get_config: 5298 hns3_pf_disable_irq0(hw); 5299 rte_intr_disable(&pci_dev->intr_handle); 5300 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5301 eth_dev); 5302 err_intr_callback_register: 5303 err_cmd_init: 5304 hns3_cmd_uninit(hw); 5305 hns3_cmd_destroy_queue(hw); 5306 err_cmd_init_queue: 5307 hw->io_base = NULL; 5308 5309 return ret; 5310 } 5311 5312 static void 5313 hns3_uninit_pf(struct rte_eth_dev *eth_dev) 5314 { 5315 struct hns3_adapter *hns = eth_dev->data->dev_private; 5316 struct rte_device *dev = eth_dev->device; 5317 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 5318 struct hns3_hw *hw = &hns->hw; 5319 5320 PMD_INIT_FUNC_TRACE(); 5321 5322 hns3_tm_conf_uninit(eth_dev); 5323 hns3_enable_hw_error_intr(hns, false); 5324 hns3_rss_uninit(hns); 5325 (void)hns3_config_gro(hw, false); 5326 hns3_promisc_uninit(hw); 5327 hns3_flow_uninit(eth_dev); 5328 hns3_fdir_filter_uninit(hns); 5329 hns3_uninit_umv_space(hw); 5330 hns3_tqp_stats_uninit(hw); 5331 hns3_config_mac_tnl_int(hw, false); 5332 hns3_pf_disable_irq0(hw); 5333 rte_intr_disable(&pci_dev->intr_handle); 5334 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 5335 eth_dev); 5336 hns3_config_all_msix_error(hw, false); 5337 hns3_cmd_uninit(hw); 5338 hns3_cmd_destroy_queue(hw); 5339 hw->io_base = NULL; 5340 } 5341 5342 static uint32_t 5343 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds) 5344 { 5345 uint32_t speed_bit; 5346 5347 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) { 
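	/*
	 * Note that each case below matches exactly one speed bit, so a
	 * link_speeds mask with several speed bits set falls through to the
	 * default branch, yields speed_bit = 0, and is then rejected by the
	 * caller's supported-speed check.
	 */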
5348 case RTE_ETH_LINK_SPEED_10M: 5349 speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT; 5350 break; 5351 case RTE_ETH_LINK_SPEED_10M_HD: 5352 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT; 5353 break; 5354 case RTE_ETH_LINK_SPEED_100M: 5355 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT; 5356 break; 5357 case RTE_ETH_LINK_SPEED_100M_HD: 5358 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT; 5359 break; 5360 case RTE_ETH_LINK_SPEED_1G: 5361 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT; 5362 break; 5363 default: 5364 speed_bit = 0; 5365 break; 5366 } 5367 5368 return speed_bit; 5369 } 5370 5371 static uint32_t 5372 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds) 5373 { 5374 uint32_t speed_bit; 5375 5376 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) { 5377 case RTE_ETH_LINK_SPEED_1G: 5378 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT; 5379 break; 5380 case RTE_ETH_LINK_SPEED_10G: 5381 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT; 5382 break; 5383 case RTE_ETH_LINK_SPEED_25G: 5384 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT; 5385 break; 5386 case RTE_ETH_LINK_SPEED_40G: 5387 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT; 5388 break; 5389 case RTE_ETH_LINK_SPEED_50G: 5390 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT; 5391 break; 5392 case RTE_ETH_LINK_SPEED_100G: 5393 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT; 5394 break; 5395 case RTE_ETH_LINK_SPEED_200G: 5396 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT; 5397 break; 5398 default: 5399 speed_bit = 0; 5400 break; 5401 } 5402 5403 return speed_bit; 5404 } 5405 5406 static int 5407 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) 5408 { 5409 struct hns3_mac *mac = &hw->mac; 5410 uint32_t supported_speed = mac->supported_speed; 5411 uint32_t speed_bit = 0; 5412 5413 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) 5414 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds); 5415 else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) 5416 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds); 5417 5418 if (!(speed_bit & supported_speed)) { 5419 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.", 5420 link_speeds); 5421 return -EINVAL; 5422 } 5423 5424 return 0; 5425 } 5426 5427 static inline uint32_t 5428 hns3_get_link_speed(uint32_t link_speeds) 5429 { 5430 uint32_t speed = RTE_ETH_SPEED_NUM_NONE; 5431 5432 if (link_speeds & RTE_ETH_LINK_SPEED_10M || 5433 link_speeds & RTE_ETH_LINK_SPEED_10M_HD) 5434 speed = RTE_ETH_SPEED_NUM_10M; 5435 if (link_speeds & RTE_ETH_LINK_SPEED_100M || 5436 link_speeds & RTE_ETH_LINK_SPEED_100M_HD) 5437 speed = RTE_ETH_SPEED_NUM_100M; 5438 if (link_speeds & RTE_ETH_LINK_SPEED_1G) 5439 speed = RTE_ETH_SPEED_NUM_1G; 5440 if (link_speeds & RTE_ETH_LINK_SPEED_10G) 5441 speed = RTE_ETH_SPEED_NUM_10G; 5442 if (link_speeds & RTE_ETH_LINK_SPEED_25G) 5443 speed = RTE_ETH_SPEED_NUM_25G; 5444 if (link_speeds & RTE_ETH_LINK_SPEED_40G) 5445 speed = RTE_ETH_SPEED_NUM_40G; 5446 if (link_speeds & RTE_ETH_LINK_SPEED_50G) 5447 speed = RTE_ETH_SPEED_NUM_50G; 5448 if (link_speeds & RTE_ETH_LINK_SPEED_100G) 5449 speed = RTE_ETH_SPEED_NUM_100G; 5450 if (link_speeds & RTE_ETH_LINK_SPEED_200G) 5451 speed = RTE_ETH_SPEED_NUM_200G; 5452 5453 return speed; 5454 } 5455 5456 static uint8_t 5457 hns3_get_link_duplex(uint32_t link_speeds) 5458 { 5459 if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) || 5460 (link_speeds & RTE_ETH_LINK_SPEED_100M_HD)) 5461 return RTE_ETH_LINK_HALF_DUPLEX; 5462 else 5463 return RTE_ETH_LINK_FULL_DUPLEX; 5464 } 5465 5466 static int 5467 hns3_set_copper_port_link_speed(struct hns3_hw *hw, 
			       struct hns3_set_link_speed_cfg *cfg)
{
	struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
	struct hns3_phy_params_bd0_cmd *req;
	uint16_t i;

	for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
					  false);
		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}
	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false);
	req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
	req->autoneg = cfg->autoneg;

	/*
	 * The full speed capability is used for negotiation when
	 * auto-negotiation is enabled.
	 */
	if (cfg->autoneg) {
		req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT |
				   HNS3_PHY_LINK_SPEED_10M_HD_BIT |
				   HNS3_PHY_LINK_SPEED_100M_BIT |
				   HNS3_PHY_LINK_SPEED_100M_HD_BIT |
				   HNS3_PHY_LINK_SPEED_1000M_BIT;
	} else {
		req->speed = cfg->speed;
		req->duplex = cfg->duplex;
	}

	return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
}

static int
hns3_set_autoneg(struct hns3_hw *hw, bool enable)
{
	struct hns3_config_auto_neg_cmd *req;
	struct hns3_cmd_desc desc;
	uint32_t flag = 0;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false);

	req = (struct hns3_config_auto_neg_cmd *)desc.data;
	if (enable)
		hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1);
	req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret);

	return ret;
}

static int
hns3_set_fiber_port_link_speed(struct hns3_hw *hw,
			       struct hns3_set_link_speed_cfg *cfg)
{
	int ret;

	if (hw->mac.support_autoneg) {
		ret = hns3_set_autoneg(hw, cfg->autoneg);
		if (ret) {
			hns3_err(hw, "failed to configure auto-negotiation.");
			return ret;
		}

		/*
		 * To enable auto-negotiation, it is only necessary to turn
		 * the auto-negotiation switch on; the firmware then
		 * advertises all speed capabilities.
		 */
		if (cfg->autoneg)
			return 0;
	}

	/*
	 * Some hardware doesn't support auto-negotiation, but users may not
	 * have configured link_speeds (default 0), which means
	 * auto-negotiation. In this case a warning message needs to be
	 * printed, instead of an error.
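	 * Returning 0 below keeps such default configurations working on
	 * hardware without auto-negotiation support.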
	 */
	if (cfg->autoneg) {
		hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
		return 0;
	}

	return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
}

static int
hns3_set_port_link_speed(struct hns3_hw *hw,
			 struct hns3_set_link_speed_cfg *cfg)
{
	int ret;

	if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) {
#if defined(RTE_HNS3_ONLY_1630_FPGA)
		struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
		if (pf->is_tmp_phy)
			return 0;
#endif

		ret = hns3_set_copper_port_link_speed(hw, cfg);
		if (ret) {
			hns3_err(hw, "failed to set copper port link speed, "
				 "ret = %d.", ret);
			return ret;
		}
	} else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) {
		ret = hns3_set_fiber_port_link_speed(hw, cfg);
		if (ret) {
			hns3_err(hw, "failed to set fiber port link speed, "
				 "ret = %d.", ret);
			return ret;
		}
	}

	return 0;
}

static int
hns3_apply_link_speed(struct hns3_hw *hw)
{
	struct rte_eth_conf *conf = &hw->data->dev_conf;
	struct hns3_set_link_speed_cfg cfg;

	memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
	cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
		      RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
	if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
		cfg.speed = hns3_get_link_speed(conf->link_speeds);
		cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
	}

	return hns3_set_port_link_speed(hw, &cfg);
}

static int
hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	bool link_en;
	int ret;

	ret = hns3_update_queue_map_configure(hns);
	if (ret) {
		hns3_err(hw, "failed to update queue mapping configuration, ret = %d",
			 ret);
		return ret;
	}

	/* Note: hns3_tm_conf_update must be called after configuring DCB. */
	ret = hns3_tm_conf_update(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
		return ret;
	}

	hns3_enable_rxd_adv_layout(hw);

	ret = hns3_init_queues(hns, reset_queue);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
		return ret;
	}

	link_en = hw->set_link_down ? false : true;
	ret = hns3_cfg_mac_mode(hw, link_en);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
		goto err_config_mac_mode;
	}

	ret = hns3_apply_link_speed(hw);
	if (ret)
		goto err_set_link_speed;

	return 0;

err_set_link_speed:
	(void)hns3_cfg_mac_mode(hw, false);

err_config_mac_mode:
	hns3_dev_release_mbufs(hns);
	/*
	 * This is exception handling: hns3_reset_all_tqps prints its own
	 * error message if something goes wrong, so its return value does
	 * not need to be checked here; keep ret as the error code that
	 * caused the exception.
	 */
	(void)hns3_reset_all_tqps(hns);
	return ret;
}

static int
hns3_map_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint32_t intr_vector;
	uint16_t q_id;
	int ret;

	/*
	 * hns3 needs a separate interrupt as the event interrupt, which
	 * cannot be shared with the task queue pairs, so the kernel driver
	 * needs to support multiple interrupt vectors.
	 */
	if (dev->data->dev_conf.intr_conf.rxq == 0 ||
	    !rte_intr_cap_multiple(intr_handle))
		return 0;

	rte_intr_disable(intr_handle);
	intr_vector = hw->used_rx_queues;
	/* Create an event fd for each interrupt vector when MSI-X is used */
	if (rte_intr_efd_enable(intr_handle, intr_vector))
		return -EINVAL;

	if (intr_handle->intr_vec == NULL) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    hw->used_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
				 hw->used_rx_queues);
			ret = -ENOMEM;
			goto alloc_intr_vec_error;
		}
	}

	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}

	for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
		ret = hns3_bind_ring_with_vector(hw, vec, true,
						 HNS3_RING_TYPE_RX, q_id);
		if (ret)
			goto bind_vector_error;
		intr_handle->intr_vec[q_id] = vec;
		/*
		 * If there are not enough efds (e.g. not enough interrupts),
		 * the remaining queues will be bound to the last interrupt.
		 */
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}
	rte_intr_enable(intr_handle);
	return 0;

bind_vector_error:
	rte_free(intr_handle->intr_vec);
	intr_handle->intr_vec = NULL;
alloc_intr_vec_error:
	rte_intr_efd_disable(intr_handle);
	return ret;
}

static int
hns3_restore_rx_interrupt(struct hns3_hw *hw)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t q_id;
	int ret;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return 0;

	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			ret = hns3_bind_ring_with_vector(hw,
					intr_handle->intr_vec[q_id], true,
					HNS3_RING_TYPE_RX, q_id);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void
hns3_restore_filter(struct rte_eth_dev *dev)
{
	hns3_restore_rss_filter(dev);
}

static int
hns3_dev_start(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool old_state = hw->set_link_down;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
		return -EBUSY;

	rte_spinlock_lock(&hw->lock);
	hw->adapter_state = HNS3_NIC_STARTING;

	/*
	 * If the dev_set_link_down() API has been called, the "set_link_down"
	 * flag is cleared by the dev_start() API.
	 * In addition, the flag should also be cleared before calling
	 * hns3_do_start() so that the MAC can be enabled in the dev_start
	 * stage.
	 */
	hw->set_link_down = false;
	ret = hns3_do_start(hns, true);
	if (ret)
		goto do_start_fail;

	ret = hns3_map_rx_interrupt(dev);
	if (ret)
		goto map_rx_inter_err;

	/*
	 * On the new version of the network engine, three registers are used
	 * to control the status of a TQP (which contains a pair of Tx and Rx
	 * queues): one controls the enabling of the Tx queue, another
	 * controls the enabling of the Rx queue, and the last is the master
	 * switch controlling the enabling of the whole TQP. The Tx register
	 * and the TQP register must both be enabled to enable a Tx queue,
	 * and the same applies to the Rx queue. On the older network engine,
	 * these functions only refresh the enabled flag and are used to
	 * update the queue status in the DPDK framework.
	 */
	ret = hns3_start_all_txqs(dev);
	if (ret)
		goto map_rx_inter_err;

	ret = hns3_start_all_rxqs(dev);
	if (ret)
		goto start_all_rxqs_fail;

	hw->adapter_state = HNS3_NIC_STARTED;
	rte_spinlock_unlock(&hw->lock);

	hns3_rx_scattered_calc(dev);
	hns3_set_rxtx_function(dev);
	hns3_mp_req_start_rxtx(dev);

	hns3_restore_filter(dev);

	/* Enable interrupt of all rx queues before enabling queues */
	hns3_dev_all_rx_queue_intr_enable(hw, true);

	/*
	 * After finishing the initialization, enable the TQPs to receive and
	 * transmit packets, and refresh the status of all queues.
	 */
	hns3_start_tqps(hw);

	hns3_tm_dev_start_proc(hw);

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		hns3_dev_link_update(dev, 0);
	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);

	hns3_info(hw, "hns3 dev start successful!");

	return 0;

start_all_rxqs_fail:
	hns3_stop_all_txqs(dev);
map_rx_inter_err:
	(void)hns3_do_stop(hns);
do_start_fail:
	hw->set_link_down = old_state;
	hw->adapter_state = HNS3_NIC_CONFIGURED;
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_do_stop(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * The "hns3_do_stop" function is also called by .stop_service to
	 * prepare for a reset. During a global or IMP reset, the command
	 * cannot be sent to stop the Tx/Rx queues. The mbufs in the Tx/Rx
	 * queues may be accessed during the reset process, so the mbufs
	 * cannot be released during the reset and must be released after
	 * the reset completes.
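	 * That is why the hns3_dev_release_mbufs() call below is skipped
	 * while reset.resetting is set; the mbufs are released later, once
	 * the reset has completed.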
5865 */ 5866 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) 5867 hns3_dev_release_mbufs(hns); 5868 5869 ret = hns3_cfg_mac_mode(hw, false); 5870 if (ret) 5871 return ret; 5872 hw->mac.link_status = RTE_ETH_LINK_DOWN; 5873 5874 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { 5875 hns3_configure_all_mac_addr(hns, true); 5876 ret = hns3_reset_all_tqps(hns); 5877 if (ret) { 5878 hns3_err(hw, "failed to reset all queues ret = %d.", 5879 ret); 5880 return ret; 5881 } 5882 } 5883 5884 return 0; 5885 } 5886 5887 static void 5888 hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) 5889 { 5890 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5891 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5892 struct hns3_adapter *hns = dev->data->dev_private; 5893 struct hns3_hw *hw = &hns->hw; 5894 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; 5895 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; 5896 uint16_t q_id; 5897 5898 if (dev->data->dev_conf.intr_conf.rxq == 0) 5899 return; 5900 5901 /* unmap the ring with vector */ 5902 if (rte_intr_allow_others(intr_handle)) { 5903 vec = RTE_INTR_VEC_RXTX_OFFSET; 5904 base = RTE_INTR_VEC_RXTX_OFFSET; 5905 } 5906 if (rte_intr_dp_is_en(intr_handle)) { 5907 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 5908 (void)hns3_bind_ring_with_vector(hw, vec, false, 5909 HNS3_RING_TYPE_RX, 5910 q_id); 5911 if (vec < base + intr_handle->nb_efd - 1) 5912 vec++; 5913 } 5914 } 5915 /* Clean datapath event and queue/vec mapping */ 5916 rte_intr_efd_disable(intr_handle); 5917 if (intr_handle->intr_vec) { 5918 rte_free(intr_handle->intr_vec); 5919 intr_handle->intr_vec = NULL; 5920 } 5921 } 5922 5923 static int 5924 hns3_dev_stop(struct rte_eth_dev *dev) 5925 { 5926 struct hns3_adapter *hns = dev->data->dev_private; 5927 struct hns3_hw *hw = &hns->hw; 5928 5929 PMD_INIT_FUNC_TRACE(); 5930 dev->data->dev_started = 0; 5931 5932 hw->adapter_state = HNS3_NIC_STOPPING; 5933 hns3_set_rxtx_function(dev); 5934 rte_wmb(); 5935 /* Disable datapath on secondary process. */ 5936 hns3_mp_req_stop_rxtx(dev); 5937 /* Prevent crashes when queues are still in use. 
*/ 5938 rte_delay_ms(hw->cfg_max_queues); 5939 5940 rte_spinlock_lock(&hw->lock); 5941 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { 5942 hns3_tm_dev_stop_proc(hw); 5943 hns3_config_mac_tnl_int(hw, false); 5944 hns3_stop_tqps(hw); 5945 hns3_do_stop(hns); 5946 hns3_unmap_rx_interrupt(dev); 5947 hw->adapter_state = HNS3_NIC_CONFIGURED; 5948 } 5949 hns3_rx_scattered_reset(dev); 5950 rte_eal_alarm_cancel(hns3_service_handler, dev); 5951 hns3_stop_report_lse(dev); 5952 rte_spinlock_unlock(&hw->lock); 5953 5954 return 0; 5955 } 5956 5957 static int 5958 hns3_dev_close(struct rte_eth_dev *eth_dev) 5959 { 5960 struct hns3_adapter *hns = eth_dev->data->dev_private; 5961 struct hns3_hw *hw = &hns->hw; 5962 int ret = 0; 5963 5964 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5965 return 0; 5966 5967 if (hw->adapter_state == HNS3_NIC_STARTED) 5968 ret = hns3_dev_stop(eth_dev); 5969 5970 hw->adapter_state = HNS3_NIC_CLOSING; 5971 hns3_reset_abort(hns); 5972 hw->adapter_state = HNS3_NIC_CLOSED; 5973 5974 hns3_configure_all_mc_mac_addr(hns, true); 5975 hns3_remove_all_vlan_table(hns); 5976 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0); 5977 hns3_uninit_pf(eth_dev); 5978 hns3_free_all_queues(eth_dev); 5979 rte_free(hw->reset.wait_data); 5980 hns3_mp_uninit_primary(); 5981 hns3_warn(hw, "Close port %u finished", hw->data->port_id); 5982 5983 return ret; 5984 } 5985 5986 static void 5987 hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause, 5988 bool *tx_pause) 5989 { 5990 struct hns3_mac *mac = &hw->mac; 5991 uint32_t advertising = mac->advertising; 5992 uint32_t lp_advertising = mac->lp_advertising; 5993 *rx_pause = false; 5994 *tx_pause = false; 5995 5996 if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) { 5997 *rx_pause = true; 5998 *tx_pause = true; 5999 } else if (advertising & lp_advertising & 6000 HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) { 6001 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 6002 *rx_pause = true; 6003 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) 6004 *tx_pause = true; 6005 } 6006 } 6007 6008 static enum hns3_fc_mode 6009 hns3_get_autoneg_fc_mode(struct hns3_hw *hw) 6010 { 6011 enum hns3_fc_mode current_mode; 6012 bool rx_pause = false; 6013 bool tx_pause = false; 6014 6015 switch (hw->mac.media_type) { 6016 case HNS3_MEDIA_TYPE_COPPER: 6017 hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause); 6018 break; 6019 6020 /* 6021 * Flow control auto-negotiation is not supported for fiber and 6022 * backplane media types.
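* For these media types the mode most recently requested by the user * (hw->requested_fc_mode) is reported instead.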
6023 */ 6024 case HNS3_MEDIA_TYPE_FIBER: 6025 case HNS3_MEDIA_TYPE_BACKPLANE: 6026 hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled."); 6027 current_mode = hw->requested_fc_mode; 6028 goto out; 6029 default: 6030 hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).", 6031 hw->mac.media_type); 6032 current_mode = HNS3_FC_NONE; 6033 goto out; 6034 } 6035 6036 if (rx_pause && tx_pause) 6037 current_mode = HNS3_FC_FULL; 6038 else if (rx_pause) 6039 current_mode = HNS3_FC_RX_PAUSE; 6040 else if (tx_pause) 6041 current_mode = HNS3_FC_TX_PAUSE; 6042 else 6043 current_mode = HNS3_FC_NONE; 6044 6045 out: 6046 return current_mode; 6047 } 6048 6049 static enum hns3_fc_mode 6050 hns3_get_current_fc_mode(struct rte_eth_dev *dev) 6051 { 6052 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6053 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6054 struct hns3_mac *mac = &hw->mac; 6055 6056 /* 6057 * When the flow control mode is obtained, the device may not have completed 6058 * auto-negotiation. It is necessary to wait for link establishment. 6059 */ 6060 (void)hns3_dev_link_update(dev, 1); 6061 6062 /* 6063 * If the link auto-negotiation of the NIC is disabled, or the flow 6064 * control auto-negotiation is not supported, the forced flow control 6065 * mode is used. 6066 */ 6067 if (mac->link_autoneg == 0 || !pf->support_fc_autoneg) 6068 return hw->requested_fc_mode; 6069 6070 return hns3_get_autoneg_fc_mode(hw); 6071 } 6072 6073 static int 6074 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 6075 { 6076 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6077 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6078 enum hns3_fc_mode current_mode; 6079 6080 current_mode = hns3_get_current_fc_mode(dev); 6081 switch (current_mode) { 6082 case HNS3_FC_FULL: 6083 fc_conf->mode = RTE_ETH_FC_FULL; 6084 break; 6085 case HNS3_FC_TX_PAUSE: 6086 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 6087 break; 6088 case HNS3_FC_RX_PAUSE: 6089 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 6090 break; 6091 case HNS3_FC_NONE: 6092 default: 6093 fc_conf->mode = RTE_ETH_FC_NONE; 6094 break; 6095 } 6096 6097 fc_conf->pause_time = pf->pause_time; 6098 fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0; 6099 6100 return 0; 6101 } 6102 6103 static int 6104 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) 6105 { 6106 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 6107 6108 if (!pf->support_fc_autoneg) { 6109 if (autoneg != 0) { 6110 hns3_err(hw, "unsupported fc auto-negotiation setting."); 6111 return -EOPNOTSUPP; 6112 } 6113 6114 /* 6115 * Flow control auto-negotiation of the NIC is not supported, 6116 * but other auto-negotiation features may be supported. 6117 */ 6118 if (autoneg != hw->mac.link_autoneg) { 6119 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!"); 6120 return -EOPNOTSUPP; 6121 } 6122 6123 return 0; 6124 } 6125 6126 /* 6127 * If flow control auto-negotiation of the NIC is supported, all 6128 * auto-negotiation features are supported.
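* Hence only consistency with the current link auto-negotiation state * is checked here; the state itself is changed via 'link_speeds' in * struct rte_eth_conf.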
6129 */ 6130 if (autoneg != hw->mac.link_autoneg) { 6131 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!"); 6132 return -EOPNOTSUPP; 6133 } 6134 6135 return 0; 6136 } 6137 6138 static int 6139 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 6140 { 6141 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6142 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6143 int ret; 6144 6145 if (fc_conf->high_water || fc_conf->low_water || 6146 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) { 6147 hns3_err(hw, "Unsupported flow control settings specified, " 6148 "high_water(%u), low_water(%u), send_xon(%u) and " 6149 "mac_ctrl_frame_fwd(%u) must be set to '0'", 6150 fc_conf->high_water, fc_conf->low_water, 6151 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd); 6152 return -EINVAL; 6153 } 6154 6155 ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg); 6156 if (ret) 6157 return ret; 6158 6159 if (!fc_conf->pause_time) { 6160 hns3_err(hw, "Invalid pause time %u setting.", 6161 fc_conf->pause_time); 6162 return -EINVAL; 6163 } 6164 6165 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 6166 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) { 6167 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. " 6168 "current_fc_status = %d", hw->current_fc_status); 6169 return -EOPNOTSUPP; 6170 } 6171 6172 if (hw->num_tc > 1 && !pf->support_multi_tc_pause) { 6173 hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported."); 6174 return -EOPNOTSUPP; 6175 } 6176 6177 rte_spinlock_lock(&hw->lock); 6178 ret = hns3_fc_enable(dev, fc_conf); 6179 rte_spinlock_unlock(&hw->lock); 6180 6181 return ret; 6182 } 6183 6184 static int 6185 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, 6186 struct rte_eth_pfc_conf *pfc_conf) 6187 { 6188 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6189 int ret; 6190 6191 if (!hns3_dev_get_support(hw, DCB)) { 6192 hns3_err(hw, "This port does not support dcb configurations."); 6193 return -EOPNOTSUPP; 6194 } 6195 6196 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water || 6197 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) { 6198 hns3_err(hw, "Unsupported flow control settings specified, " 6199 "high_water(%u), low_water(%u), send_xon(%u) and " 6200 "mac_ctrl_frame_fwd(%u) must be set to '0'", 6201 pfc_conf->fc.high_water, pfc_conf->fc.low_water, 6202 pfc_conf->fc.send_xon, 6203 pfc_conf->fc.mac_ctrl_frame_fwd); 6204 return -EINVAL; 6205 } 6206 if (pfc_conf->fc.autoneg) { 6207 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 6208 return -EINVAL; 6209 } 6210 if (pfc_conf->fc.pause_time == 0) { 6211 hns3_err(hw, "Invalid pause time %u setting.", 6212 pfc_conf->fc.pause_time); 6213 return -EINVAL; 6214 } 6215 6216 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 6217 hw->current_fc_status == HNS3_FC_STATUS_PFC)) { 6218 hns3_err(hw, "MAC pause is enabled. Cannot set PFC." 
6219 "current_fc_status = %d", hw->current_fc_status); 6220 return -EOPNOTSUPP; 6221 } 6222 6223 rte_spinlock_lock(&hw->lock); 6224 ret = hns3_dcb_pfc_enable(dev, pfc_conf); 6225 rte_spinlock_unlock(&hw->lock); 6226 6227 return ret; 6228 } 6229 6230 static int 6231 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) 6232 { 6233 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6234 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 6235 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; 6236 int i; 6237 6238 rte_spinlock_lock(&hw->lock); 6239 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) 6240 dcb_info->nb_tcs = pf->local_max_tc; 6241 else 6242 dcb_info->nb_tcs = 1; 6243 6244 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) 6245 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; 6246 for (i = 0; i < dcb_info->nb_tcs; i++) 6247 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; 6248 6249 for (i = 0; i < hw->num_tc; i++) { 6250 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; 6251 dcb_info->tc_queue.tc_txq[0][i].base = 6252 hw->tc_queue[i].tqp_offset; 6253 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; 6254 dcb_info->tc_queue.tc_txq[0][i].nb_queue = 6255 hw->tc_queue[i].tqp_count; 6256 } 6257 rte_spinlock_unlock(&hw->lock); 6258 6259 return 0; 6260 } 6261 6262 static int 6263 hns3_reinit_dev(struct hns3_adapter *hns) 6264 { 6265 struct hns3_hw *hw = &hns->hw; 6266 int ret; 6267 6268 ret = hns3_cmd_init(hw); 6269 if (ret) { 6270 hns3_err(hw, "Failed to init cmd: %d", ret); 6271 return ret; 6272 } 6273 6274 ret = hns3_reset_all_tqps(hns); 6275 if (ret) { 6276 hns3_err(hw, "Failed to reset all queues: %d", ret); 6277 return ret; 6278 } 6279 6280 ret = hns3_init_hardware(hns); 6281 if (ret) { 6282 hns3_err(hw, "Failed to init hardware: %d", ret); 6283 return ret; 6284 } 6285 6286 ret = hns3_enable_hw_error_intr(hns, true); 6287 if (ret) { 6288 hns3_err(hw, "fail to enable hw error interrupts: %d", 6289 ret); 6290 return ret; 6291 } 6292 hns3_info(hw, "Reset done, driver initialization finished."); 6293 6294 return 0; 6295 } 6296 6297 static bool 6298 is_pf_reset_done(struct hns3_hw *hw) 6299 { 6300 uint32_t val, reg, reg_bit; 6301 6302 switch (hw->reset.level) { 6303 case HNS3_IMP_RESET: 6304 reg = HNS3_GLOBAL_RESET_REG; 6305 reg_bit = HNS3_IMP_RESET_BIT; 6306 break; 6307 case HNS3_GLOBAL_RESET: 6308 reg = HNS3_GLOBAL_RESET_REG; 6309 reg_bit = HNS3_GLOBAL_RESET_BIT; 6310 break; 6311 case HNS3_FUNC_RESET: 6312 reg = HNS3_FUN_RST_ING; 6313 reg_bit = HNS3_FUN_RST_ING_B; 6314 break; 6315 case HNS3_FLR_RESET: 6316 default: 6317 hns3_err(hw, "Wait for unsupported reset level: %d", 6318 hw->reset.level); 6319 return true; 6320 } 6321 val = hns3_read_dev(hw, reg); 6322 if (hns3_get_bit(val, reg_bit)) 6323 return false; 6324 else 6325 return true; 6326 } 6327 6328 bool 6329 hns3_is_reset_pending(struct hns3_adapter *hns) 6330 { 6331 struct hns3_hw *hw = &hns->hw; 6332 enum hns3_reset_level reset; 6333 6334 hns3_check_event_cause(hns, NULL); 6335 reset = hns3_get_reset_level(hns, &hw->reset.pending); 6336 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6337 hw->reset.level < reset) { 6338 hns3_warn(hw, "High level reset %d is pending", reset); 6339 return true; 6340 } 6341 reset = hns3_get_reset_level(hns, &hw->reset.request); 6342 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && 6343 hw->reset.level < reset) { 6344 hns3_warn(hw, "High level reset %d is 
requested", reset); 6345 return true; 6346 } 6347 return false; 6348 } 6349 6350 static int 6351 hns3_wait_hardware_ready(struct hns3_adapter *hns) 6352 { 6353 struct hns3_hw *hw = &hns->hw; 6354 struct hns3_wait_data *wait_data = hw->reset.wait_data; 6355 struct timeval tv; 6356 6357 if (wait_data->result == HNS3_WAIT_SUCCESS) 6358 return 0; 6359 else if (wait_data->result == HNS3_WAIT_TIMEOUT) { 6360 hns3_clock_gettime(&tv); 6361 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", 6362 tv.tv_sec, tv.tv_usec); 6363 return -ETIME; 6364 } else if (wait_data->result == HNS3_WAIT_REQUEST) 6365 return -EAGAIN; 6366 6367 wait_data->hns = hns; 6368 wait_data->check_completion = is_pf_reset_done; 6369 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT * 6370 HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms(); 6371 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC; 6372 wait_data->count = HNS3_RESET_WAIT_CNT; 6373 wait_data->result = HNS3_WAIT_REQUEST; 6374 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); 6375 return -EAGAIN; 6376 } 6377 6378 static int 6379 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) 6380 { 6381 struct hns3_cmd_desc desc; 6382 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data; 6383 6384 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); 6385 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1); 6386 req->fun_reset_vfid = func_id; 6387 6388 return hns3_cmd_send(hw, &desc, 1); 6389 } 6390 6391 static int 6392 hns3_imp_reset_cmd(struct hns3_hw *hw) 6393 { 6394 struct hns3_cmd_desc desc; 6395 6396 hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); 6397 desc.data[0] = 0xeedd; 6398 6399 return hns3_cmd_send(hw, &desc, 1); 6400 } 6401 6402 static void 6403 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) 6404 { 6405 struct hns3_hw *hw = &hns->hw; 6406 struct timeval tv; 6407 uint32_t val; 6408 6409 hns3_clock_gettime(&tv); 6410 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) || 6411 hns3_read_dev(hw, HNS3_FUN_RST_ING)) { 6412 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld", 6413 tv.tv_sec, tv.tv_usec); 6414 return; 6415 } 6416 6417 switch (reset_level) { 6418 case HNS3_IMP_RESET: 6419 hns3_imp_reset_cmd(hw); 6420 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", 6421 tv.tv_sec, tv.tv_usec); 6422 break; 6423 case HNS3_GLOBAL_RESET: 6424 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG); 6425 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1); 6426 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val); 6427 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld", 6428 tv.tv_sec, tv.tv_usec); 6429 break; 6430 case HNS3_FUNC_RESET: 6431 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld", 6432 tv.tv_sec, tv.tv_usec); 6433 /* schedule again to check later */ 6434 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending); 6435 hns3_schedule_reset(hns); 6436 break; 6437 default: 6438 hns3_warn(hw, "Unsupported reset level: %d", reset_level); 6439 return; 6440 } 6441 hns3_atomic_clear_bit(reset_level, &hw->reset.request); 6442 } 6443 6444 static enum hns3_reset_level 6445 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) 6446 { 6447 struct hns3_hw *hw = &hns->hw; 6448 enum hns3_reset_level reset_level = HNS3_NONE_RESET; 6449 6450 /* Return the highest priority reset level amongst all */ 6451 if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels)) 6452 reset_level = HNS3_IMP_RESET; 6453 else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels)) 6454 reset_level =
HNS3_GLOBAL_RESET; 6455 else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels)) 6456 reset_level = HNS3_FUNC_RESET; 6457 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels)) 6458 reset_level = HNS3_FLR_RESET; 6459 6460 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level) 6461 return HNS3_NONE_RESET; 6462 6463 return reset_level; 6464 } 6465 6466 static void 6467 hns3_record_imp_error(struct hns3_adapter *hns) 6468 { 6469 struct hns3_hw *hw = &hns->hw; 6470 uint32_t reg_val; 6471 6472 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 6473 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) { 6474 hns3_warn(hw, "Detected IMP RD poison!"); 6475 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0); 6476 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 6477 } 6478 6479 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) { 6480 hns3_warn(hw, "Detected IMP CMDQ error!"); 6481 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0); 6482 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 6483 } 6484 } 6485 6486 static int 6487 hns3_prepare_reset(struct hns3_adapter *hns) 6488 { 6489 struct hns3_hw *hw = &hns->hw; 6490 uint32_t reg_val; 6491 int ret; 6492 6493 switch (hw->reset.level) { 6494 case HNS3_FUNC_RESET: 6495 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID); 6496 if (ret) 6497 return ret; 6498 6499 /* 6500 * After performing PF reset, it is not necessary to do the 6501 * mailbox handling or send any command to firmware, because 6502 * any mailbox handling or command to firmware is only valid 6503 * after hns3_cmd_init is called. 6504 */ 6505 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); 6506 hw->reset.stats.request_cnt++; 6507 break; 6508 case HNS3_IMP_RESET: 6509 hns3_record_imp_error(hns); 6510 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 6511 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val | 6512 BIT(HNS3_VECTOR0_IMP_RESET_INT_B)); 6513 break; 6514 default: 6515 break; 6516 } 6517 return 0; 6518 } 6519 6520 static int 6521 hns3_set_rst_done(struct hns3_hw *hw) 6522 { 6523 struct hns3_pf_rst_done_cmd *req; 6524 struct hns3_cmd_desc desc; 6525 6526 req = (struct hns3_pf_rst_done_cmd *)desc.data; 6527 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false); 6528 req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT; 6529 return hns3_cmd_send(hw, &desc, 1); 6530 } 6531 6532 static int 6533 hns3_stop_service(struct hns3_adapter *hns) 6534 { 6535 struct hns3_hw *hw = &hns->hw; 6536 struct rte_eth_dev *eth_dev; 6537 6538 eth_dev = &rte_eth_devices[hw->data->port_id]; 6539 hw->mac.link_status = RTE_ETH_LINK_DOWN; 6540 if (hw->adapter_state == HNS3_NIC_STARTED) { 6541 rte_eal_alarm_cancel(hns3_service_handler, eth_dev); 6542 hns3_update_linkstatus_and_event(hw, false); 6543 } 6544 6545 hns3_set_rxtx_function(eth_dev); 6546 rte_wmb(); 6547 /* Disable datapath on secondary process. */ 6548 hns3_mp_req_stop_rxtx(eth_dev); 6549 rte_delay_ms(hw->cfg_max_queues); 6550 6551 rte_spinlock_lock(&hw->lock); 6552 if (hns->hw.adapter_state == HNS3_NIC_STARTED || 6553 hw->adapter_state == HNS3_NIC_STOPPING) { 6554 hns3_enable_all_queues(hw, false); 6555 hns3_do_stop(hns); 6556 hw->reset.mbuf_deferred_free = true; 6557 } else 6558 hw->reset.mbuf_deferred_free = false; 6559 6560 /* 6561 * It is cumbersome for hardware to pick-and-choose entries for deletion 6562 * from table space.
Hence, for a function reset, software intervention is 6563 * required to delete the entries. 6564 */ 6565 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) 6566 hns3_configure_all_mc_mac_addr(hns, true); 6567 rte_spinlock_unlock(&hw->lock); 6568 6569 return 0; 6570 } 6571 6572 static int 6573 hns3_start_service(struct hns3_adapter *hns) 6574 { 6575 struct hns3_hw *hw = &hns->hw; 6576 struct rte_eth_dev *eth_dev; 6577 6578 if (hw->reset.level == HNS3_IMP_RESET || 6579 hw->reset.level == HNS3_GLOBAL_RESET) 6580 hns3_set_rst_done(hw); 6581 eth_dev = &rte_eth_devices[hw->data->port_id]; 6582 hns3_set_rxtx_function(eth_dev); 6583 hns3_mp_req_start_rxtx(eth_dev); 6584 if (hw->adapter_state == HNS3_NIC_STARTED) { 6585 /* 6586 * The parent function of this API already holds the hns3_hw.lock; the 6587 * hns3_service_handler may report an LSC event, and in a bonding application 6588 * that calls back into driver ops which may acquire the hns3_hw.lock 6589 * again, thus leading to deadlock. 6590 * We defer the call to hns3_service_handler to avoid the deadlock. 6591 */ 6592 rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL, 6593 hns3_service_handler, eth_dev); 6594 6595 /* Enable interrupt of all rx queues before enabling queues */ 6596 hns3_dev_all_rx_queue_intr_enable(hw, true); 6597 /* 6598 * The enable state of each rxq and txq will be recovered after 6599 * reset, so we need to restore it before enabling all the tqps; 6600 */ 6601 hns3_restore_tqp_enable_state(hw); 6602 /* 6603 * When the initialization is finished, enable the queues to receive 6604 * and transmit packets. 6605 */ 6606 hns3_enable_all_queues(hw, true); 6607 } 6608 6609 return 0; 6610 } 6611 6612 static int 6613 hns3_restore_conf(struct hns3_adapter *hns) 6614 { 6615 struct hns3_hw *hw = &hns->hw; 6616 int ret; 6617 6618 ret = hns3_configure_all_mac_addr(hns, false); 6619 if (ret) 6620 return ret; 6621 6622 ret = hns3_configure_all_mc_mac_addr(hns, false); 6623 if (ret) 6624 goto err_mc_mac; 6625 6626 ret = hns3_dev_promisc_restore(hns); 6627 if (ret) 6628 goto err_promisc; 6629 6630 ret = hns3_restore_vlan_table(hns); 6631 if (ret) 6632 goto err_promisc; 6633 6634 ret = hns3_restore_vlan_conf(hns); 6635 if (ret) 6636 goto err_promisc; 6637 6638 ret = hns3_restore_all_fdir_filter(hns); 6639 if (ret) 6640 goto err_promisc; 6641 6642 ret = hns3_restore_ptp(hns); 6643 if (ret) 6644 goto err_promisc; 6645 6646 ret = hns3_restore_rx_interrupt(hw); 6647 if (ret) 6648 goto err_promisc; 6649 6650 ret = hns3_restore_gro_conf(hw); 6651 if (ret) 6652 goto err_promisc; 6653 6654 ret = hns3_restore_fec(hw); 6655 if (ret) 6656 goto err_promisc; 6657 6658 if (hns->hw.adapter_state == HNS3_NIC_STARTED) { 6659 ret = hns3_do_start(hns, false); 6660 if (ret) 6661 goto err_promisc; 6662 hns3_info(hw, "hns3 dev restart successful!"); 6663 } else if (hw->adapter_state == HNS3_NIC_STOPPING) 6664 hw->adapter_state = HNS3_NIC_CONFIGURED; 6665 return 0; 6666 6667 err_promisc: 6668 hns3_configure_all_mc_mac_addr(hns, true); 6669 err_mc_mac: 6670 hns3_configure_all_mac_addr(hns, true); 6671 return ret; 6672 } 6673 6674 static void 6675 hns3_reset_service(void *param) 6676 { 6677 struct hns3_adapter *hns = (struct hns3_adapter *)param; 6678 struct hns3_hw *hw = &hns->hw; 6679 enum hns3_reset_level reset_level; 6680 struct timeval tv_delta; 6681 struct timeval tv_start; 6682 struct timeval tv; 6683 uint64_t msec; 6684 int ret; 6685 6686 /* 6687 * If the interrupt is not triggered within the delay time, 6688 * the interrupt may have been lost.
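* (This is the SCHEDULE_DEFERRED case tested below.)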
It is necessary to handle 6689 * the interrupt to recover from the error. 6690 */ 6691 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 6692 SCHEDULE_DEFERRED) { 6693 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, 6694 __ATOMIC_RELAXED); 6695 hns3_err(hw, "Handling interrupts in delayed tasks"); 6696 hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]); 6697 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 6698 if (reset_level == HNS3_NONE_RESET) { 6699 hns3_err(hw, "No reset level is set, try IMP reset"); 6700 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); 6701 } 6702 } 6703 __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED); 6704 6705 /* 6706 * Check if there is any ongoing reset in the hardware. This status can 6707 * be checked from reset_pending. If there is one, we need to wait for the 6708 * hardware to complete the reset. 6709 * a. If we are able to figure out in reasonable time that the hardware 6710 * has fully reset, then we can proceed with the driver and client 6711 * reset. 6712 * b. Else, we can come back later to check this status, so reschedule 6713 * now. 6714 */ 6715 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 6716 if (reset_level != HNS3_NONE_RESET) { 6717 hns3_clock_gettime(&tv_start); 6718 ret = hns3_reset_process(hns, reset_level); 6719 hns3_clock_gettime(&tv); 6720 timersub(&tv, &tv_start, &tv_delta); 6721 msec = hns3_clock_calctime_ms(&tv_delta); 6722 if (msec > HNS3_RESET_PROCESS_MS) 6723 hns3_err(hw, "reset level %d handling took a long time, delta = %" PRIu64 6724 " ms time=%ld.%.6ld", 6725 hw->reset.level, msec, 6726 tv.tv_sec, tv.tv_usec); 6727 if (ret == -EAGAIN) 6728 return; 6729 } 6730 6731 /* Check if we got any *new* reset requests to be honored */ 6732 reset_level = hns3_get_reset_level(hns, &hw->reset.request); 6733 if (reset_level != HNS3_NONE_RESET) 6734 hns3_msix_process(hns, reset_level); 6735 } 6736 6737 static unsigned int 6738 hns3_get_speed_capa_num(uint16_t device_id) 6739 { 6740 unsigned int num; 6741 6742 switch (device_id) { 6743 case HNS3_DEV_ID_25GE: 6744 case HNS3_DEV_ID_25GE_RDMA: 6745 num = 2; 6746 break; 6747 case HNS3_DEV_ID_100G_RDMA_MACSEC: 6748 case HNS3_DEV_ID_200G_RDMA: 6749 num = 1; 6750 break; 6751 default: 6752 num = 0; 6753 break; 6754 } 6755 6756 return num; 6757 } 6758 6759 static int 6760 hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa, 6761 uint16_t device_id) 6762 { 6763 switch (device_id) { 6764 case HNS3_DEV_ID_25GE: 6765 /* fallthrough */ 6766 case HNS3_DEV_ID_25GE_RDMA: 6767 speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed; 6768 speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa; 6769 6770 /* In HNS3 devices, the 25G NIC is compatible with the 10G rate */ 6771 speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed; 6772 speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa; 6773 break; 6774 case HNS3_DEV_ID_100G_RDMA_MACSEC: 6775 speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed; 6776 speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa; 6777 break; 6778 case HNS3_DEV_ID_200G_RDMA: 6779 speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed; 6780 speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa; 6781 break; 6782 default: 6783 return -ENOTSUP; 6784 } 6785 6786 return 0; 6787 } 6788 6789 static int 6790 hns3_fec_get_capability(struct rte_eth_dev *dev, 6791 struct rte_eth_fec_capa *speed_fec_capa, 6792 unsigned int num) 6793 { 6794 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6795 struct rte_pci_device *pci_dev =
RTE_ETH_DEV_TO_PCI(dev); 6796 uint16_t device_id = pci_dev->id.device_id; 6797 unsigned int capa_num; 6798 int ret; 6799 6800 capa_num = hns3_get_speed_capa_num(device_id); 6801 if (capa_num == 0) { 6802 hns3_err(hw, "device(0x%x) is not supported by hns3 PMD", 6803 device_id); 6804 return -ENOTSUP; 6805 } 6806 6807 if (speed_fec_capa == NULL || num < capa_num) 6808 return capa_num; 6809 6810 ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id); 6811 if (ret) 6812 return -ENOTSUP; 6813 6814 return capa_num; 6815 } 6816 6817 static int 6818 get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) 6819 { 6820 struct hns3_config_fec_cmd *req; 6821 struct hns3_cmd_desc desc; 6822 int ret; 6823 6824 /* 6825 * CMD(HNS3_OPC_CONFIG_FEC_MODE) read is not supported 6826 * in devices with a link speed 6827 * below 10 Gbps. 6828 */ 6829 if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) { 6830 *state = 0; 6831 return 0; 6832 } 6833 6834 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true); 6835 req = (struct hns3_config_fec_cmd *)desc.data; 6836 ret = hns3_cmd_send(hw, &desc, 1); 6837 if (ret) { 6838 hns3_err(hw, "get current fec auto state failed, ret = %d", 6839 ret); 6840 return ret; 6841 } 6842 6843 *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B); 6844 return 0; 6845 } 6846 6847 static int 6848 hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) 6849 { 6850 struct hns3_sfp_info_cmd *resp; 6851 uint32_t tmp_fec_capa; 6852 uint8_t auto_state; 6853 struct hns3_cmd_desc desc; 6854 int ret; 6855 6856 /* 6857 * If the link is down and AUTO is enabled, AUTO is returned; otherwise, 6858 * the configured FEC mode is returned. 6859 * If the link is up, the current FEC mode is returned. 6860 */ 6861 if (hw->mac.link_status == RTE_ETH_LINK_DOWN) { 6862 ret = get_current_fec_auto_state(hw, &auto_state); 6863 if (ret) 6864 return ret; 6865 6866 if (auto_state == 0x1) { 6867 *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO); 6868 return 0; 6869 } 6870 } 6871 6872 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true); 6873 resp = (struct hns3_sfp_info_cmd *)desc.data; 6874 resp->query_type = HNS3_ACTIVE_QUERY; 6875 6876 ret = hns3_cmd_send(hw, &desc, 1); 6877 if (ret == -EOPNOTSUPP) { 6878 hns3_err(hw, "IMP does not support getting FEC, ret = %d", ret); 6879 return ret; 6880 } else if (ret) { 6881 hns3_err(hw, "get FEC failed, ret = %d", ret); 6882 return ret; 6883 } 6884 6885 /* 6886 * The FEC mode order defined in hns3 hardware is inconsistent with 6887 * that defined in the ethdev library. So the sequence needs 6888 * to be converted.
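* The switch below maps the hardware encoding (HNS3_HW_FEC_MODE_*) to * the ethdev RTE_ETH_FEC_MODE_CAPA_MASK() bits.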
6889 */ 6890 switch (resp->active_fec) { 6891 case HNS3_HW_FEC_MODE_NOFEC: 6892 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 6893 break; 6894 case HNS3_HW_FEC_MODE_BASER: 6895 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER); 6896 break; 6897 case HNS3_HW_FEC_MODE_RS: 6898 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS); 6899 break; 6900 default: 6901 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 6902 break; 6903 } 6904 6905 *fec_capa = tmp_fec_capa; 6906 return 0; 6907 } 6908 6909 static int 6910 hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa) 6911 { 6912 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6913 6914 return hns3_fec_get_internal(hw, fec_capa); 6915 } 6916 6917 static int 6918 hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) 6919 { 6920 struct hns3_config_fec_cmd *req; 6921 struct hns3_cmd_desc desc; 6922 int ret; 6923 6924 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false); 6925 6926 req = (struct hns3_config_fec_cmd *)desc.data; 6927 switch (mode) { 6928 case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC): 6929 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6930 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF); 6931 break; 6932 case RTE_ETH_FEC_MODE_CAPA_MASK(BASER): 6933 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6934 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER); 6935 break; 6936 case RTE_ETH_FEC_MODE_CAPA_MASK(RS): 6937 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 6938 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS); 6939 break; 6940 case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO): 6941 hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1); 6942 break; 6943 default: 6944 return 0; 6945 } 6946 ret = hns3_cmd_send(hw, &desc, 1); 6947 if (ret) 6948 hns3_err(hw, "set fec mode failed, ret = %d", ret); 6949 6950 return ret; 6951 } 6952 6953 static uint32_t 6954 get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa) 6955 { 6956 struct hns3_mac *mac = &hw->mac; 6957 uint32_t cur_capa; 6958 6959 switch (mac->link_speed) { 6960 case RTE_ETH_SPEED_NUM_10G: 6961 cur_capa = fec_capa[1].capa; 6962 break; 6963 case RTE_ETH_SPEED_NUM_25G: 6964 case RTE_ETH_SPEED_NUM_100G: 6965 case RTE_ETH_SPEED_NUM_200G: 6966 cur_capa = fec_capa[0].capa; 6967 break; 6968 default: 6969 cur_capa = 0; 6970 break; 6971 } 6972 6973 return cur_capa; 6974 } 6975 6976 static bool 6977 is_fec_mode_one_bit_set(uint32_t mode) 6978 { 6979 int cnt = 0; 6980 uint8_t i; 6981 6982 /* walk all 32 bits of the mode, not just sizeof() bytes */ for (i = 0; i < sizeof(mode) * 8; i++) 6983 if (mode >> i & 0x1) 6984 cnt++; 6985 6986 return cnt == 1; 6987 } 6988 6989 static int 6990 hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) 6991 { 6992 #define FEC_CAPA_NUM 2 6993 struct hns3_adapter *hns = dev->data->dev_private; 6994 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 6995 struct hns3_pf *pf = &hns->pf; 6996 6997 struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM]; 6998 uint32_t cur_capa; 6999 uint32_t num = FEC_CAPA_NUM; 7000 int ret; 7001 7002 ret = hns3_fec_get_capability(dev, fec_capa, num); 7003 if (ret < 0) 7004 return ret; 7005 7006 /* The HNS3 PMD supports only FEC modes with a single bit set, e.g. 0x1, 0x4 */ 7007 if (!is_fec_mode_one_bit_set(mode)) { 7008 hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, " 7009 "the FEC mode should have only one bit set", mode); 7010 return -EINVAL; 7011 } 7012 7013 /* 7014 * Check whether the configured mode is within the FEC capability. 7015 * If not, the configured mode will not be supported.
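* The capability set for the current MAC link speed is selected by * get_current_speed_fec_cap().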
7016 */ 7017 cur_capa = get_current_speed_fec_cap(hw, fec_capa); 7018 if (!(cur_capa & mode)) { 7019 hns3_err(hw, "unsupported FEC mode = 0x%x", mode); 7020 return -EINVAL; 7021 } 7022 7023 rte_spinlock_lock(&hw->lock); 7024 ret = hns3_set_fec_hw(hw, mode); 7025 if (ret) { 7026 rte_spinlock_unlock(&hw->lock); 7027 return ret; 7028 } 7029 7030 pf->fec_mode = mode; 7031 rte_spinlock_unlock(&hw->lock); 7032 7033 return 0; 7034 } 7035 7036 static int 7037 hns3_restore_fec(struct hns3_hw *hw) 7038 { 7039 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 7040 struct hns3_pf *pf = &hns->pf; 7041 uint32_t mode = pf->fec_mode; 7042 int ret; 7043 7044 ret = hns3_set_fec_hw(hw, mode); 7045 if (ret) 7046 hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d", 7047 mode, ret); 7048 7049 return ret; 7050 } 7051 7052 static int 7053 hns3_query_dev_fec_info(struct hns3_hw *hw) 7054 { 7055 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 7056 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns); 7057 int ret; 7058 7059 ret = hns3_fec_get_internal(hw, &pf->fec_mode); 7060 if (ret) 7061 hns3_err(hw, "query device FEC info failed, ret = %d", ret); 7062 7063 return ret; 7064 } 7065 7066 static bool 7067 hns3_optical_module_existed(struct hns3_hw *hw) 7068 { 7069 struct hns3_cmd_desc desc; 7070 bool existed; 7071 int ret; 7072 7073 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true); 7074 ret = hns3_cmd_send(hw, &desc, 1); 7075 if (ret) { 7076 hns3_err(hw, 7077 "fail to get optical module exist state, ret = %d.\n", 7078 ret); 7079 return false; 7080 } 7081 existed = !!desc.data[0]; 7082 7083 return existed; 7084 } 7085 7086 static int 7087 hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, 7088 uint32_t len, uint8_t *data) 7089 { 7090 #define HNS3_SFP_INFO_CMD_NUM 6 7091 #define HNS3_SFP_INFO_MAX_LEN \ 7092 (HNS3_SFP_INFO_BD0_LEN + \ 7093 (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN) 7094 struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM]; 7095 struct hns3_sfp_info_bd0_cmd *sfp_info_bd0; 7096 uint16_t read_len; 7097 uint16_t copy_len; 7098 int ret; 7099 int i; 7100 7101 for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) { 7102 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM, 7103 true); 7104 if (i < HNS3_SFP_INFO_CMD_NUM - 1) 7105 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 7106 } 7107 7108 sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data; 7109 sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset); 7110 read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN); 7111 sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len); 7112 7113 ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); 7114 if (ret) { 7115 hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n", 7116 ret); 7117 return ret; 7118 } 7119 7120 /* The data format in BD0 is different from the others.
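* BD0 carries HNS3_SFP_INFO_BD0_LEN bytes of EEPROM data, while each * following BD carries HNS3_SFP_INFO_BDX_LEN bytes.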
*/ 7121 copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN); 7122 memcpy(data, sfp_info_bd0->data, copy_len); 7123 read_len = copy_len; 7124 7125 for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) { 7126 if (read_len >= len) 7127 break; 7128 7129 copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN); 7130 memcpy(data + read_len, desc[i].data, copy_len); 7131 read_len += copy_len; 7132 } 7133 7134 return (int)read_len; 7135 } 7136 7137 static int 7138 hns3_get_module_eeprom(struct rte_eth_dev *dev, 7139 struct rte_dev_eeprom_info *info) 7140 { 7141 struct hns3_adapter *hns = dev->data->dev_private; 7142 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7143 uint32_t offset = info->offset; 7144 uint32_t len = info->length; 7145 uint8_t *data = info->data; 7146 uint32_t read_len = 0; 7147 7148 if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER) 7149 return -ENOTSUP; 7150 7151 if (!hns3_optical_module_existed(hw)) { 7152 hns3_err(hw, "fail to read module EEPROM: no module is connected.\n"); 7153 return -EIO; 7154 } 7155 7156 while (read_len < len) { 7157 int ret; 7158 ret = hns3_get_module_eeprom_data(hw, offset + read_len, 7159 len - read_len, 7160 data + read_len); 7161 if (ret < 0) 7162 return -EIO; 7163 read_len += ret; 7164 } 7165 7166 return 0; 7167 } 7168 7169 static int 7170 hns3_get_module_info(struct rte_eth_dev *dev, 7171 struct rte_eth_dev_module_info *modinfo) 7172 { 7173 #define HNS3_SFF8024_ID_SFP 0x03 7174 #define HNS3_SFF8024_ID_QSFP_8438 0x0c 7175 #define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d 7176 #define HNS3_SFF8024_ID_QSFP28_8636 0x11 7177 #define HNS3_SFF_8636_V1_3 0x03 7178 struct hns3_adapter *hns = dev->data->dev_private; 7179 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 7180 struct rte_dev_eeprom_info info; 7181 struct hns3_sfp_type sfp_type; 7182 int ret; 7183 7184 memset(&sfp_type, 0, sizeof(sfp_type)); 7185 memset(&info, 0, sizeof(info)); 7186 info.data = (uint8_t *)&sfp_type; 7187 info.length = sizeof(sfp_type); 7188 ret = hns3_get_module_eeprom(dev, &info); 7189 if (ret) 7190 return ret; 7191 7192 switch (sfp_type.type) { 7193 case HNS3_SFF8024_ID_SFP: 7194 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7195 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7196 break; 7197 case HNS3_SFF8024_ID_QSFP_8438: 7198 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7199 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7200 break; 7201 case HNS3_SFF8024_ID_QSFP_8436_8636: 7202 if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) { 7203 modinfo->type = RTE_ETH_MODULE_SFF_8436; 7204 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; 7205 } else { 7206 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7207 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7208 } 7209 break; 7210 case HNS3_SFF8024_ID_QSFP28_8636: 7211 modinfo->type = RTE_ETH_MODULE_SFF_8636; 7212 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 7213 break; 7214 default: 7215 hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n", 7216 sfp_type.type, sfp_type.ext_type); 7217 return -EINVAL; 7218 } 7219 7220 return 0; 7221 } 7222 7223 void 7224 hns3_clock_gettime(struct timeval *tv) 7225 { 7226 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ 7227 #define CLOCK_TYPE CLOCK_MONOTONIC_RAW 7228 #else 7229 #define CLOCK_TYPE CLOCK_MONOTONIC 7230 #endif 7231 #define NSEC_TO_USEC_DIV 1000 7232 7233 struct timespec spec; 7234 (void)clock_gettime(CLOCK_TYPE, &spec); 7235 7236 tv->tv_sec = spec.tv_sec; 7237 tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV; 7238 } 7239 7240 uint64_t 7241 hns3_clock_calctime_ms(struct 
timeval *tv) 7242 { 7243 return (uint64_t)tv->tv_sec * MSEC_PER_SEC + 7244 tv->tv_usec / USEC_PER_MSEC; 7245 } 7246 7247 uint64_t 7248 hns3_clock_gettime_ms(void) 7249 { 7250 struct timeval tv; 7251 7252 hns3_clock_gettime(&tv); 7253 return hns3_clock_calctime_ms(&tv); 7254 } 7255 7256 static int 7257 hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) 7258 { 7259 uint32_t hint = HNS3_IO_FUNC_HINT_NONE; 7260 7261 RTE_SET_USED(key); 7262 7263 if (strcmp(value, "vec") == 0) 7264 hint = HNS3_IO_FUNC_HINT_VEC; 7265 else if (strcmp(value, "sve") == 0) 7266 hint = HNS3_IO_FUNC_HINT_SVE; 7267 else if (strcmp(value, "simple") == 0) 7268 hint = HNS3_IO_FUNC_HINT_SIMPLE; 7269 else if (strcmp(value, "common") == 0) 7270 hint = HNS3_IO_FUNC_HINT_COMMON; 7271 7272 /* If the hint is valid then update output parameters */ 7273 if (hint != HNS3_IO_FUNC_HINT_NONE) 7274 *(uint32_t *)extra_args = hint; 7275 7276 return 0; 7277 } 7278 7279 static const char * 7280 hns3_get_io_hint_func_name(uint32_t hint) 7281 { 7282 switch (hint) { 7283 case HNS3_IO_FUNC_HINT_VEC: 7284 return "vec"; 7285 case HNS3_IO_FUNC_HINT_SVE: 7286 return "sve"; 7287 case HNS3_IO_FUNC_HINT_SIMPLE: 7288 return "simple"; 7289 case HNS3_IO_FUNC_HINT_COMMON: 7290 return "common"; 7291 default: 7292 return "none"; 7293 } 7294 } 7295 7296 static int 7297 hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) 7298 { 7299 uint64_t val; 7300 7301 RTE_SET_USED(key); 7302 7303 val = strtoull(value, NULL, 16); 7304 *(uint64_t *)extra_args = val; 7305 7306 return 0; 7307 } 7308 7309 static int 7310 hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) 7311 { 7312 uint32_t val; 7313 7314 RTE_SET_USED(key); 7315 7316 val = strtoul(value, NULL, 10); 7317 7318 /* 7319 * 500ms is an empirical value in the process of mailbox communication. If 7320 * the delay value is set lower than the empirical value, mailbox 7321 * communication may fail.
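* Values outside the range (HNS3_MBX_DEF_TIME_LIMIT_MS, UINT16_MAX] are * silently ignored and the default is kept.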
7322 */ 7323 if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX) 7324 *(uint16_t *)extra_args = val; 7325 7326 return 0; 7327 } 7328 7329 void 7330 hns3_parse_devargs(struct rte_eth_dev *dev) 7331 { 7332 uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS; 7333 struct hns3_adapter *hns = dev->data->dev_private; 7334 uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7335 uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE; 7336 struct hns3_hw *hw = &hns->hw; 7337 uint64_t dev_caps_mask = 0; 7338 struct rte_kvargs *kvlist; 7339 7340 if (dev->device->devargs == NULL) 7341 return; 7342 7343 kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL); 7344 if (!kvlist) 7345 return; 7346 7347 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT, 7348 &hns3_parse_io_hint_func, &rx_func_hint); 7349 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT, 7350 &hns3_parse_io_hint_func, &tx_func_hint); 7351 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK, 7352 &hns3_parse_dev_caps_mask, &dev_caps_mask); 7353 (void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS, 7354 &hns3_parse_mbx_time_limit, &mbx_time_limit_ms); 7355 7356 rte_kvargs_free(kvlist); 7357 7358 if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7359 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT, 7360 hns3_get_io_hint_func_name(rx_func_hint)); 7361 hns->rx_func_hint = rx_func_hint; 7362 if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE) 7363 hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT, 7364 hns3_get_io_hint_func_name(tx_func_hint)); 7365 hns->tx_func_hint = tx_func_hint; 7366 7367 if (dev_caps_mask != 0) 7368 hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".", 7369 HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask); 7370 hns->dev_caps_mask = dev_caps_mask; 7371 7372 if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS) 7373 hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS, 7374 mbx_time_limit_ms); 7375 hns->mbx_time_limit_ms = mbx_time_limit_ms; 7376 } 7377 7378 static const struct eth_dev_ops hns3_eth_dev_ops = { 7379 .dev_configure = hns3_dev_configure, 7380 .dev_start = hns3_dev_start, 7381 .dev_stop = hns3_dev_stop, 7382 .dev_close = hns3_dev_close, 7383 .promiscuous_enable = hns3_dev_promiscuous_enable, 7384 .promiscuous_disable = hns3_dev_promiscuous_disable, 7385 .allmulticast_enable = hns3_dev_allmulticast_enable, 7386 .allmulticast_disable = hns3_dev_allmulticast_disable, 7387 .mtu_set = hns3_dev_mtu_set, 7388 .stats_get = hns3_stats_get, 7389 .stats_reset = hns3_stats_reset, 7390 .xstats_get = hns3_dev_xstats_get, 7391 .xstats_get_names = hns3_dev_xstats_get_names, 7392 .xstats_reset = hns3_dev_xstats_reset, 7393 .xstats_get_by_id = hns3_dev_xstats_get_by_id, 7394 .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, 7395 .dev_infos_get = hns3_dev_infos_get, 7396 .fw_version_get = hns3_fw_version_get, 7397 .rx_queue_setup = hns3_rx_queue_setup, 7398 .tx_queue_setup = hns3_tx_queue_setup, 7399 .rx_queue_release = hns3_dev_rx_queue_release, 7400 .tx_queue_release = hns3_dev_tx_queue_release, 7401 .rx_queue_start = hns3_dev_rx_queue_start, 7402 .rx_queue_stop = hns3_dev_rx_queue_stop, 7403 .tx_queue_start = hns3_dev_tx_queue_start, 7404 .tx_queue_stop = hns3_dev_tx_queue_stop, 7405 .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, 7406 .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, 7407 .rxq_info_get = hns3_rxq_info_get, 7408 .txq_info_get = hns3_txq_info_get, 7409 .rx_burst_mode_get = hns3_rx_burst_mode_get, 7410 .tx_burst_mode_get = hns3_tx_burst_mode_get, 7411 
.flow_ctrl_get = hns3_flow_ctrl_get, 7412 .flow_ctrl_set = hns3_flow_ctrl_set, 7413 .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set, 7414 .mac_addr_add = hns3_add_mac_addr, 7415 .mac_addr_remove = hns3_remove_mac_addr, 7416 .mac_addr_set = hns3_set_default_mac_addr, 7417 .set_mc_addr_list = hns3_set_mc_mac_addr_list, 7418 .link_update = hns3_dev_link_update, 7419 .dev_set_link_up = hns3_dev_set_link_up, 7420 .dev_set_link_down = hns3_dev_set_link_down, 7421 .rss_hash_update = hns3_dev_rss_hash_update, 7422 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, 7423 .reta_update = hns3_dev_rss_reta_update, 7424 .reta_query = hns3_dev_rss_reta_query, 7425 .flow_ops_get = hns3_dev_flow_ops_get, 7426 .vlan_filter_set = hns3_vlan_filter_set, 7427 .vlan_tpid_set = hns3_vlan_tpid_set, 7428 .vlan_offload_set = hns3_vlan_offload_set, 7429 .vlan_pvid_set = hns3_vlan_pvid_set, 7430 .get_reg = hns3_get_regs, 7431 .get_module_info = hns3_get_module_info, 7432 .get_module_eeprom = hns3_get_module_eeprom, 7433 .get_dcb_info = hns3_get_dcb_info, 7434 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, 7435 .fec_get_capability = hns3_fec_get_capability, 7436 .fec_get = hns3_fec_get, 7437 .fec_set = hns3_fec_set, 7438 .tm_ops_get = hns3_tm_ops_get, 7439 .tx_done_cleanup = hns3_tx_done_cleanup, 7440 .timesync_enable = hns3_timesync_enable, 7441 .timesync_disable = hns3_timesync_disable, 7442 .timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp, 7443 .timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp, 7444 .timesync_adjust_time = hns3_timesync_adjust_time, 7445 .timesync_read_time = hns3_timesync_read_time, 7446 .timesync_write_time = hns3_timesync_write_time, 7447 }; 7448 7449 static const struct hns3_reset_ops hns3_reset_ops = { 7450 .reset_service = hns3_reset_service, 7451 .stop_service = hns3_stop_service, 7452 .prepare_reset = hns3_prepare_reset, 7453 .wait_hardware_ready = hns3_wait_hardware_ready, 7454 .reinit_dev = hns3_reinit_dev, 7455 .restore_conf = hns3_restore_conf, 7456 .start_service = hns3_start_service, 7457 }; 7458 7459 static int 7460 hns3_dev_init(struct rte_eth_dev *eth_dev) 7461 { 7462 struct hns3_adapter *hns = eth_dev->data->dev_private; 7463 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 7464 struct rte_ether_addr *eth_addr; 7465 struct hns3_hw *hw = &hns->hw; 7466 int ret; 7467 7468 PMD_INIT_FUNC_TRACE(); 7469 7470 hns3_flow_init(eth_dev); 7471 7472 hns3_set_rxtx_function(eth_dev); 7473 eth_dev->dev_ops = &hns3_eth_dev_ops; 7474 eth_dev->rx_queue_count = hns3_rx_queue_count; 7475 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 7476 ret = hns3_mp_init_secondary(); 7477 if (ret) { 7478 PMD_INIT_LOG(ERR, "Failed to init for secondary " 7479 "process, ret = %d", ret); 7480 goto err_mp_init_secondary; 7481 } 7482 hw->secondary_cnt++; 7483 hns3_tx_push_init(eth_dev); 7484 return 0; 7485 } 7486 7487 ret = hns3_mp_init_primary(); 7488 if (ret) { 7489 PMD_INIT_LOG(ERR, 7490 "Failed to init for primary process, ret = %d", 7491 ret); 7492 goto err_mp_init_primary; 7493 } 7494 7495 hw->adapter_state = HNS3_NIC_UNINITIALIZED; 7496 hns->is_vf = false; 7497 hw->data = eth_dev->data; 7498 hns3_parse_devargs(eth_dev); 7499 7500 /* 7501 * Set the default max packet size according to the default MTU 7502 * value in the DPDK framework.
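* (HNS3_ETH_OVERHEAD accounts for the L2 framing overhead: Ethernet * header, CRC and VLAN tags.)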
7503 */ 7504 hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD; 7505 7506 ret = hns3_reset_init(hw); 7507 if (ret) 7508 goto err_init_reset; 7509 hw->reset.ops = &hns3_reset_ops; 7510 7511 ret = hns3_init_pf(eth_dev); 7512 if (ret) { 7513 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret); 7514 goto err_init_pf; 7515 } 7516 7517 /* Allocate memory for storing MAC addresses */ 7518 eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac", 7519 sizeof(struct rte_ether_addr) * 7520 HNS3_UC_MACADDR_NUM, 0); 7521 if (eth_dev->data->mac_addrs == NULL) { 7522 PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed " 7523 "to store MAC addresses", 7524 sizeof(struct rte_ether_addr) * 7525 HNS3_UC_MACADDR_NUM); 7526 ret = -ENOMEM; 7527 goto err_rte_zmalloc; 7528 } 7529 7530 eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr; 7531 if (!rte_is_valid_assigned_ether_addr(eth_addr)) { 7532 rte_eth_random_addr(hw->mac.mac_addr); 7533 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 7534 (struct rte_ether_addr *)hw->mac.mac_addr); 7535 hns3_warn(hw, "default mac_addr from firmware is an invalid " 7536 "unicast address, using random MAC address %s", 7537 mac_str); 7538 } 7539 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, 7540 &eth_dev->data->mac_addrs[0]); 7541 7542 hw->adapter_state = HNS3_NIC_INITIALIZED; 7543 7544 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 7545 SCHEDULE_PENDING) { 7546 hns3_err(hw, "Reschedule reset service after dev_init"); 7547 hns3_schedule_reset(hns); 7548 } else { 7549 /* IMP will wait ready flag before reset */ 7550 hns3_notify_reset_ready(hw, false); 7551 } 7552 7553 hns3_info(hw, "hns3 dev initialization successful!"); 7554 return 0; 7555 7556 err_rte_zmalloc: 7557 hns3_uninit_pf(eth_dev); 7558 7559 err_init_pf: 7560 rte_free(hw->reset.wait_data); 7561 7562 err_init_reset: 7563 hns3_mp_uninit_primary(); 7564 7565 err_mp_init_primary: 7566 err_mp_init_secondary: 7567 eth_dev->dev_ops = NULL; 7568 eth_dev->rx_pkt_burst = NULL; 7569 eth_dev->rx_descriptor_status = NULL; 7570 eth_dev->tx_pkt_burst = NULL; 7571 eth_dev->tx_pkt_prepare = NULL; 7572 eth_dev->tx_descriptor_status = NULL; 7573 return ret; 7574 } 7575 7576 static int 7577 hns3_dev_uninit(struct rte_eth_dev *eth_dev) 7578 { 7579 struct hns3_adapter *hns = eth_dev->data->dev_private; 7580 struct hns3_hw *hw = &hns->hw; 7581 7582 PMD_INIT_FUNC_TRACE(); 7583 7584 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 7585 return 0; 7586 7587 if (hw->adapter_state < HNS3_NIC_CLOSING) 7588 hns3_dev_close(eth_dev); 7589 7590 hw->adapter_state = HNS3_NIC_REMOVED; 7591 return 0; 7592 } 7593 7594 static int 7595 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 7596 struct rte_pci_device *pci_dev) 7597 { 7598 return rte_eth_dev_pci_generic_probe(pci_dev, 7599 sizeof(struct hns3_adapter), 7600 hns3_dev_init); 7601 } 7602 7603 static int 7604 eth_hns3_pci_remove(struct rte_pci_device *pci_dev) 7605 { 7606 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit); 7607 } 7608 7609 static const struct rte_pci_id pci_id_hns3_map[] = { 7610 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) }, 7611 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) }, 7612 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) }, 7613 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, 7614 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, 7615 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, 7616 { .vendor_id = 0, }, /* sentinel */ 7617 }; 7618
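/* PCI driver glue for the PF: probe/remove wrap the generic ethdev PCI helpers around hns3_dev_init()/hns3_dev_uninit() for every device ID in pci_id_hns3_map above. The devargs registered at the bottom of this file can be passed per device, e.g. (hypothetical PCI address): dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=sve,mbx_time_limit_ms=600 */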
7619 static struct rte_pci_driver rte_hns3_pmd = { 7620 .id_table = pci_id_hns3_map, 7621 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 7622 .probe = eth_hns3_pci_probe, 7623 .remove = eth_hns3_pci_remove, 7624 }; 7625 7626 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd); 7627 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map); 7628 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); 7629 RTE_PMD_REGISTER_PARAM_STRING(net_hns3, 7630 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " 7631 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common " 7632 HNS3_DEVARG_DEV_CAPS_MASK "=<hex mask> " 7633 HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16> "); 7634 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE); 7635 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE); 7636