1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018-2019 Hisilicon Limited. 3 */ 4 5 #include <errno.h> 6 #include <stdarg.h> 7 #include <stdbool.h> 8 #include <stdio.h> 9 #include <stdint.h> 10 #include <inttypes.h> 11 #include <unistd.h> 12 #include <rte_atomic.h> 13 #include <rte_bus_pci.h> 14 #include <rte_common.h> 15 #include <rte_cycles.h> 16 #include <rte_dev.h> 17 #include <rte_eal.h> 18 #include <rte_ether.h> 19 #include <rte_ethdev_driver.h> 20 #include <rte_ethdev_pci.h> 21 #include <rte_interrupts.h> 22 #include <rte_io.h> 23 #include <rte_log.h> 24 #include <rte_pci.h> 25 26 #include "hns3_ethdev.h" 27 #include "hns3_logs.h" 28 #include "hns3_rxtx.h" 29 #include "hns3_intr.h" 30 #include "hns3_regs.h" 31 #include "hns3_dcb.h" 32 #include "hns3_mp.h" 33 34 #define HNS3_DEFAULT_PORT_CONF_BURST_SIZE 32 35 #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1 36 37 #define HNS3_SERVICE_INTERVAL 1000000 /* us */ 38 #define HNS3_INVALID_PVID 0xFFFF 39 40 #define HNS3_FILTER_TYPE_VF 0 41 #define HNS3_FILTER_TYPE_PORT 1 42 #define HNS3_FILTER_FE_EGRESS_V1_B BIT(0) 43 #define HNS3_FILTER_FE_NIC_INGRESS_B BIT(0) 44 #define HNS3_FILTER_FE_NIC_EGRESS_B BIT(1) 45 #define HNS3_FILTER_FE_ROCE_INGRESS_B BIT(2) 46 #define HNS3_FILTER_FE_ROCE_EGRESS_B BIT(3) 47 #define HNS3_FILTER_FE_EGRESS (HNS3_FILTER_FE_NIC_EGRESS_B \ 48 | HNS3_FILTER_FE_ROCE_EGRESS_B) 49 #define HNS3_FILTER_FE_INGRESS (HNS3_FILTER_FE_NIC_INGRESS_B \ 50 | HNS3_FILTER_FE_ROCE_INGRESS_B) 51 52 /* Reset related Registers */ 53 #define HNS3_GLOBAL_RESET_BIT 0 54 #define HNS3_CORE_RESET_BIT 1 55 #define HNS3_IMP_RESET_BIT 2 56 #define HNS3_FUN_RST_ING_B 0 57 58 #define HNS3_VECTOR0_IMP_RESET_INT_B 1 59 #define HNS3_VECTOR0_IMP_CMDQ_ERR_B 4U 60 #define HNS3_VECTOR0_IMP_RD_POISON_B 5U 61 #define HNS3_VECTOR0_ALL_MSIX_ERR_B 6U 62 63 #define HNS3_RESET_WAIT_MS 100 64 #define HNS3_RESET_WAIT_CNT 200 65 66 /* FEC mode order defined in HNS3 hardware */ 67 #define HNS3_HW_FEC_MODE_NOFEC 0 68 #define HNS3_HW_FEC_MODE_BASER 1 69 #define HNS3_HW_FEC_MODE_RS 2 70 71 enum hns3_evt_cause { 72 HNS3_VECTOR0_EVENT_RST, 73 HNS3_VECTOR0_EVENT_MBX, 74 HNS3_VECTOR0_EVENT_ERR, 75 HNS3_VECTOR0_EVENT_OTHER, 76 }; 77 78 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = { 79 { ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 80 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 81 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) }, 82 83 { ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 84 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 85 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) | 86 RTE_ETH_FEC_MODE_CAPA_MASK(RS) }, 87 88 { ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 89 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 90 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) }, 91 92 { ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 93 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 94 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) | 95 RTE_ETH_FEC_MODE_CAPA_MASK(RS) }, 96 97 { ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 98 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 99 RTE_ETH_FEC_MODE_CAPA_MASK(RS) }, 100 101 { ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | 102 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | 103 RTE_ETH_FEC_MODE_CAPA_MASK(RS) } 104 }; 105 106 static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns, 107 uint64_t *levels); 108 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 109 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, 110 int on); 111 static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev); 112 113 static int hns3_add_mc_addr(struct hns3_hw *hw, 114 struct rte_ether_addr *mac_addr); 115 static int hns3_remove_mc_addr(struct hns3_hw *hw, 116 struct rte_ether_addr *mac_addr); 117 static int hns3_restore_fec(struct hns3_hw *hw); 118 static int hns3_query_dev_fec_info(struct rte_eth_dev *dev); 119 120 static void 121 hns3_pf_disable_irq0(struct hns3_hw *hw) 122 { 123 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0); 124 } 125 126 static void 127 hns3_pf_enable_irq0(struct hns3_hw *hw) 128 { 129 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1); 130 } 131 132 static enum hns3_evt_cause 133 hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) 134 { 135 struct hns3_hw *hw = &hns->hw; 136 uint32_t vector0_int_stats; 137 uint32_t cmdq_src_val; 138 uint32_t hw_err_src_reg; 139 uint32_t val; 140 enum hns3_evt_cause ret; 141 142 /* fetch the events from their corresponding regs */ 143 vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); 144 cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); 145 hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); 146 147 /* 148 * Assumption: If by any chance reset and mailbox events are reported 149 * together then we will only process reset event and defer the 150 * processing of the mailbox events. Since, we would have not cleared 151 * RX CMDQ event this time we would receive again another interrupt 152 * from H/W just for the mailbox. 153 */ 154 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */ 155 rte_atomic16_set(&hw->reset.disable_cmd, 1); 156 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); 157 val = BIT(HNS3_VECTOR0_IMPRESET_INT_B); 158 if (clearval) { 159 hw->reset.stats.imp_cnt++; 160 hns3_warn(hw, "IMP reset detected, clear reset status"); 161 } else { 162 hns3_schedule_delayed_reset(hns); 163 hns3_warn(hw, "IMP reset detected, don't clear reset status"); 164 } 165 166 ret = HNS3_VECTOR0_EVENT_RST; 167 goto out; 168 } 169 170 /* Global reset */ 171 if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) { 172 rte_atomic16_set(&hw->reset.disable_cmd, 1); 173 hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending); 174 val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); 175 if (clearval) { 176 hw->reset.stats.global_cnt++; 177 hns3_warn(hw, "Global reset detected, clear reset status"); 178 } else { 179 hns3_schedule_delayed_reset(hns); 180 hns3_warn(hw, "Global reset detected, don't clear reset status"); 181 } 182 183 ret = HNS3_VECTOR0_EVENT_RST; 184 goto out; 185 } 186 187 /* check for vector0 msix event source */ 188 if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK || 189 hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) { 190 val = vector0_int_stats | hw_err_src_reg; 191 ret = HNS3_VECTOR0_EVENT_ERR; 192 goto out; 193 } 194 195 /* check for vector0 mailbox(=CMDQ RX) event source */ 196 if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) { 197 cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B); 198 val = cmdq_src_val; 199 ret = HNS3_VECTOR0_EVENT_MBX; 200 goto out; 201 } 202 203 if (clearval && (vector0_int_stats || cmdq_src_val || hw_err_src_reg)) 204 hns3_warn(hw, "vector0_int_stats:0x%x cmdq_src_val:0x%x hw_err_src_reg:0x%x", 205 vector0_int_stats, cmdq_src_val, hw_err_src_reg); 206 val = vector0_int_stats; 207 ret = HNS3_VECTOR0_EVENT_OTHER; 208 out: 209 210 if (clearval) 211 *clearval = val; 212 return ret; 213 } 214 215 static void 216 hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr) 217 { 218 if (event_type == HNS3_VECTOR0_EVENT_RST) 219 hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr); 220 else if (event_type == HNS3_VECTOR0_EVENT_MBX) 221 hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr); 222 } 223 224 static void 225 hns3_clear_all_event_cause(struct hns3_hw *hw) 226 { 227 uint32_t vector0_int_stats; 228 vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); 229 230 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) 231 hns3_warn(hw, "Probe during IMP reset interrupt"); 232 233 if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) 234 hns3_warn(hw, "Probe during Global reset interrupt"); 235 236 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST, 237 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | 238 BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | 239 BIT(HNS3_VECTOR0_CORERESET_INT_B)); 240 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0); 241 } 242 243 static void 244 hns3_interrupt_handler(void *param) 245 { 246 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 247 struct hns3_adapter *hns = dev->data->dev_private; 248 struct hns3_hw *hw = &hns->hw; 249 enum hns3_evt_cause event_cause; 250 uint32_t clearval = 0; 251 252 /* Disable interrupt */ 253 hns3_pf_disable_irq0(hw); 254 255 event_cause = hns3_check_event_cause(hns, &clearval); 256 257 /* vector 0 interrupt is shared with reset and mailbox source events. */ 258 if (event_cause == HNS3_VECTOR0_EVENT_ERR) { 259 hns3_warn(hw, "Received err interrupt"); 260 hns3_handle_msix_error(hns, &hw->reset.request); 261 hns3_handle_ras_error(hns, &hw->reset.request); 262 hns3_schedule_reset(hns); 263 } else if (event_cause == HNS3_VECTOR0_EVENT_RST) { 264 hns3_warn(hw, "Received reset interrupt"); 265 hns3_schedule_reset(hns); 266 } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) 267 hns3_dev_handle_mbx_msg(hw); 268 else 269 hns3_err(hw, "Received unknown event"); 270 271 hns3_clear_event_cause(hw, event_cause, clearval); 272 /* Enable interrupt if it is not cause by reset */ 273 hns3_pf_enable_irq0(hw); 274 } 275 276 static int 277 hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on) 278 { 279 #define HNS3_VLAN_ID_OFFSET_STEP 160 280 #define HNS3_VLAN_BYTE_SIZE 8 281 struct hns3_vlan_filter_pf_cfg_cmd *req; 282 struct hns3_hw *hw = &hns->hw; 283 uint8_t vlan_offset_byte_val; 284 struct hns3_cmd_desc desc; 285 uint8_t vlan_offset_byte; 286 uint8_t vlan_offset_base; 287 int ret; 288 289 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false); 290 291 vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP; 292 vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) / 293 HNS3_VLAN_BYTE_SIZE; 294 vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE); 295 296 req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data; 297 req->vlan_offset = vlan_offset_base; 298 req->vlan_cfg = on ? 0 : 1; 299 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 300 301 ret = hns3_cmd_send(hw, &desc, 1); 302 if (ret) 303 hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d", 304 vlan_id, ret); 305 306 return ret; 307 } 308 309 static void 310 hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id) 311 { 312 struct hns3_user_vlan_table *vlan_entry; 313 struct hns3_pf *pf = &hns->pf; 314 315 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 316 if (vlan_entry->vlan_id == vlan_id) { 317 if (vlan_entry->hd_tbl_status) 318 hns3_set_port_vlan_filter(hns, vlan_id, 0); 319 LIST_REMOVE(vlan_entry, next); 320 rte_free(vlan_entry); 321 break; 322 } 323 } 324 } 325 326 static void 327 hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id, 328 bool writen_to_tbl) 329 { 330 struct hns3_user_vlan_table *vlan_entry; 331 struct hns3_hw *hw = &hns->hw; 332 struct hns3_pf *pf = &hns->pf; 333 334 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 335 if (vlan_entry->vlan_id == vlan_id) 336 return; 337 } 338 339 vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0); 340 if (vlan_entry == NULL) { 341 hns3_err(hw, "Failed to malloc hns3 vlan table"); 342 return; 343 } 344 345 vlan_entry->hd_tbl_status = writen_to_tbl; 346 vlan_entry->vlan_id = vlan_id; 347 348 LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next); 349 } 350 351 static int 352 hns3_restore_vlan_table(struct hns3_adapter *hns) 353 { 354 struct hns3_user_vlan_table *vlan_entry; 355 struct hns3_hw *hw = &hns->hw; 356 struct hns3_pf *pf = &hns->pf; 357 uint16_t vlan_id; 358 int ret = 0; 359 360 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) 361 return hns3_vlan_pvid_configure(hns, 362 hw->port_base_vlan_cfg.pvid, 1); 363 364 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 365 if (vlan_entry->hd_tbl_status) { 366 vlan_id = vlan_entry->vlan_id; 367 ret = hns3_set_port_vlan_filter(hns, vlan_id, 1); 368 if (ret) 369 break; 370 } 371 } 372 373 return ret; 374 } 375 376 static int 377 hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) 378 { 379 struct hns3_hw *hw = &hns->hw; 380 bool writen_to_tbl = false; 381 int ret = 0; 382 383 /* 384 * When vlan filter is enabled, hardware regards packets without vlan 385 * as packets with vlan 0. So, to receive packets without vlan, vlan id 386 * 0 is not allowed to be removed by rte_eth_dev_vlan_filter. 387 */ 388 if (on == 0 && vlan_id == 0) 389 return 0; 390 391 /* 392 * When port base vlan enabled, we use port base vlan as the vlan 393 * filter condition. In this case, we don't update vlan filter table 394 * when user add new vlan or remove exist vlan, just update the 395 * vlan list. The vlan id in vlan list will be writen in vlan filter 396 * table until port base vlan disabled 397 */ 398 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { 399 ret = hns3_set_port_vlan_filter(hns, vlan_id, on); 400 writen_to_tbl = true; 401 } 402 403 if (ret == 0) { 404 if (on) 405 hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl); 406 else 407 hns3_rm_dev_vlan_table(hns, vlan_id); 408 } 409 return ret; 410 } 411 412 static int 413 hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 414 { 415 struct hns3_adapter *hns = dev->data->dev_private; 416 struct hns3_hw *hw = &hns->hw; 417 int ret; 418 419 rte_spinlock_lock(&hw->lock); 420 ret = hns3_vlan_filter_configure(hns, vlan_id, on); 421 rte_spinlock_unlock(&hw->lock); 422 return ret; 423 } 424 425 static int 426 hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type, 427 uint16_t tpid) 428 { 429 struct hns3_rx_vlan_type_cfg_cmd *rx_req; 430 struct hns3_tx_vlan_type_cfg_cmd *tx_req; 431 struct hns3_hw *hw = &hns->hw; 432 struct hns3_cmd_desc desc; 433 int ret; 434 435 if ((vlan_type != ETH_VLAN_TYPE_INNER && 436 vlan_type != ETH_VLAN_TYPE_OUTER)) { 437 hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type); 438 return -EINVAL; 439 } 440 441 if (tpid != RTE_ETHER_TYPE_VLAN) { 442 hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type); 443 return -EINVAL; 444 } 445 446 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false); 447 rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data; 448 449 if (vlan_type == ETH_VLAN_TYPE_OUTER) { 450 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid); 451 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid); 452 } else if (vlan_type == ETH_VLAN_TYPE_INNER) { 453 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid); 454 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid); 455 rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid); 456 rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid); 457 } 458 459 ret = hns3_cmd_send(hw, &desc, 1); 460 if (ret) { 461 hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d", 462 ret); 463 return ret; 464 } 465 466 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false); 467 468 tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data; 469 tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid); 470 tx_req->in_vlan_type = rte_cpu_to_le_16(tpid); 471 472 ret = hns3_cmd_send(hw, &desc, 1); 473 if (ret) 474 hns3_err(hw, "Send txvlan protocol type command fail, ret =%d", 475 ret); 476 return ret; 477 } 478 479 static int 480 hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 481 uint16_t tpid) 482 { 483 struct hns3_adapter *hns = dev->data->dev_private; 484 struct hns3_hw *hw = &hns->hw; 485 int ret; 486 487 rte_spinlock_lock(&hw->lock); 488 ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid); 489 rte_spinlock_unlock(&hw->lock); 490 return ret; 491 } 492 493 static int 494 hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns, 495 struct hns3_rx_vtag_cfg *vcfg) 496 { 497 struct hns3_vport_vtag_rx_cfg_cmd *req; 498 struct hns3_hw *hw = &hns->hw; 499 struct hns3_cmd_desc desc; 500 uint16_t vport_id; 501 uint8_t bitmap; 502 int ret; 503 504 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false); 505 506 req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data; 507 hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B, 508 vcfg->strip_tag1_en ? 1 : 0); 509 hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B, 510 vcfg->strip_tag2_en ? 1 : 0); 511 hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B, 512 vcfg->vlan1_vlan_prionly ? 1 : 0); 513 hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B, 514 vcfg->vlan2_vlan_prionly ? 1 : 0); 515 516 /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */ 517 hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B, 518 vcfg->strip_tag1_discard_en ? 1 : 0); 519 hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B, 520 vcfg->strip_tag2_discard_en ? 1 : 0); 521 /* 522 * In current version VF is not supported when PF is driven by DPDK 523 * driver, just need to configure parameters for PF vport. 524 */ 525 vport_id = HNS3_PF_FUNC_ID; 526 req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD; 527 bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE); 528 req->vf_bitmap[req->vf_offset] = bitmap; 529 530 ret = hns3_cmd_send(hw, &desc, 1); 531 if (ret) 532 hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret); 533 return ret; 534 } 535 536 static void 537 hns3_update_rx_offload_cfg(struct hns3_adapter *hns, 538 struct hns3_rx_vtag_cfg *vcfg) 539 { 540 struct hns3_pf *pf = &hns->pf; 541 memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg)); 542 } 543 544 static void 545 hns3_update_tx_offload_cfg(struct hns3_adapter *hns, 546 struct hns3_tx_vtag_cfg *vcfg) 547 { 548 struct hns3_pf *pf = &hns->pf; 549 memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg)); 550 } 551 552 static int 553 hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable) 554 { 555 struct hns3_rx_vtag_cfg rxvlan_cfg; 556 struct hns3_hw *hw = &hns->hw; 557 int ret; 558 559 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { 560 rxvlan_cfg.strip_tag1_en = false; 561 rxvlan_cfg.strip_tag2_en = enable; 562 rxvlan_cfg.strip_tag2_discard_en = false; 563 } else { 564 rxvlan_cfg.strip_tag1_en = enable; 565 rxvlan_cfg.strip_tag2_en = true; 566 rxvlan_cfg.strip_tag2_discard_en = true; 567 } 568 569 rxvlan_cfg.strip_tag1_discard_en = false; 570 rxvlan_cfg.vlan1_vlan_prionly = false; 571 rxvlan_cfg.vlan2_vlan_prionly = false; 572 rxvlan_cfg.rx_vlan_offload_en = enable; 573 574 ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg); 575 if (ret) { 576 hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret); 577 return ret; 578 } 579 580 hns3_update_rx_offload_cfg(hns, &rxvlan_cfg); 581 582 return ret; 583 } 584 585 static int 586 hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type, 587 uint8_t fe_type, bool filter_en, uint8_t vf_id) 588 { 589 struct hns3_vlan_filter_ctrl_cmd *req; 590 struct hns3_cmd_desc desc; 591 int ret; 592 593 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false); 594 595 req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data; 596 req->vlan_type = vlan_type; 597 req->vlan_fe = filter_en ? fe_type : 0; 598 req->vf_id = vf_id; 599 600 ret = hns3_cmd_send(hw, &desc, 1); 601 if (ret) 602 hns3_err(hw, "set vlan filter fail, ret =%d", ret); 603 604 return ret; 605 } 606 607 static int 608 hns3_vlan_filter_init(struct hns3_adapter *hns) 609 { 610 struct hns3_hw *hw = &hns->hw; 611 int ret; 612 613 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF, 614 HNS3_FILTER_FE_EGRESS, false, 615 HNS3_PF_FUNC_ID); 616 if (ret) { 617 hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret); 618 return ret; 619 } 620 621 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT, 622 HNS3_FILTER_FE_INGRESS, false, 623 HNS3_PF_FUNC_ID); 624 if (ret) 625 hns3_err(hw, "failed to init port vlan filter, ret = %d", ret); 626 627 return ret; 628 } 629 630 static int 631 hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable) 632 { 633 struct hns3_hw *hw = &hns->hw; 634 int ret; 635 636 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT, 637 HNS3_FILTER_FE_INGRESS, enable, 638 HNS3_PF_FUNC_ID); 639 if (ret) 640 hns3_err(hw, "failed to %s port vlan filter, ret = %d", 641 enable ? "enable" : "disable", ret); 642 643 return ret; 644 } 645 646 static int 647 hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask) 648 { 649 struct hns3_adapter *hns = dev->data->dev_private; 650 struct hns3_hw *hw = &hns->hw; 651 struct rte_eth_rxmode *rxmode; 652 unsigned int tmp_mask; 653 bool enable; 654 int ret = 0; 655 656 rte_spinlock_lock(&hw->lock); 657 rxmode = &dev->data->dev_conf.rxmode; 658 tmp_mask = (unsigned int)mask; 659 if (tmp_mask & ETH_VLAN_FILTER_MASK) { 660 /* ignore vlan filter configuration during promiscuous mode */ 661 if (!dev->data->promiscuous) { 662 /* Enable or disable VLAN filter */ 663 enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? 664 true : false; 665 666 ret = hns3_enable_vlan_filter(hns, enable); 667 if (ret) { 668 rte_spinlock_unlock(&hw->lock); 669 hns3_err(hw, "failed to %s rx filter, ret = %d", 670 enable ? "enable" : "disable", ret); 671 return ret; 672 } 673 } 674 } 675 676 if (tmp_mask & ETH_VLAN_STRIP_MASK) { 677 /* Enable or disable VLAN stripping */ 678 enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? 679 true : false; 680 681 ret = hns3_en_hw_strip_rxvtag(hns, enable); 682 if (ret) { 683 rte_spinlock_unlock(&hw->lock); 684 hns3_err(hw, "failed to %s rx strip, ret = %d", 685 enable ? "enable" : "disable", ret); 686 return ret; 687 } 688 } 689 690 rte_spinlock_unlock(&hw->lock); 691 692 return ret; 693 } 694 695 static int 696 hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns, 697 struct hns3_tx_vtag_cfg *vcfg) 698 { 699 struct hns3_vport_vtag_tx_cfg_cmd *req; 700 struct hns3_cmd_desc desc; 701 struct hns3_hw *hw = &hns->hw; 702 uint16_t vport_id; 703 uint8_t bitmap; 704 int ret; 705 706 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false); 707 708 req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data; 709 req->def_vlan_tag1 = vcfg->default_tag1; 710 req->def_vlan_tag2 = vcfg->default_tag2; 711 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B, 712 vcfg->accept_tag1 ? 1 : 0); 713 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B, 714 vcfg->accept_untag1 ? 1 : 0); 715 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B, 716 vcfg->accept_tag2 ? 1 : 0); 717 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B, 718 vcfg->accept_untag2 ? 1 : 0); 719 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B, 720 vcfg->insert_tag1_en ? 1 : 0); 721 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B, 722 vcfg->insert_tag2_en ? 1 : 0); 723 hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0); 724 725 /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */ 726 hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B, 727 vcfg->tag_shift_mode_en ? 1 : 0); 728 729 /* 730 * In current version VF is not supported when PF is driven by DPDK 731 * driver, just need to configure parameters for PF vport. 732 */ 733 vport_id = HNS3_PF_FUNC_ID; 734 req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD; 735 bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE); 736 req->vf_bitmap[req->vf_offset] = bitmap; 737 738 ret = hns3_cmd_send(hw, &desc, 1); 739 if (ret) 740 hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret); 741 742 return ret; 743 } 744 745 static int 746 hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state, 747 uint16_t pvid) 748 { 749 struct hns3_hw *hw = &hns->hw; 750 struct hns3_tx_vtag_cfg txvlan_cfg; 751 int ret; 752 753 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) { 754 txvlan_cfg.accept_tag1 = true; 755 txvlan_cfg.insert_tag1_en = false; 756 txvlan_cfg.default_tag1 = 0; 757 } else { 758 txvlan_cfg.accept_tag1 = 759 hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE; 760 txvlan_cfg.insert_tag1_en = true; 761 txvlan_cfg.default_tag1 = pvid; 762 } 763 764 txvlan_cfg.accept_untag1 = true; 765 txvlan_cfg.accept_tag2 = true; 766 txvlan_cfg.accept_untag2 = true; 767 txvlan_cfg.insert_tag2_en = false; 768 txvlan_cfg.default_tag2 = 0; 769 txvlan_cfg.tag_shift_mode_en = true; 770 771 ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg); 772 if (ret) { 773 hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid, 774 ret); 775 return ret; 776 } 777 778 hns3_update_tx_offload_cfg(hns, &txvlan_cfg); 779 return ret; 780 } 781 782 783 static void 784 hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list) 785 { 786 struct hns3_user_vlan_table *vlan_entry; 787 struct hns3_pf *pf = &hns->pf; 788 789 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 790 if (vlan_entry->hd_tbl_status) { 791 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0); 792 vlan_entry->hd_tbl_status = false; 793 } 794 } 795 796 if (is_del_list) { 797 vlan_entry = LIST_FIRST(&pf->vlan_list); 798 while (vlan_entry) { 799 LIST_REMOVE(vlan_entry, next); 800 rte_free(vlan_entry); 801 vlan_entry = LIST_FIRST(&pf->vlan_list); 802 } 803 } 804 } 805 806 static void 807 hns3_add_all_vlan_table(struct hns3_adapter *hns) 808 { 809 struct hns3_user_vlan_table *vlan_entry; 810 struct hns3_pf *pf = &hns->pf; 811 812 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { 813 if (!vlan_entry->hd_tbl_status) { 814 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1); 815 vlan_entry->hd_tbl_status = true; 816 } 817 } 818 } 819 820 static void 821 hns3_remove_all_vlan_table(struct hns3_adapter *hns) 822 { 823 struct hns3_hw *hw = &hns->hw; 824 int ret; 825 826 hns3_rm_all_vlan_table(hns, true); 827 if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) { 828 ret = hns3_set_port_vlan_filter(hns, 829 hw->port_base_vlan_cfg.pvid, 0); 830 if (ret) { 831 hns3_err(hw, "Failed to remove all vlan table, ret =%d", 832 ret); 833 return; 834 } 835 } 836 } 837 838 static int 839 hns3_update_vlan_filter_entries(struct hns3_adapter *hns, 840 uint16_t port_base_vlan_state, uint16_t new_pvid) 841 { 842 struct hns3_hw *hw = &hns->hw; 843 uint16_t old_pvid; 844 int ret; 845 846 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) { 847 old_pvid = hw->port_base_vlan_cfg.pvid; 848 if (old_pvid != HNS3_INVALID_PVID) { 849 ret = hns3_set_port_vlan_filter(hns, old_pvid, 0); 850 if (ret) { 851 hns3_err(hw, "failed to remove old pvid %u, " 852 "ret = %d", old_pvid, ret); 853 return ret; 854 } 855 } 856 857 hns3_rm_all_vlan_table(hns, false); 858 ret = hns3_set_port_vlan_filter(hns, new_pvid, 1); 859 if (ret) { 860 hns3_err(hw, "failed to add new pvid %u, ret = %d", 861 new_pvid, ret); 862 return ret; 863 } 864 } else { 865 ret = hns3_set_port_vlan_filter(hns, new_pvid, 0); 866 if (ret) { 867 hns3_err(hw, "failed to remove pvid %u, ret = %d", 868 new_pvid, ret); 869 return ret; 870 } 871 872 hns3_add_all_vlan_table(hns); 873 } 874 return 0; 875 } 876 877 static int 878 hns3_en_pvid_strip(struct hns3_adapter *hns, int on) 879 { 880 struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg; 881 struct hns3_rx_vtag_cfg rx_vlan_cfg; 882 bool rx_strip_en; 883 int ret; 884 885 rx_strip_en = old_cfg->rx_vlan_offload_en; 886 if (on) { 887 rx_vlan_cfg.strip_tag1_en = rx_strip_en; 888 rx_vlan_cfg.strip_tag2_en = true; 889 rx_vlan_cfg.strip_tag2_discard_en = true; 890 } else { 891 rx_vlan_cfg.strip_tag1_en = false; 892 rx_vlan_cfg.strip_tag2_en = rx_strip_en; 893 rx_vlan_cfg.strip_tag2_discard_en = false; 894 } 895 rx_vlan_cfg.strip_tag1_discard_en = false; 896 rx_vlan_cfg.vlan1_vlan_prionly = false; 897 rx_vlan_cfg.vlan2_vlan_prionly = false; 898 rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en; 899 900 ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg); 901 if (ret) 902 return ret; 903 904 hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg); 905 return ret; 906 } 907 908 static int 909 hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on) 910 { 911 struct hns3_hw *hw = &hns->hw; 912 uint16_t port_base_vlan_state; 913 int ret; 914 915 if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) { 916 if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) 917 hns3_warn(hw, "Invalid operation! As current pvid set " 918 "is %u, disable pvid %u is invalid", 919 hw->port_base_vlan_cfg.pvid, pvid); 920 return 0; 921 } 922 923 port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE : 924 HNS3_PORT_BASE_VLAN_DISABLE; 925 ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid); 926 if (ret) { 927 hns3_err(hw, "failed to config tx vlan for pvid, ret = %d", 928 ret); 929 return ret; 930 } 931 932 ret = hns3_en_pvid_strip(hns, on); 933 if (ret) { 934 hns3_err(hw, "failed to config rx vlan strip for pvid, " 935 "ret = %d", ret); 936 return ret; 937 } 938 939 if (pvid == HNS3_INVALID_PVID) 940 goto out; 941 ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid); 942 if (ret) { 943 hns3_err(hw, "failed to update vlan filter entries, ret = %d", 944 ret); 945 return ret; 946 } 947 948 out: 949 hw->port_base_vlan_cfg.state = port_base_vlan_state; 950 hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID; 951 return ret; 952 } 953 954 static int 955 hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) 956 { 957 struct hns3_adapter *hns = dev->data->dev_private; 958 struct hns3_hw *hw = &hns->hw; 959 bool pvid_en_state_change; 960 uint16_t pvid_state; 961 int ret; 962 963 if (pvid > RTE_ETHER_MAX_VLAN_ID) { 964 hns3_err(hw, "Invalid vlan_id = %u > %d", pvid, 965 RTE_ETHER_MAX_VLAN_ID); 966 return -EINVAL; 967 } 968 969 /* 970 * If PVID configuration state change, should refresh the PVID 971 * configuration state in struct hns3_tx_queue/hns3_rx_queue. 972 */ 973 pvid_state = hw->port_base_vlan_cfg.state; 974 if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) || 975 (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE)) 976 pvid_en_state_change = false; 977 else 978 pvid_en_state_change = true; 979 980 rte_spinlock_lock(&hw->lock); 981 ret = hns3_vlan_pvid_configure(hns, pvid, on); 982 rte_spinlock_unlock(&hw->lock); 983 if (ret) 984 return ret; 985 /* 986 * Only in HNS3_SW_SHIFT_AND_MODE the PVID related operation in Tx/Rx 987 * need be processed by PMD driver. 988 */ 989 if (pvid_en_state_change && 990 hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) 991 hns3_update_all_queues_pvid_proc_en(hw); 992 993 return 0; 994 } 995 996 static int 997 hns3_default_vlan_config(struct hns3_adapter *hns) 998 { 999 struct hns3_hw *hw = &hns->hw; 1000 int ret; 1001 1002 /* 1003 * When vlan filter is enabled, hardware regards packets without vlan 1004 * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan 1005 * table, packets without vlan won't be received. So, add vlan 0 as 1006 * the default vlan. 1007 */ 1008 ret = hns3_vlan_filter_configure(hns, 0, 1); 1009 if (ret) 1010 hns3_err(hw, "default vlan 0 config failed, ret =%d", ret); 1011 return ret; 1012 } 1013 1014 static int 1015 hns3_init_vlan_config(struct hns3_adapter *hns) 1016 { 1017 struct hns3_hw *hw = &hns->hw; 1018 int ret; 1019 1020 /* 1021 * This function can be called in the initialization and reset process, 1022 * when in reset process, it means that hardware had been reseted 1023 * successfully and we need to restore the hardware configuration to 1024 * ensure that the hardware configuration remains unchanged before and 1025 * after reset. 1026 */ 1027 if (rte_atomic16_read(&hw->reset.resetting) == 0) { 1028 hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE; 1029 hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID; 1030 } 1031 1032 ret = hns3_vlan_filter_init(hns); 1033 if (ret) { 1034 hns3_err(hw, "vlan init fail in pf, ret =%d", ret); 1035 return ret; 1036 } 1037 1038 ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER, 1039 RTE_ETHER_TYPE_VLAN); 1040 if (ret) { 1041 hns3_err(hw, "tpid set fail in pf, ret =%d", ret); 1042 return ret; 1043 } 1044 1045 /* 1046 * When in the reinit dev stage of the reset process, the following 1047 * vlan-related configurations may differ from those at initialization, 1048 * we will restore configurations to hardware in hns3_restore_vlan_table 1049 * and hns3_restore_vlan_conf later. 1050 */ 1051 if (rte_atomic16_read(&hw->reset.resetting) == 0) { 1052 ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0); 1053 if (ret) { 1054 hns3_err(hw, "pvid set fail in pf, ret =%d", ret); 1055 return ret; 1056 } 1057 1058 ret = hns3_en_hw_strip_rxvtag(hns, false); 1059 if (ret) { 1060 hns3_err(hw, "rx strip configure fail in pf, ret =%d", 1061 ret); 1062 return ret; 1063 } 1064 } 1065 1066 return hns3_default_vlan_config(hns); 1067 } 1068 1069 static int 1070 hns3_restore_vlan_conf(struct hns3_adapter *hns) 1071 { 1072 struct hns3_pf *pf = &hns->pf; 1073 struct hns3_hw *hw = &hns->hw; 1074 uint64_t offloads; 1075 bool enable; 1076 int ret; 1077 1078 if (!hw->data->promiscuous) { 1079 /* restore vlan filter states */ 1080 offloads = hw->data->dev_conf.rxmode.offloads; 1081 enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false; 1082 ret = hns3_enable_vlan_filter(hns, enable); 1083 if (ret) { 1084 hns3_err(hw, "failed to restore vlan rx filter conf, " 1085 "ret = %d", ret); 1086 return ret; 1087 } 1088 } 1089 1090 ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg); 1091 if (ret) { 1092 hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret); 1093 return ret; 1094 } 1095 1096 ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg); 1097 if (ret) 1098 hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret); 1099 1100 return ret; 1101 } 1102 1103 static int 1104 hns3_dev_configure_vlan(struct rte_eth_dev *dev) 1105 { 1106 struct hns3_adapter *hns = dev->data->dev_private; 1107 struct rte_eth_dev_data *data = dev->data; 1108 struct rte_eth_txmode *txmode; 1109 struct hns3_hw *hw = &hns->hw; 1110 int mask; 1111 int ret; 1112 1113 txmode = &data->dev_conf.txmode; 1114 if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged) 1115 hns3_warn(hw, 1116 "hw_vlan_reject_tagged or hw_vlan_reject_untagged " 1117 "configuration is not supported! Ignore these two " 1118 "parameters: hw_vlan_reject_tagged(%d), " 1119 "hw_vlan_reject_untagged(%d)", 1120 txmode->hw_vlan_reject_tagged, 1121 txmode->hw_vlan_reject_untagged); 1122 1123 /* Apply vlan offload setting */ 1124 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK; 1125 ret = hns3_vlan_offload_set(dev, mask); 1126 if (ret) { 1127 hns3_err(hw, "dev config rx vlan offload failed, ret = %d", 1128 ret); 1129 return ret; 1130 } 1131 1132 /* 1133 * If pvid config is not set in rte_eth_conf, driver needn't to set 1134 * VLAN pvid related configuration to hardware. 1135 */ 1136 if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0) 1137 return 0; 1138 1139 /* Apply pvid setting */ 1140 ret = hns3_vlan_pvid_set(dev, txmode->pvid, 1141 txmode->hw_vlan_insert_pvid); 1142 if (ret) 1143 hns3_err(hw, "dev config vlan pvid(%d) failed, ret = %d", 1144 txmode->pvid, ret); 1145 1146 return ret; 1147 } 1148 1149 static int 1150 hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min, 1151 unsigned int tso_mss_max) 1152 { 1153 struct hns3_cfg_tso_status_cmd *req; 1154 struct hns3_cmd_desc desc; 1155 uint16_t tso_mss; 1156 1157 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false); 1158 1159 req = (struct hns3_cfg_tso_status_cmd *)desc.data; 1160 1161 tso_mss = 0; 1162 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S, 1163 tso_mss_min); 1164 req->tso_mss_min = rte_cpu_to_le_16(tso_mss); 1165 1166 tso_mss = 0; 1167 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S, 1168 tso_mss_max); 1169 req->tso_mss_max = rte_cpu_to_le_16(tso_mss); 1170 1171 return hns3_cmd_send(hw, &desc, 1); 1172 } 1173 1174 static int 1175 hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size, 1176 uint16_t *allocated_size, bool is_alloc) 1177 { 1178 struct hns3_umv_spc_alc_cmd *req; 1179 struct hns3_cmd_desc desc; 1180 int ret; 1181 1182 req = (struct hns3_umv_spc_alc_cmd *)desc.data; 1183 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false); 1184 hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1); 1185 req->space_size = rte_cpu_to_le_32(space_size); 1186 1187 ret = hns3_cmd_send(hw, &desc, 1); 1188 if (ret) { 1189 PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d", 1190 is_alloc ? "allocate" : "free", ret); 1191 return ret; 1192 } 1193 1194 if (is_alloc && allocated_size) 1195 *allocated_size = rte_le_to_cpu_32(desc.data[1]); 1196 1197 return 0; 1198 } 1199 1200 static int 1201 hns3_init_umv_space(struct hns3_hw *hw) 1202 { 1203 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 1204 struct hns3_pf *pf = &hns->pf; 1205 uint16_t allocated_size = 0; 1206 int ret; 1207 1208 ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size, 1209 true); 1210 if (ret) 1211 return ret; 1212 1213 if (allocated_size < pf->wanted_umv_size) 1214 PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u", 1215 pf->wanted_umv_size, allocated_size); 1216 1217 pf->max_umv_size = (!!allocated_size) ? allocated_size : 1218 pf->wanted_umv_size; 1219 pf->used_umv_size = 0; 1220 return 0; 1221 } 1222 1223 static int 1224 hns3_uninit_umv_space(struct hns3_hw *hw) 1225 { 1226 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 1227 struct hns3_pf *pf = &hns->pf; 1228 int ret; 1229 1230 if (pf->max_umv_size == 0) 1231 return 0; 1232 1233 ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false); 1234 if (ret) 1235 return ret; 1236 1237 pf->max_umv_size = 0; 1238 1239 return 0; 1240 } 1241 1242 static bool 1243 hns3_is_umv_space_full(struct hns3_hw *hw) 1244 { 1245 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 1246 struct hns3_pf *pf = &hns->pf; 1247 bool is_full; 1248 1249 is_full = (pf->used_umv_size >= pf->max_umv_size); 1250 1251 return is_full; 1252 } 1253 1254 static void 1255 hns3_update_umv_space(struct hns3_hw *hw, bool is_free) 1256 { 1257 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 1258 struct hns3_pf *pf = &hns->pf; 1259 1260 if (is_free) { 1261 if (pf->used_umv_size > 0) 1262 pf->used_umv_size--; 1263 } else 1264 pf->used_umv_size++; 1265 } 1266 1267 static void 1268 hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req, 1269 const uint8_t *addr, bool is_mc) 1270 { 1271 const unsigned char *mac_addr = addr; 1272 uint32_t high_val = ((uint32_t)mac_addr[3] << 24) | 1273 ((uint32_t)mac_addr[2] << 16) | 1274 ((uint32_t)mac_addr[1] << 8) | 1275 (uint32_t)mac_addr[0]; 1276 uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4]; 1277 1278 hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1); 1279 if (is_mc) { 1280 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1281 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1); 1282 hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1); 1283 } 1284 1285 new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val); 1286 new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff); 1287 } 1288 1289 static int 1290 hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp, 1291 uint8_t resp_code, 1292 enum hns3_mac_vlan_tbl_opcode op) 1293 { 1294 if (cmdq_resp) { 1295 hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u", 1296 cmdq_resp); 1297 return -EIO; 1298 } 1299 1300 if (op == HNS3_MAC_VLAN_ADD) { 1301 if (resp_code == 0 || resp_code == 1) { 1302 return 0; 1303 } else if (resp_code == HNS3_ADD_UC_OVERFLOW) { 1304 hns3_err(hw, "add mac addr failed for uc_overflow"); 1305 return -ENOSPC; 1306 } else if (resp_code == HNS3_ADD_MC_OVERFLOW) { 1307 hns3_err(hw, "add mac addr failed for mc_overflow"); 1308 return -ENOSPC; 1309 } 1310 1311 hns3_err(hw, "add mac addr failed for undefined, code=%u", 1312 resp_code); 1313 return -EIO; 1314 } else if (op == HNS3_MAC_VLAN_REMOVE) { 1315 if (resp_code == 0) { 1316 return 0; 1317 } else if (resp_code == 1) { 1318 hns3_dbg(hw, "remove mac addr failed for miss"); 1319 return -ENOENT; 1320 } 1321 1322 hns3_err(hw, "remove mac addr failed for undefined, code=%u", 1323 resp_code); 1324 return -EIO; 1325 } else if (op == HNS3_MAC_VLAN_LKUP) { 1326 if (resp_code == 0) { 1327 return 0; 1328 } else if (resp_code == 1) { 1329 hns3_dbg(hw, "lookup mac addr failed for miss"); 1330 return -ENOENT; 1331 } 1332 1333 hns3_err(hw, "lookup mac addr failed for undefined, code=%u", 1334 resp_code); 1335 return -EIO; 1336 } 1337 1338 hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u", 1339 op); 1340 1341 return -EINVAL; 1342 } 1343 1344 static int 1345 hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw, 1346 struct hns3_mac_vlan_tbl_entry_cmd *req, 1347 struct hns3_cmd_desc *desc, bool is_mc) 1348 { 1349 uint8_t resp_code; 1350 uint16_t retval; 1351 int ret; 1352 1353 hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true); 1354 if (is_mc) { 1355 desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 1356 memcpy(desc[0].data, req, 1357 sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); 1358 hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD, 1359 true); 1360 desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 1361 hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD, 1362 true); 1363 ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM); 1364 } else { 1365 memcpy(desc[0].data, req, 1366 sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); 1367 ret = hns3_cmd_send(hw, desc, 1); 1368 } 1369 if (ret) { 1370 hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.", 1371 ret); 1372 return ret; 1373 } 1374 resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff; 1375 retval = rte_le_to_cpu_16(desc[0].retval); 1376 1377 return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, 1378 HNS3_MAC_VLAN_LKUP); 1379 } 1380 1381 static int 1382 hns3_add_mac_vlan_tbl(struct hns3_hw *hw, 1383 struct hns3_mac_vlan_tbl_entry_cmd *req, 1384 struct hns3_cmd_desc *mc_desc) 1385 { 1386 uint8_t resp_code; 1387 uint16_t retval; 1388 int cfg_status; 1389 int ret; 1390 1391 if (mc_desc == NULL) { 1392 struct hns3_cmd_desc desc; 1393 1394 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false); 1395 memcpy(desc.data, req, 1396 sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); 1397 ret = hns3_cmd_send(hw, &desc, 1); 1398 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 1399 retval = rte_le_to_cpu_16(desc.retval); 1400 1401 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, 1402 HNS3_MAC_VLAN_ADD); 1403 } else { 1404 hns3_cmd_reuse_desc(&mc_desc[0], false); 1405 mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 1406 hns3_cmd_reuse_desc(&mc_desc[1], false); 1407 mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 1408 hns3_cmd_reuse_desc(&mc_desc[2], false); 1409 mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT); 1410 memcpy(mc_desc[0].data, req, 1411 sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); 1412 mc_desc[0].retval = 0; 1413 ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM); 1414 resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff; 1415 retval = rte_le_to_cpu_16(mc_desc[0].retval); 1416 1417 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, 1418 HNS3_MAC_VLAN_ADD); 1419 } 1420 1421 if (ret) { 1422 hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret); 1423 return ret; 1424 } 1425 1426 return cfg_status; 1427 } 1428 1429 static int 1430 hns3_remove_mac_vlan_tbl(struct hns3_hw *hw, 1431 struct hns3_mac_vlan_tbl_entry_cmd *req) 1432 { 1433 struct hns3_cmd_desc desc; 1434 uint8_t resp_code; 1435 uint16_t retval; 1436 int ret; 1437 1438 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false); 1439 1440 memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); 1441 1442 ret = hns3_cmd_send(hw, &desc, 1); 1443 if (ret) { 1444 hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret); 1445 return ret; 1446 } 1447 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 1448 retval = rte_le_to_cpu_16(desc.retval); 1449 1450 return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, 1451 HNS3_MAC_VLAN_REMOVE); 1452 } 1453 1454 static int 1455 hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1456 { 1457 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 1458 struct hns3_mac_vlan_tbl_entry_cmd req; 1459 struct hns3_pf *pf = &hns->pf; 1460 struct hns3_cmd_desc desc[3]; 1461 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1462 uint16_t egress_port = 0; 1463 uint8_t vf_id; 1464 int ret; 1465 1466 /* check if mac addr is valid */ 1467 if (!rte_is_valid_assigned_ether_addr(mac_addr)) { 1468 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1469 mac_addr); 1470 hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid", 1471 mac_str); 1472 return -EINVAL; 1473 } 1474 1475 memset(&req, 0, sizeof(req)); 1476 1477 /* 1478 * In current version VF is not supported when PF is driven by DPDK 1479 * driver, just need to configure parameters for PF vport. 1480 */ 1481 vf_id = HNS3_PF_FUNC_ID; 1482 hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M, 1483 HNS3_MAC_EPORT_VFID_S, vf_id); 1484 1485 req.egress_port = rte_cpu_to_le_16(egress_port); 1486 1487 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false); 1488 1489 /* 1490 * Lookup the mac address in the mac_vlan table, and add 1491 * it if the entry is inexistent. Repeated unicast entry 1492 * is not allowed in the mac vlan table. 1493 */ 1494 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false); 1495 if (ret == -ENOENT) { 1496 if (!hns3_is_umv_space_full(hw)) { 1497 ret = hns3_add_mac_vlan_tbl(hw, &req, NULL); 1498 if (!ret) 1499 hns3_update_umv_space(hw, false); 1500 return ret; 1501 } 1502 1503 hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size); 1504 1505 return -ENOSPC; 1506 } 1507 1508 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); 1509 1510 /* check if we just hit the duplicate */ 1511 if (ret == 0) { 1512 hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str); 1513 return 0; 1514 } 1515 1516 hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table", 1517 mac_str); 1518 1519 return ret; 1520 } 1521 1522 static int 1523 hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1524 { 1525 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1526 struct rte_ether_addr *addr; 1527 int ret; 1528 int i; 1529 1530 for (i = 0; i < hw->mc_addrs_num; i++) { 1531 addr = &hw->mc_addrs[i]; 1532 /* Check if there are duplicate addresses */ 1533 if (rte_is_same_ether_addr(addr, mac_addr)) { 1534 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1535 addr); 1536 hns3_err(hw, "failed to add mc mac addr, same addrs" 1537 "(%s) is added by the set_mc_mac_addr_list " 1538 "API", mac_str); 1539 return -EINVAL; 1540 } 1541 } 1542 1543 ret = hns3_add_mc_addr(hw, mac_addr); 1544 if (ret) { 1545 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1546 mac_addr); 1547 hns3_err(hw, "failed to add mc mac addr(%s), ret = %d", 1548 mac_str, ret); 1549 } 1550 return ret; 1551 } 1552 1553 static int 1554 hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1555 { 1556 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1557 int ret; 1558 1559 ret = hns3_remove_mc_addr(hw, mac_addr); 1560 if (ret) { 1561 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1562 mac_addr); 1563 hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d", 1564 mac_str, ret); 1565 } 1566 return ret; 1567 } 1568 1569 static int 1570 hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 1571 uint32_t idx, __rte_unused uint32_t pool) 1572 { 1573 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1574 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1575 int ret; 1576 1577 rte_spinlock_lock(&hw->lock); 1578 1579 /* 1580 * In hns3 network engine adding UC and MC mac address with different 1581 * commands with firmware. We need to determine whether the input 1582 * address is a UC or a MC address to call different commands. 1583 * By the way, it is recommended calling the API function named 1584 * rte_eth_dev_set_mc_addr_list to set the MC mac address, because 1585 * using the rte_eth_dev_mac_addr_add API function to set MC mac address 1586 * may affect the specifications of UC mac addresses. 1587 */ 1588 if (rte_is_multicast_ether_addr(mac_addr)) 1589 ret = hns3_add_mc_addr_common(hw, mac_addr); 1590 else 1591 ret = hns3_add_uc_addr_common(hw, mac_addr); 1592 1593 if (ret) { 1594 rte_spinlock_unlock(&hw->lock); 1595 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1596 mac_addr); 1597 hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, 1598 ret); 1599 return ret; 1600 } 1601 1602 if (idx == 0) 1603 hw->mac.default_addr_setted = true; 1604 rte_spinlock_unlock(&hw->lock); 1605 1606 return ret; 1607 } 1608 1609 static int 1610 hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1611 { 1612 struct hns3_mac_vlan_tbl_entry_cmd req; 1613 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1614 int ret; 1615 1616 /* check if mac addr is valid */ 1617 if (!rte_is_valid_assigned_ether_addr(mac_addr)) { 1618 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1619 mac_addr); 1620 hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid", 1621 mac_str); 1622 return -EINVAL; 1623 } 1624 1625 memset(&req, 0, sizeof(req)); 1626 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1627 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false); 1628 ret = hns3_remove_mac_vlan_tbl(hw, &req); 1629 if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */ 1630 return 0; 1631 else if (ret == 0) 1632 hns3_update_umv_space(hw, true); 1633 1634 return ret; 1635 } 1636 1637 static void 1638 hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) 1639 { 1640 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1641 /* index will be checked by upper level rte interface */ 1642 struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx]; 1643 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1644 int ret; 1645 1646 rte_spinlock_lock(&hw->lock); 1647 1648 if (rte_is_multicast_ether_addr(mac_addr)) 1649 ret = hns3_remove_mc_addr_common(hw, mac_addr); 1650 else 1651 ret = hns3_remove_uc_addr_common(hw, mac_addr); 1652 rte_spinlock_unlock(&hw->lock); 1653 if (ret) { 1654 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1655 mac_addr); 1656 hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str, 1657 ret); 1658 } 1659 } 1660 1661 static int 1662 hns3_set_default_mac_addr(struct rte_eth_dev *dev, 1663 struct rte_ether_addr *mac_addr) 1664 { 1665 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1666 struct rte_ether_addr *oaddr; 1667 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1668 bool default_addr_setted; 1669 bool rm_succes = false; 1670 int ret, ret_val; 1671 1672 /* 1673 * It has been guaranteed that input parameter named mac_addr is valid 1674 * address in the rte layer of DPDK framework. 1675 */ 1676 oaddr = (struct rte_ether_addr *)hw->mac.mac_addr; 1677 default_addr_setted = hw->mac.default_addr_setted; 1678 if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr)) 1679 return 0; 1680 1681 rte_spinlock_lock(&hw->lock); 1682 if (default_addr_setted) { 1683 ret = hns3_remove_uc_addr_common(hw, oaddr); 1684 if (ret) { 1685 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1686 oaddr); 1687 hns3_warn(hw, "Remove old uc mac address(%s) fail: %d", 1688 mac_str, ret); 1689 rm_succes = false; 1690 } else 1691 rm_succes = true; 1692 } 1693 1694 ret = hns3_add_uc_addr_common(hw, mac_addr); 1695 if (ret) { 1696 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1697 mac_addr); 1698 hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret); 1699 goto err_add_uc_addr; 1700 } 1701 1702 ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes); 1703 if (ret) { 1704 hns3_err(hw, "Failed to configure mac pause address: %d", ret); 1705 goto err_pause_addr_cfg; 1706 } 1707 1708 rte_ether_addr_copy(mac_addr, 1709 (struct rte_ether_addr *)hw->mac.mac_addr); 1710 hw->mac.default_addr_setted = true; 1711 rte_spinlock_unlock(&hw->lock); 1712 1713 return 0; 1714 1715 err_pause_addr_cfg: 1716 ret_val = hns3_remove_uc_addr_common(hw, mac_addr); 1717 if (ret_val) { 1718 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1719 mac_addr); 1720 hns3_warn(hw, 1721 "Failed to roll back to del setted mac addr(%s): %d", 1722 mac_str, ret_val); 1723 } 1724 1725 err_add_uc_addr: 1726 if (rm_succes) { 1727 ret_val = hns3_add_uc_addr_common(hw, oaddr); 1728 if (ret_val) { 1729 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1730 oaddr); 1731 hns3_warn(hw, 1732 "Failed to restore old uc mac addr(%s): %d", 1733 mac_str, ret_val); 1734 hw->mac.default_addr_setted = false; 1735 } 1736 } 1737 rte_spinlock_unlock(&hw->lock); 1738 1739 return ret; 1740 } 1741 1742 static int 1743 hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) 1744 { 1745 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1746 struct hns3_hw *hw = &hns->hw; 1747 struct rte_ether_addr *addr; 1748 int err = 0; 1749 int ret; 1750 int i; 1751 1752 for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) { 1753 addr = &hw->data->mac_addrs[i]; 1754 if (rte_is_zero_ether_addr(addr)) 1755 continue; 1756 if (rte_is_multicast_ether_addr(addr)) 1757 ret = del ? hns3_remove_mc_addr(hw, addr) : 1758 hns3_add_mc_addr(hw, addr); 1759 else 1760 ret = del ? hns3_remove_uc_addr_common(hw, addr) : 1761 hns3_add_uc_addr_common(hw, addr); 1762 1763 if (ret) { 1764 err = ret; 1765 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1766 addr); 1767 hns3_err(hw, "failed to %s mac addr(%s) index:%d " 1768 "ret = %d.", del ? "remove" : "restore", 1769 mac_str, i, ret); 1770 } 1771 } 1772 return err; 1773 } 1774 1775 static void 1776 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr) 1777 { 1778 #define HNS3_VF_NUM_IN_FIRST_DESC 192 1779 uint8_t word_num; 1780 uint8_t bit_num; 1781 1782 if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) { 1783 word_num = vfid / 32; 1784 bit_num = vfid % 32; 1785 if (clr) 1786 desc[1].data[word_num] &= 1787 rte_cpu_to_le_32(~(1UL << bit_num)); 1788 else 1789 desc[1].data[word_num] |= 1790 rte_cpu_to_le_32(1UL << bit_num); 1791 } else { 1792 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32; 1793 bit_num = vfid % 32; 1794 if (clr) 1795 desc[2].data[word_num] &= 1796 rte_cpu_to_le_32(~(1UL << bit_num)); 1797 else 1798 desc[2].data[word_num] |= 1799 rte_cpu_to_le_32(1UL << bit_num); 1800 } 1801 } 1802 1803 static int 1804 hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1805 { 1806 struct hns3_mac_vlan_tbl_entry_cmd req; 1807 struct hns3_cmd_desc desc[3]; 1808 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1809 uint8_t vf_id; 1810 int ret; 1811 1812 /* Check if mac addr is valid */ 1813 if (!rte_is_multicast_ether_addr(mac_addr)) { 1814 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1815 mac_addr); 1816 hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid", 1817 mac_str); 1818 return -EINVAL; 1819 } 1820 1821 memset(&req, 0, sizeof(req)); 1822 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1823 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1824 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1825 if (ret) { 1826 /* This mac addr do not exist, add new entry for it */ 1827 memset(desc[0].data, 0, sizeof(desc[0].data)); 1828 memset(desc[1].data, 0, sizeof(desc[0].data)); 1829 memset(desc[2].data, 0, sizeof(desc[0].data)); 1830 } 1831 1832 /* 1833 * In current version VF is not supported when PF is driven by DPDK 1834 * driver, just need to configure parameters for PF vport. 1835 */ 1836 vf_id = HNS3_PF_FUNC_ID; 1837 hns3_update_desc_vfid(desc, vf_id, false); 1838 ret = hns3_add_mac_vlan_tbl(hw, &req, desc); 1839 if (ret) { 1840 if (ret == -ENOSPC) 1841 hns3_err(hw, "mc mac vlan table is full"); 1842 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1843 mac_addr); 1844 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret); 1845 } 1846 1847 return ret; 1848 } 1849 1850 static int 1851 hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) 1852 { 1853 struct hns3_mac_vlan_tbl_entry_cmd req; 1854 struct hns3_cmd_desc desc[3]; 1855 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1856 uint8_t vf_id; 1857 int ret; 1858 1859 /* Check if mac addr is valid */ 1860 if (!rte_is_multicast_ether_addr(mac_addr)) { 1861 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1862 mac_addr); 1863 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid", 1864 mac_str); 1865 return -EINVAL; 1866 } 1867 1868 memset(&req, 0, sizeof(req)); 1869 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); 1870 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); 1871 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); 1872 if (ret == 0) { 1873 /* 1874 * This mac addr exist, remove this handle's VFID for it. 1875 * In current version VF is not supported when PF is driven by 1876 * DPDK driver, just need to configure parameters for PF vport. 1877 */ 1878 vf_id = HNS3_PF_FUNC_ID; 1879 hns3_update_desc_vfid(desc, vf_id, true); 1880 1881 /* All the vfid is zero, so need to delete this entry */ 1882 ret = hns3_remove_mac_vlan_tbl(hw, &req); 1883 } else if (ret == -ENOENT) { 1884 /* This mac addr doesn't exist. */ 1885 return 0; 1886 } 1887 1888 if (ret) { 1889 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1890 mac_addr); 1891 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret); 1892 } 1893 1894 return ret; 1895 } 1896 1897 static int 1898 hns3_set_mc_addr_chk_param(struct hns3_hw *hw, 1899 struct rte_ether_addr *mc_addr_set, 1900 uint32_t nb_mc_addr) 1901 { 1902 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 1903 struct rte_ether_addr *addr; 1904 uint32_t i; 1905 uint32_t j; 1906 1907 if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { 1908 hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) " 1909 "invalid. valid range: 0~%d", 1910 nb_mc_addr, HNS3_MC_MACADDR_NUM); 1911 return -EINVAL; 1912 } 1913 1914 /* Check if input mac addresses are valid */ 1915 for (i = 0; i < nb_mc_addr; i++) { 1916 addr = &mc_addr_set[i]; 1917 if (!rte_is_multicast_ether_addr(addr)) { 1918 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 1919 addr); 1920 hns3_err(hw, 1921 "failed to set mc mac addr, addr(%s) invalid.", 1922 mac_str); 1923 return -EINVAL; 1924 } 1925 1926 /* Check if there are duplicate addresses */ 1927 for (j = i + 1; j < nb_mc_addr; j++) { 1928 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 1929 rte_ether_format_addr(mac_str, 1930 RTE_ETHER_ADDR_FMT_SIZE, 1931 addr); 1932 hns3_err(hw, "failed to set mc mac addr, " 1933 "addrs invalid. two same addrs(%s).", 1934 mac_str); 1935 return -EINVAL; 1936 } 1937 } 1938 1939 /* 1940 * Check if there are duplicate addresses between mac_addrs 1941 * and mc_addr_set 1942 */ 1943 for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) { 1944 if (rte_is_same_ether_addr(addr, 1945 &hw->data->mac_addrs[j])) { 1946 rte_ether_format_addr(mac_str, 1947 RTE_ETHER_ADDR_FMT_SIZE, 1948 addr); 1949 hns3_err(hw, "failed to set mc mac addr, " 1950 "addrs invalid. addrs(%s) has already " 1951 "configured in mac_addr add API", 1952 mac_str); 1953 return -EINVAL; 1954 } 1955 } 1956 } 1957 1958 return 0; 1959 } 1960 1961 static void 1962 hns3_set_mc_addr_calc_addr(struct hns3_hw *hw, 1963 struct rte_ether_addr *mc_addr_set, 1964 int mc_addr_num, 1965 struct rte_ether_addr *reserved_addr_list, 1966 int *reserved_addr_num, 1967 struct rte_ether_addr *add_addr_list, 1968 int *add_addr_num, 1969 struct rte_ether_addr *rm_addr_list, 1970 int *rm_addr_num) 1971 { 1972 struct rte_ether_addr *addr; 1973 int current_addr_num; 1974 int reserved_num = 0; 1975 int add_num = 0; 1976 int rm_num = 0; 1977 int num; 1978 int i; 1979 int j; 1980 bool same_addr; 1981 1982 /* Calculate the mc mac address list that should be removed */ 1983 current_addr_num = hw->mc_addrs_num; 1984 for (i = 0; i < current_addr_num; i++) { 1985 addr = &hw->mc_addrs[i]; 1986 same_addr = false; 1987 for (j = 0; j < mc_addr_num; j++) { 1988 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { 1989 same_addr = true; 1990 break; 1991 } 1992 } 1993 1994 if (!same_addr) { 1995 rte_ether_addr_copy(addr, &rm_addr_list[rm_num]); 1996 rm_num++; 1997 } else { 1998 rte_ether_addr_copy(addr, 1999 &reserved_addr_list[reserved_num]); 2000 reserved_num++; 2001 } 2002 } 2003 2004 /* Calculate the mc mac address list that should be added */ 2005 for (i = 0; i < mc_addr_num; i++) { 2006 addr = &mc_addr_set[i]; 2007 same_addr = false; 2008 for (j = 0; j < current_addr_num; j++) { 2009 if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) { 2010 same_addr = true; 2011 break; 2012 } 2013 } 2014 2015 if (!same_addr) { 2016 rte_ether_addr_copy(addr, &add_addr_list[add_num]); 2017 add_num++; 2018 } 2019 } 2020 2021 /* Reorder the mc mac address list maintained by driver */ 2022 for (i = 0; i < reserved_num; i++) 2023 rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]); 2024 2025 for (i = 0; i < rm_num; i++) { 2026 num = reserved_num + i; 2027 rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]); 2028 } 2029 2030 *reserved_addr_num = reserved_num; 2031 *add_addr_num = add_num; 2032 *rm_addr_num = rm_num; 2033 } 2034 2035 static int 2036 hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, 2037 struct rte_ether_addr *mc_addr_set, 2038 uint32_t nb_mc_addr) 2039 { 2040 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2041 struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM]; 2042 struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM]; 2043 struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM]; 2044 struct rte_ether_addr *addr; 2045 int reserved_addr_num; 2046 int add_addr_num; 2047 int rm_addr_num; 2048 int mc_addr_num; 2049 int num; 2050 int ret; 2051 int i; 2052 2053 /* Check if input parameters are valid */ 2054 ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); 2055 if (ret) 2056 return ret; 2057 2058 rte_spinlock_lock(&hw->lock); 2059 2060 /* 2061 * Calculate the mc mac address lists those should be removed and be 2062 * added, Reorder the mc mac address list maintained by driver. 2063 */ 2064 mc_addr_num = (int)nb_mc_addr; 2065 hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num, 2066 reserved_addr_list, &reserved_addr_num, 2067 add_addr_list, &add_addr_num, 2068 rm_addr_list, &rm_addr_num); 2069 2070 /* Remove mc mac addresses */ 2071 for (i = 0; i < rm_addr_num; i++) { 2072 num = rm_addr_num - i - 1; 2073 addr = &rm_addr_list[num]; 2074 ret = hns3_remove_mc_addr(hw, addr); 2075 if (ret) { 2076 rte_spinlock_unlock(&hw->lock); 2077 return ret; 2078 } 2079 hw->mc_addrs_num--; 2080 } 2081 2082 /* Add mc mac addresses */ 2083 for (i = 0; i < add_addr_num; i++) { 2084 addr = &add_addr_list[i]; 2085 ret = hns3_add_mc_addr(hw, addr); 2086 if (ret) { 2087 rte_spinlock_unlock(&hw->lock); 2088 return ret; 2089 } 2090 2091 num = reserved_addr_num + i; 2092 rte_ether_addr_copy(addr, &hw->mc_addrs[num]); 2093 hw->mc_addrs_num++; 2094 } 2095 rte_spinlock_unlock(&hw->lock); 2096 2097 return 0; 2098 } 2099 2100 static int 2101 hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) 2102 { 2103 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 2104 struct hns3_hw *hw = &hns->hw; 2105 struct rte_ether_addr *addr; 2106 int err = 0; 2107 int ret; 2108 int i; 2109 2110 for (i = 0; i < hw->mc_addrs_num; i++) { 2111 addr = &hw->mc_addrs[i]; 2112 if (!rte_is_multicast_ether_addr(addr)) 2113 continue; 2114 if (del) 2115 ret = hns3_remove_mc_addr(hw, addr); 2116 else 2117 ret = hns3_add_mc_addr(hw, addr); 2118 if (ret) { 2119 err = ret; 2120 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 2121 addr); 2122 hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d", 2123 del ? "Remove" : "Restore", mac_str, ret); 2124 } 2125 } 2126 return err; 2127 } 2128 2129 static int 2130 hns3_check_mq_mode(struct rte_eth_dev *dev) 2131 { 2132 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 2133 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 2134 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2135 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 2136 struct rte_eth_dcb_rx_conf *dcb_rx_conf; 2137 struct rte_eth_dcb_tx_conf *dcb_tx_conf; 2138 uint8_t num_tc; 2139 int max_tc = 0; 2140 int i; 2141 2142 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 2143 dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; 2144 2145 if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { 2146 hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB_RSS is not supported. " 2147 "rx_mq_mode = %d", rx_mq_mode); 2148 return -EINVAL; 2149 } 2150 2151 if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB || 2152 tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) { 2153 hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB and ETH_MQ_TX_VMDQ_DCB " 2154 "is not supported. rx_mq_mode = %d, tx_mq_mode = %d", 2155 rx_mq_mode, tx_mq_mode); 2156 return -EINVAL; 2157 } 2158 2159 if (rx_mq_mode == ETH_MQ_RX_DCB_RSS) { 2160 if (dcb_rx_conf->nb_tcs > pf->tc_max) { 2161 hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.", 2162 dcb_rx_conf->nb_tcs, pf->tc_max); 2163 return -EINVAL; 2164 } 2165 2166 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS || 2167 dcb_rx_conf->nb_tcs == HNS3_8_TCS)) { 2168 hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, " 2169 "nb_tcs(%d) != %d or %d in rx direction.", 2170 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS); 2171 return -EINVAL; 2172 } 2173 2174 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) { 2175 hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)", 2176 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs); 2177 return -EINVAL; 2178 } 2179 2180 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) { 2181 if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) { 2182 hns3_err(hw, "dcb_tc[%d] = %d in rx direction, " 2183 "is not equal to one in tx direction.", 2184 i, dcb_rx_conf->dcb_tc[i]); 2185 return -EINVAL; 2186 } 2187 if (dcb_rx_conf->dcb_tc[i] > max_tc) 2188 max_tc = dcb_rx_conf->dcb_tc[i]; 2189 } 2190 2191 num_tc = max_tc + 1; 2192 if (num_tc > dcb_rx_conf->nb_tcs) { 2193 hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)", 2194 num_tc, dcb_rx_conf->nb_tcs); 2195 return -EINVAL; 2196 } 2197 } 2198 2199 return 0; 2200 } 2201 2202 static int 2203 hns3_check_dcb_cfg(struct rte_eth_dev *dev) 2204 { 2205 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2206 2207 if (!hns3_dev_dcb_supported(hw)) { 2208 hns3_err(hw, "this port does not support dcb configurations."); 2209 return -EOPNOTSUPP; 2210 } 2211 2212 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) { 2213 hns3_err(hw, "MAC pause enabled, cannot config dcb info."); 2214 return -EOPNOTSUPP; 2215 } 2216 2217 /* Check multiple queue mode */ 2218 return hns3_check_mq_mode(dev); 2219 } 2220 2221 static int 2222 hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, 2223 enum hns3_ring_type queue_type, uint16_t queue_id) 2224 { 2225 struct hns3_cmd_desc desc; 2226 struct hns3_ctrl_vector_chain_cmd *req = 2227 (struct hns3_ctrl_vector_chain_cmd *)desc.data; 2228 enum hns3_cmd_status status; 2229 enum hns3_opcode_type op; 2230 uint16_t tqp_type_and_id = 0; 2231 const char *op_str; 2232 uint16_t type; 2233 uint16_t gl; 2234 2235 op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; 2236 hns3_cmd_setup_basic_desc(&desc, op, false); 2237 req->int_vector_id = vector_id; 2238 2239 if (queue_type == HNS3_RING_TYPE_RX) 2240 gl = HNS3_RING_GL_RX; 2241 else 2242 gl = HNS3_RING_GL_TX; 2243 2244 type = queue_type; 2245 2246 hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S, 2247 type); 2248 hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id); 2249 hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S, 2250 gl); 2251 req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); 2252 req->int_cause_num = 1; 2253 op_str = mmap ? "Map" : "Unmap"; 2254 status = hns3_cmd_send(hw, &desc, 1); 2255 if (status) { 2256 hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.", 2257 op_str, queue_id, req->int_vector_id, status); 2258 return status; 2259 } 2260 2261 return 0; 2262 } 2263 2264 static int 2265 hns3_init_ring_with_vector(struct hns3_hw *hw) 2266 { 2267 uint16_t vec; 2268 int ret; 2269 int i; 2270 2271 /* 2272 * In hns3 network engine, vector 0 is always the misc interrupt of this 2273 * function, vector 1~N can be used respectively for the queues of the 2274 * function. Tx and Rx queues with the same number share the interrupt 2275 * vector. In the initialization clearing the all hardware mapping 2276 * relationship configurations between queues and interrupt vectors is 2277 * needed, so some error caused by the residual configurations, such as 2278 * the unexpected Tx interrupt, can be avoid. 2279 */ 2280 vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ 2281 if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE) 2282 vec = vec - 1; /* the last interrupt is reserved */ 2283 hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); 2284 for (i = 0; i < hw->intr_tqps_num; i++) { 2285 /* 2286 * Set gap limiter/rate limiter/quanity limiter algorithm 2287 * configuration for interrupt coalesce of queue's interrupt. 2288 */ 2289 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, 2290 HNS3_TQP_INTR_GL_DEFAULT); 2291 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, 2292 HNS3_TQP_INTR_GL_DEFAULT); 2293 hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); 2294 hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); 2295 2296 ret = hns3_bind_ring_with_vector(hw, vec, false, 2297 HNS3_RING_TYPE_TX, i); 2298 if (ret) { 2299 PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with " 2300 "vector: %d, ret=%d", i, vec, ret); 2301 return ret; 2302 } 2303 2304 ret = hns3_bind_ring_with_vector(hw, vec, false, 2305 HNS3_RING_TYPE_RX, i); 2306 if (ret) { 2307 PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with " 2308 "vector: %d, ret=%d", i, vec, ret); 2309 return ret; 2310 } 2311 } 2312 2313 return 0; 2314 } 2315 2316 static int 2317 hns3_dev_configure(struct rte_eth_dev *dev) 2318 { 2319 struct hns3_adapter *hns = dev->data->dev_private; 2320 struct rte_eth_conf *conf = &dev->data->dev_conf; 2321 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; 2322 struct hns3_hw *hw = &hns->hw; 2323 struct hns3_rss_conf *rss_cfg = &hw->rss_info; 2324 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2325 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2326 struct rte_eth_rss_conf rss_conf; 2327 uint16_t mtu; 2328 bool gro_en; 2329 int ret; 2330 2331 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); 2332 2333 /* 2334 * Some versions of hardware network engine does not support 2335 * individually enable/disable/reset the Tx or Rx queue. These devices 2336 * must enable/disable/reset Tx and Rx queues at the same time. When the 2337 * numbers of Tx queues allocated by upper applications are not equal to 2338 * the numbers of Rx queues, driver needs to setup fake Tx or Rx queues 2339 * to adjust numbers of Tx/Rx queues. otherwise, network engine can not 2340 * work as usual. But these fake queues are imperceptible, and can not 2341 * be used by upper applications. 2342 */ 2343 if (!hns3_dev_indep_txrx_supported(hw)) { 2344 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); 2345 if (ret) { 2346 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", 2347 ret); 2348 return ret; 2349 } 2350 } 2351 2352 hw->adapter_state = HNS3_NIC_CONFIGURING; 2353 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { 2354 hns3_err(hw, "setting link speed/duplex not supported"); 2355 ret = -EINVAL; 2356 goto cfg_err; 2357 } 2358 2359 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) { 2360 ret = hns3_check_dcb_cfg(dev); 2361 if (ret) 2362 goto cfg_err; 2363 } 2364 2365 /* When RSS is not configured, redirect the packet queue 0 */ 2366 if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) { 2367 conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 2368 rss_conf = conf->rx_adv_conf.rss_conf; 2369 hw->rss_dis_flag = false; 2370 if (rss_conf.rss_key == NULL) { 2371 rss_conf.rss_key = rss_cfg->key; 2372 rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE; 2373 } 2374 2375 ret = hns3_dev_rss_hash_update(dev, &rss_conf); 2376 if (ret) 2377 goto cfg_err; 2378 } 2379 2380 /* 2381 * If jumbo frames are enabled, MTU needs to be refreshed 2382 * according to the maximum RX packet length. 2383 */ 2384 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 2385 /* 2386 * Security of max_rx_pkt_len is guaranteed in dpdk frame. 2387 * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it 2388 * can safely assign to "uint16_t" type variable. 2389 */ 2390 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len); 2391 ret = hns3_dev_mtu_set(dev, mtu); 2392 if (ret) 2393 goto cfg_err; 2394 dev->data->mtu = mtu; 2395 } 2396 2397 ret = hns3_dev_configure_vlan(dev); 2398 if (ret) 2399 goto cfg_err; 2400 2401 /* config hardware GRO */ 2402 gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false; 2403 ret = hns3_config_gro(hw, gro_en); 2404 if (ret) 2405 goto cfg_err; 2406 2407 hns->rx_simple_allowed = true; 2408 hns->rx_vec_allowed = true; 2409 hns->tx_simple_allowed = true; 2410 hns->tx_vec_allowed = true; 2411 2412 hns3_init_rx_ptype_tble(dev); 2413 hw->adapter_state = HNS3_NIC_CONFIGURED; 2414 2415 return 0; 2416 2417 cfg_err: 2418 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); 2419 hw->adapter_state = HNS3_NIC_INITIALIZED; 2420 2421 return ret; 2422 } 2423 2424 static int 2425 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) 2426 { 2427 struct hns3_config_max_frm_size_cmd *req; 2428 struct hns3_cmd_desc desc; 2429 2430 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false); 2431 2432 req = (struct hns3_config_max_frm_size_cmd *)desc.data; 2433 req->max_frm_size = rte_cpu_to_le_16(new_mps); 2434 req->min_frm_size = RTE_ETHER_MIN_LEN; 2435 2436 return hns3_cmd_send(hw, &desc, 1); 2437 } 2438 2439 static int 2440 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) 2441 { 2442 int ret; 2443 2444 ret = hns3_set_mac_mtu(hw, mps); 2445 if (ret) { 2446 hns3_err(hw, "Failed to set mtu, ret = %d", ret); 2447 return ret; 2448 } 2449 2450 ret = hns3_buffer_alloc(hw); 2451 if (ret) 2452 hns3_err(hw, "Failed to allocate buffer, ret = %d", ret); 2453 2454 return ret; 2455 } 2456 2457 static int 2458 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 2459 { 2460 struct hns3_adapter *hns = dev->data->dev_private; 2461 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; 2462 struct hns3_hw *hw = &hns->hw; 2463 bool is_jumbo_frame; 2464 int ret; 2465 2466 if (dev->data->dev_started) { 2467 hns3_err(hw, "Failed to set mtu, port %u must be stopped " 2468 "before configuration", dev->data->port_id); 2469 return -EBUSY; 2470 } 2471 2472 rte_spinlock_lock(&hw->lock); 2473 is_jumbo_frame = frame_size > RTE_ETHER_MAX_LEN ? true : false; 2474 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN); 2475 2476 /* 2477 * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely 2478 * assign to "uint16_t" type variable. 2479 */ 2480 ret = hns3_config_mtu(hw, (uint16_t)frame_size); 2481 if (ret) { 2482 rte_spinlock_unlock(&hw->lock); 2483 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d", 2484 dev->data->port_id, mtu, ret); 2485 return ret; 2486 } 2487 hns->pf.mps = (uint16_t)frame_size; 2488 if (is_jumbo_frame) 2489 dev->data->dev_conf.rxmode.offloads |= 2490 DEV_RX_OFFLOAD_JUMBO_FRAME; 2491 else 2492 dev->data->dev_conf.rxmode.offloads &= 2493 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 2494 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 2495 rte_spinlock_unlock(&hw->lock); 2496 2497 return 0; 2498 } 2499 2500 static int 2501 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) 2502 { 2503 struct hns3_adapter *hns = eth_dev->data->dev_private; 2504 struct hns3_hw *hw = &hns->hw; 2505 uint16_t queue_num = hw->tqps_num; 2506 2507 /* 2508 * In interrupt mode, 'max_rx_queues' is set based on the number of 2509 * MSI-X interrupt resources of the hardware. 2510 */ 2511 if (hw->data->dev_conf.intr_conf.rxq == 1) 2512 queue_num = hw->intr_tqps_num; 2513 2514 info->max_rx_queues = queue_num; 2515 info->max_tx_queues = hw->tqps_num; 2516 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ 2517 info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; 2518 info->max_mac_addrs = HNS3_UC_MACADDR_NUM; 2519 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; 2520 info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; 2521 info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 2522 DEV_RX_OFFLOAD_TCP_CKSUM | 2523 DEV_RX_OFFLOAD_UDP_CKSUM | 2524 DEV_RX_OFFLOAD_SCTP_CKSUM | 2525 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 2526 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | 2527 DEV_RX_OFFLOAD_KEEP_CRC | 2528 DEV_RX_OFFLOAD_SCATTER | 2529 DEV_RX_OFFLOAD_VLAN_STRIP | 2530 DEV_RX_OFFLOAD_VLAN_FILTER | 2531 DEV_RX_OFFLOAD_JUMBO_FRAME | 2532 DEV_RX_OFFLOAD_RSS_HASH | 2533 DEV_RX_OFFLOAD_TCP_LRO); 2534 info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 2535 DEV_TX_OFFLOAD_IPV4_CKSUM | 2536 DEV_TX_OFFLOAD_TCP_CKSUM | 2537 DEV_TX_OFFLOAD_UDP_CKSUM | 2538 DEV_TX_OFFLOAD_SCTP_CKSUM | 2539 DEV_TX_OFFLOAD_MULTI_SEGS | 2540 DEV_TX_OFFLOAD_TCP_TSO | 2541 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 2542 DEV_TX_OFFLOAD_GRE_TNL_TSO | 2543 DEV_TX_OFFLOAD_GENEVE_TNL_TSO | 2544 DEV_TX_OFFLOAD_MBUF_FAST_FREE | 2545 hns3_txvlan_cap_get(hw)); 2546 2547 if (hns3_dev_indep_txrx_supported(hw)) 2548 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 2549 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 2550 2551 info->rx_desc_lim = (struct rte_eth_desc_lim) { 2552 .nb_max = HNS3_MAX_RING_DESC, 2553 .nb_min = HNS3_MIN_RING_DESC, 2554 .nb_align = HNS3_ALIGN_RING_DESC, 2555 }; 2556 2557 info->tx_desc_lim = (struct rte_eth_desc_lim) { 2558 .nb_max = HNS3_MAX_RING_DESC, 2559 .nb_min = HNS3_MIN_RING_DESC, 2560 .nb_align = HNS3_ALIGN_RING_DESC, 2561 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, 2562 .nb_mtu_seg_max = hw->max_non_tso_bd_num, 2563 }; 2564 2565 info->default_rxconf = (struct rte_eth_rxconf) { 2566 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, 2567 /* 2568 * If there are no available Rx buffer descriptors, incoming 2569 * packets are always dropped by hardware based on hns3 network 2570 * engine. 2571 */ 2572 .rx_drop_en = 1, 2573 .offloads = 0, 2574 }; 2575 info->default_txconf = (struct rte_eth_txconf) { 2576 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, 2577 .offloads = 0, 2578 }; 2579 2580 info->vmdq_queue_num = 0; 2581 2582 info->reta_size = HNS3_RSS_IND_TBL_SIZE; 2583 info->hash_key_size = HNS3_RSS_KEY_SIZE; 2584 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; 2585 2586 info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2587 info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; 2588 info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2589 info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; 2590 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2591 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; 2592 2593 return 0; 2594 } 2595 2596 static int 2597 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, 2598 size_t fw_size) 2599 { 2600 struct hns3_adapter *hns = eth_dev->data->dev_private; 2601 struct hns3_hw *hw = &hns->hw; 2602 uint32_t version = hw->fw_version; 2603 int ret; 2604 2605 ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", 2606 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, 2607 HNS3_FW_VERSION_BYTE3_S), 2608 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, 2609 HNS3_FW_VERSION_BYTE2_S), 2610 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, 2611 HNS3_FW_VERSION_BYTE1_S), 2612 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, 2613 HNS3_FW_VERSION_BYTE0_S)); 2614 ret += 1; /* add the size of '\0' */ 2615 if (fw_size < (uint32_t)ret) 2616 return ret; 2617 else 2618 return 0; 2619 } 2620 2621 static int 2622 hns3_dev_link_update(struct rte_eth_dev *eth_dev, 2623 __rte_unused int wait_to_complete) 2624 { 2625 struct hns3_adapter *hns = eth_dev->data->dev_private; 2626 struct hns3_hw *hw = &hns->hw; 2627 struct hns3_mac *mac = &hw->mac; 2628 struct rte_eth_link new_link; 2629 2630 if (!hns3_is_reset_pending(hns)) { 2631 hns3_update_speed_duplex(eth_dev); 2632 hns3_update_link_status(hw); 2633 } 2634 2635 memset(&new_link, 0, sizeof(new_link)); 2636 switch (mac->link_speed) { 2637 case ETH_SPEED_NUM_10M: 2638 case ETH_SPEED_NUM_100M: 2639 case ETH_SPEED_NUM_1G: 2640 case ETH_SPEED_NUM_10G: 2641 case ETH_SPEED_NUM_25G: 2642 case ETH_SPEED_NUM_40G: 2643 case ETH_SPEED_NUM_50G: 2644 case ETH_SPEED_NUM_100G: 2645 case ETH_SPEED_NUM_200G: 2646 new_link.link_speed = mac->link_speed; 2647 break; 2648 default: 2649 new_link.link_speed = ETH_SPEED_NUM_100M; 2650 break; 2651 } 2652 2653 new_link.link_duplex = mac->link_duplex; 2654 new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; 2655 new_link.link_autoneg = 2656 !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); 2657 2658 return rte_eth_linkstatus_set(eth_dev, &new_link); 2659 } 2660 2661 static int 2662 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status) 2663 { 2664 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2665 struct hns3_pf *pf = &hns->pf; 2666 2667 if (!(status->pf_state & HNS3_PF_STATE_DONE)) 2668 return -EINVAL; 2669 2670 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false; 2671 2672 return 0; 2673 } 2674 2675 static int 2676 hns3_query_function_status(struct hns3_hw *hw) 2677 { 2678 #define HNS3_QUERY_MAX_CNT 10 2679 #define HNS3_QUERY_SLEEP_MSCOEND 1 2680 struct hns3_func_status_cmd *req; 2681 struct hns3_cmd_desc desc; 2682 int timeout = 0; 2683 int ret; 2684 2685 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true); 2686 req = (struct hns3_func_status_cmd *)desc.data; 2687 2688 do { 2689 ret = hns3_cmd_send(hw, &desc, 1); 2690 if (ret) { 2691 PMD_INIT_LOG(ERR, "query function status failed %d", 2692 ret); 2693 return ret; 2694 } 2695 2696 /* Check pf reset is done */ 2697 if (req->pf_state) 2698 break; 2699 2700 rte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND); 2701 } while (timeout++ < HNS3_QUERY_MAX_CNT); 2702 2703 return hns3_parse_func_status(hw, req); 2704 } 2705 2706 static int 2707 hns3_get_pf_max_tqp_num(struct hns3_hw *hw) 2708 { 2709 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2710 struct hns3_pf *pf = &hns->pf; 2711 2712 if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) { 2713 /* 2714 * The total_tqps_num obtained from firmware is maximum tqp 2715 * numbers of this port, which should be used for PF and VFs. 2716 * There is no need for pf to have so many tqp numbers in 2717 * most cases. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2718 * coming from config file, is assigned to maximum queue number 2719 * for the PF of this port by user. So users can modify the 2720 * maximum queue number of PF according to their own application 2721 * scenarios, which is more flexible to use. In addition, many 2722 * memories can be saved due to allocating queue statistics 2723 * room according to the actual number of queues required. The 2724 * maximum queue number of PF for network engine with 2725 * revision_id greater than 0x30 is assigned by config file. 2726 */ 2727 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) { 2728 hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) " 2729 "must be greater than 0.", 2730 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF); 2731 return -EINVAL; 2732 } 2733 2734 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, 2735 hw->total_tqps_num); 2736 } else { 2737 /* 2738 * Due to the limitation on the number of PF interrupts 2739 * available, the maximum queue number assigned to PF on 2740 * the network engine with revision_id 0x21 is 64. 2741 */ 2742 hw->tqps_num = RTE_MIN(hw->total_tqps_num, 2743 HNS3_MAX_TQP_NUM_HIP08_PF); 2744 } 2745 2746 return 0; 2747 } 2748 2749 static int 2750 hns3_query_pf_resource(struct hns3_hw *hw) 2751 { 2752 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2753 struct hns3_pf *pf = &hns->pf; 2754 struct hns3_pf_res_cmd *req; 2755 struct hns3_cmd_desc desc; 2756 int ret; 2757 2758 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); 2759 ret = hns3_cmd_send(hw, &desc, 1); 2760 if (ret) { 2761 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); 2762 return ret; 2763 } 2764 2765 req = (struct hns3_pf_res_cmd *)desc.data; 2766 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) + 2767 rte_le_to_cpu_16(req->ext_tqp_num); 2768 ret = hns3_get_pf_max_tqp_num(hw); 2769 if (ret) 2770 return ret; 2771 2772 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; 2773 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); 2774 2775 if (req->tx_buf_size) 2776 pf->tx_buf_size = 2777 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; 2778 else 2779 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; 2780 2781 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); 2782 2783 if (req->dv_buf_size) 2784 pf->dv_buf_size = 2785 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; 2786 else 2787 pf->dv_buf_size = HNS3_DEFAULT_DV; 2788 2789 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); 2790 2791 hw->num_msi = 2792 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number), 2793 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S); 2794 2795 return 0; 2796 } 2797 2798 static void 2799 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) 2800 { 2801 struct hns3_cfg_param_cmd *req; 2802 uint64_t mac_addr_tmp_high; 2803 uint8_t ext_rss_size_max; 2804 uint64_t mac_addr_tmp; 2805 uint32_t i; 2806 2807 req = (struct hns3_cfg_param_cmd *)desc[0].data; 2808 2809 /* get the configuration */ 2810 cfg->vmdq_vport_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 2811 HNS3_CFG_VMDQ_M, HNS3_CFG_VMDQ_S); 2812 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 2813 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); 2814 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), 2815 HNS3_CFG_TQP_DESC_N_M, 2816 HNS3_CFG_TQP_DESC_N_S); 2817 2818 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2819 HNS3_CFG_PHY_ADDR_M, 2820 HNS3_CFG_PHY_ADDR_S); 2821 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2822 HNS3_CFG_MEDIA_TP_M, 2823 HNS3_CFG_MEDIA_TP_S); 2824 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2825 HNS3_CFG_RX_BUF_LEN_M, 2826 HNS3_CFG_RX_BUF_LEN_S); 2827 /* get mac address */ 2828 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); 2829 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2830 HNS3_CFG_MAC_ADDR_H_M, 2831 HNS3_CFG_MAC_ADDR_H_S); 2832 2833 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 2834 2835 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2836 HNS3_CFG_DEFAULT_SPEED_M, 2837 HNS3_CFG_DEFAULT_SPEED_S); 2838 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), 2839 HNS3_CFG_RSS_SIZE_M, 2840 HNS3_CFG_RSS_SIZE_S); 2841 2842 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 2843 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 2844 2845 req = (struct hns3_cfg_param_cmd *)desc[1].data; 2846 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); 2847 2848 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2849 HNS3_CFG_SPEED_ABILITY_M, 2850 HNS3_CFG_SPEED_ABILITY_S); 2851 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), 2852 HNS3_CFG_UMV_TBL_SPACE_M, 2853 HNS3_CFG_UMV_TBL_SPACE_S); 2854 if (!cfg->umv_space) 2855 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF; 2856 2857 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]), 2858 HNS3_CFG_EXT_RSS_SIZE_M, 2859 HNS3_CFG_EXT_RSS_SIZE_S); 2860 2861 /* 2862 * Field ext_rss_size_max obtained from firmware will be more flexible 2863 * for future changes and expansions, which is an exponent of 2, instead 2864 * of reading out directly. If this field is not zero, hns3 PF PMD 2865 * driver uses it as rss_size_max under one TC. Device, whose revision 2866 * id is greater than or equal to PCI_REVISION_ID_HIP09_A, obtains the 2867 * maximum number of queues supported under a TC through this field. 2868 */ 2869 if (ext_rss_size_max) 2870 cfg->rss_size_max = 1U << ext_rss_size_max; 2871 } 2872 2873 /* hns3_get_board_cfg: query the static parameter from NCL_config file in flash 2874 * @hw: pointer to struct hns3_hw 2875 * @hcfg: the config structure to be getted 2876 */ 2877 static int 2878 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg) 2879 { 2880 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM]; 2881 struct hns3_cfg_param_cmd *req; 2882 uint32_t offset; 2883 uint32_t i; 2884 int ret; 2885 2886 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) { 2887 offset = 0; 2888 req = (struct hns3_cfg_param_cmd *)desc[i].data; 2889 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM, 2890 true); 2891 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S, 2892 i * HNS3_CFG_RD_LEN_BYTES); 2893 /* Len should be divided by 4 when send to hardware */ 2894 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S, 2895 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT); 2896 req->offset = rte_cpu_to_le_32(offset); 2897 } 2898 2899 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM); 2900 if (ret) { 2901 PMD_INIT_LOG(ERR, "get config failed %d.", ret); 2902 return ret; 2903 } 2904 2905 hns3_parse_cfg(hcfg, desc); 2906 2907 return 0; 2908 } 2909 2910 static int 2911 hns3_parse_speed(int speed_cmd, uint32_t *speed) 2912 { 2913 switch (speed_cmd) { 2914 case HNS3_CFG_SPEED_10M: 2915 *speed = ETH_SPEED_NUM_10M; 2916 break; 2917 case HNS3_CFG_SPEED_100M: 2918 *speed = ETH_SPEED_NUM_100M; 2919 break; 2920 case HNS3_CFG_SPEED_1G: 2921 *speed = ETH_SPEED_NUM_1G; 2922 break; 2923 case HNS3_CFG_SPEED_10G: 2924 *speed = ETH_SPEED_NUM_10G; 2925 break; 2926 case HNS3_CFG_SPEED_25G: 2927 *speed = ETH_SPEED_NUM_25G; 2928 break; 2929 case HNS3_CFG_SPEED_40G: 2930 *speed = ETH_SPEED_NUM_40G; 2931 break; 2932 case HNS3_CFG_SPEED_50G: 2933 *speed = ETH_SPEED_NUM_50G; 2934 break; 2935 case HNS3_CFG_SPEED_100G: 2936 *speed = ETH_SPEED_NUM_100G; 2937 break; 2938 case HNS3_CFG_SPEED_200G: 2939 *speed = ETH_SPEED_NUM_200G; 2940 break; 2941 default: 2942 return -EINVAL; 2943 } 2944 2945 return 0; 2946 } 2947 2948 static void 2949 hns3_set_default_dev_specifications(struct hns3_hw *hw) 2950 { 2951 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; 2952 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; 2953 hw->rss_key_size = HNS3_RSS_KEY_SIZE; 2954 hw->max_tm_rate = HNS3_ETHER_MAX_RATE; 2955 } 2956 2957 static void 2958 hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) 2959 { 2960 struct hns3_dev_specs_0_cmd *req0; 2961 2962 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; 2963 2964 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; 2965 hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); 2966 hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); 2967 hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); 2968 } 2969 2970 static int 2971 hns3_query_dev_specifications(struct hns3_hw *hw) 2972 { 2973 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; 2974 int ret; 2975 int i; 2976 2977 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 2978 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, 2979 true); 2980 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 2981 } 2982 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); 2983 2984 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); 2985 if (ret) 2986 return ret; 2987 2988 hns3_parse_dev_specifications(hw, desc); 2989 2990 return 0; 2991 } 2992 2993 static int 2994 hns3_get_capability(struct hns3_hw *hw) 2995 { 2996 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 2997 struct rte_pci_device *pci_dev; 2998 struct hns3_pf *pf = &hns->pf; 2999 struct rte_eth_dev *eth_dev; 3000 uint16_t device_id; 3001 uint8_t revision; 3002 int ret; 3003 3004 eth_dev = &rte_eth_devices[hw->data->port_id]; 3005 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 3006 device_id = pci_dev->id.device_id; 3007 3008 if (device_id == HNS3_DEV_ID_25GE_RDMA || 3009 device_id == HNS3_DEV_ID_50GE_RDMA || 3010 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || 3011 device_id == HNS3_DEV_ID_200G_RDMA) 3012 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); 3013 3014 ret = hns3_query_dev_fec_info(eth_dev); 3015 if (ret) { 3016 PMD_INIT_LOG(ERR, 3017 "failed to query FEC information, ret = %d", ret); 3018 return ret; 3019 } 3020 3021 /* Get PCI revision id */ 3022 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, 3023 HNS3_PCI_REVISION_ID); 3024 if (ret != HNS3_PCI_REVISION_ID_LEN) { 3025 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", 3026 ret); 3027 return -EIO; 3028 } 3029 hw->revision = revision; 3030 3031 if (revision < PCI_REVISION_ID_HIP09_A) { 3032 hns3_set_default_dev_specifications(hw); 3033 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; 3034 hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL; 3035 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; 3036 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; 3037 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE; 3038 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; 3039 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE; 3040 return 0; 3041 } 3042 3043 ret = hns3_query_dev_specifications(hw); 3044 if (ret) { 3045 PMD_INIT_LOG(ERR, 3046 "failed to query dev specifications, ret = %d", 3047 ret); 3048 return ret; 3049 } 3050 3051 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; 3052 hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL; 3053 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; 3054 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; 3055 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; 3056 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; 3057 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; 3058 3059 return 0; 3060 } 3061 3062 static int 3063 hns3_get_board_configuration(struct hns3_hw *hw) 3064 { 3065 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3066 struct hns3_pf *pf = &hns->pf; 3067 struct hns3_cfg cfg; 3068 int ret; 3069 3070 ret = hns3_get_board_cfg(hw, &cfg); 3071 if (ret) { 3072 PMD_INIT_LOG(ERR, "get board config failed %d", ret); 3073 return ret; 3074 } 3075 3076 if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER && 3077 !hns3_dev_copper_supported(hw)) { 3078 PMD_INIT_LOG(ERR, "media type is copper, not supported."); 3079 return -EOPNOTSUPP; 3080 } 3081 3082 hw->mac.media_type = cfg.media_type; 3083 hw->rss_size_max = cfg.rss_size_max; 3084 hw->rss_dis_flag = false; 3085 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); 3086 hw->mac.phy_addr = cfg.phy_addr; 3087 hw->mac.default_addr_setted = false; 3088 hw->num_tx_desc = cfg.tqp_desc_num; 3089 hw->num_rx_desc = cfg.tqp_desc_num; 3090 hw->dcb_info.num_pg = 1; 3091 hw->dcb_info.hw_pfc_map = 0; 3092 3093 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); 3094 if (ret) { 3095 PMD_INIT_LOG(ERR, "Get wrong speed %d, ret = %d", 3096 cfg.default_speed, ret); 3097 return ret; 3098 } 3099 3100 pf->tc_max = cfg.tc_num; 3101 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { 3102 PMD_INIT_LOG(WARNING, 3103 "Get TC num(%u) from flash, set TC num to 1", 3104 pf->tc_max); 3105 pf->tc_max = 1; 3106 } 3107 3108 /* Dev does not support DCB */ 3109 if (!hns3_dev_dcb_supported(hw)) { 3110 pf->tc_max = 1; 3111 pf->pfc_max = 0; 3112 } else 3113 pf->pfc_max = pf->tc_max; 3114 3115 hw->dcb_info.num_tc = 1; 3116 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, 3117 hw->tqps_num / hw->dcb_info.num_tc); 3118 hns3_set_bit(hw->hw_tc_map, 0, 1); 3119 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; 3120 3121 pf->wanted_umv_size = cfg.umv_space; 3122 3123 return ret; 3124 } 3125 3126 static int 3127 hns3_get_configuration(struct hns3_hw *hw) 3128 { 3129 int ret; 3130 3131 ret = hns3_query_function_status(hw); 3132 if (ret) { 3133 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); 3134 return ret; 3135 } 3136 3137 /* Get device capability */ 3138 ret = hns3_get_capability(hw); 3139 if (ret) { 3140 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret); 3141 return ret; 3142 } 3143 3144 /* Get pf resource */ 3145 ret = hns3_query_pf_resource(hw); 3146 if (ret) { 3147 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); 3148 return ret; 3149 } 3150 3151 ret = hns3_get_board_configuration(hw); 3152 if (ret) 3153 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret); 3154 3155 return ret; 3156 } 3157 3158 static int 3159 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid, 3160 uint16_t tqp_vid, bool is_pf) 3161 { 3162 struct hns3_tqp_map_cmd *req; 3163 struct hns3_cmd_desc desc; 3164 int ret; 3165 3166 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); 3167 3168 req = (struct hns3_tqp_map_cmd *)desc.data; 3169 req->tqp_id = rte_cpu_to_le_16(tqp_pid); 3170 req->tqp_vf = func_id; 3171 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; 3172 if (!is_pf) 3173 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); 3174 req->tqp_vid = rte_cpu_to_le_16(tqp_vid); 3175 3176 ret = hns3_cmd_send(hw, &desc, 1); 3177 if (ret) 3178 PMD_INIT_LOG(ERR, "TQP map failed %d", ret); 3179 3180 return ret; 3181 } 3182 3183 static int 3184 hns3_map_tqp(struct hns3_hw *hw) 3185 { 3186 int ret; 3187 int i; 3188 3189 /* 3190 * In current version, VF is not supported when PF is driven by DPDK 3191 * driver, so we assign total tqps_num tqps allocated to this port 3192 * to PF. 3193 */ 3194 for (i = 0; i < hw->total_tqps_num; i++) { 3195 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true); 3196 if (ret) 3197 return ret; 3198 } 3199 3200 return 0; 3201 } 3202 3203 static int 3204 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 3205 { 3206 struct hns3_config_mac_speed_dup_cmd *req; 3207 struct hns3_cmd_desc desc; 3208 int ret; 3209 3210 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; 3211 3212 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); 3213 3214 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0); 3215 3216 switch (speed) { 3217 case ETH_SPEED_NUM_10M: 3218 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3219 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); 3220 break; 3221 case ETH_SPEED_NUM_100M: 3222 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3223 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); 3224 break; 3225 case ETH_SPEED_NUM_1G: 3226 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3227 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); 3228 break; 3229 case ETH_SPEED_NUM_10G: 3230 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3231 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); 3232 break; 3233 case ETH_SPEED_NUM_25G: 3234 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3235 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); 3236 break; 3237 case ETH_SPEED_NUM_40G: 3238 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3239 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); 3240 break; 3241 case ETH_SPEED_NUM_50G: 3242 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3243 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); 3244 break; 3245 case ETH_SPEED_NUM_100G: 3246 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3247 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); 3248 break; 3249 case ETH_SPEED_NUM_200G: 3250 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, 3251 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G); 3252 break; 3253 default: 3254 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); 3255 return -EINVAL; 3256 } 3257 3258 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); 3259 3260 ret = hns3_cmd_send(hw, &desc, 1); 3261 if (ret) 3262 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); 3263 3264 return ret; 3265 } 3266 3267 static int 3268 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3269 { 3270 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3271 struct hns3_pf *pf = &hns->pf; 3272 struct hns3_priv_buf *priv; 3273 uint32_t i, total_size; 3274 3275 total_size = pf->pkt_buf_size; 3276 3277 /* alloc tx buffer for all enabled tc */ 3278 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3279 priv = &buf_alloc->priv_buf[i]; 3280 3281 if (hw->hw_tc_map & BIT(i)) { 3282 if (total_size < pf->tx_buf_size) 3283 return -ENOMEM; 3284 3285 priv->tx_buf_size = pf->tx_buf_size; 3286 } else 3287 priv->tx_buf_size = 0; 3288 3289 total_size -= priv->tx_buf_size; 3290 } 3291 3292 return 0; 3293 } 3294 3295 static int 3296 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3297 { 3298 /* TX buffer size is unit by 128 byte */ 3299 #define HNS3_BUF_SIZE_UNIT_SHIFT 7 3300 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) 3301 struct hns3_tx_buff_alloc_cmd *req; 3302 struct hns3_cmd_desc desc; 3303 uint32_t buf_size; 3304 uint32_t i; 3305 int ret; 3306 3307 req = (struct hns3_tx_buff_alloc_cmd *)desc.data; 3308 3309 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); 3310 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3311 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 3312 3313 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; 3314 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | 3315 HNS3_BUF_SIZE_UPDATE_EN_MSK); 3316 } 3317 3318 ret = hns3_cmd_send(hw, &desc, 1); 3319 if (ret) 3320 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); 3321 3322 return ret; 3323 } 3324 3325 static int 3326 hns3_get_tc_num(struct hns3_hw *hw) 3327 { 3328 int cnt = 0; 3329 uint8_t i; 3330 3331 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3332 if (hw->hw_tc_map & BIT(i)) 3333 cnt++; 3334 return cnt; 3335 } 3336 3337 static uint32_t 3338 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3339 { 3340 struct hns3_priv_buf *priv; 3341 uint32_t rx_priv = 0; 3342 int i; 3343 3344 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3345 priv = &buf_alloc->priv_buf[i]; 3346 if (priv->enable) 3347 rx_priv += priv->buf_size; 3348 } 3349 return rx_priv; 3350 } 3351 3352 static uint32_t 3353 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) 3354 { 3355 uint32_t total_tx_size = 0; 3356 uint32_t i; 3357 3358 for (i = 0; i < HNS3_MAX_TC_NUM; i++) 3359 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 3360 3361 return total_tx_size; 3362 } 3363 3364 /* Get the number of pfc enabled TCs, which have private buffer */ 3365 static int 3366 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3367 { 3368 struct hns3_priv_buf *priv; 3369 int cnt = 0; 3370 uint8_t i; 3371 3372 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3373 priv = &buf_alloc->priv_buf[i]; 3374 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3375 cnt++; 3376 } 3377 3378 return cnt; 3379 } 3380 3381 /* Get the number of pfc disabled TCs, which have private buffer */ 3382 static int 3383 hns3_get_no_pfc_priv_num(struct hns3_hw *hw, 3384 struct hns3_pkt_buf_alloc *buf_alloc) 3385 { 3386 struct hns3_priv_buf *priv; 3387 int cnt = 0; 3388 uint8_t i; 3389 3390 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3391 priv = &buf_alloc->priv_buf[i]; 3392 if (hw->hw_tc_map & BIT(i) && 3393 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) 3394 cnt++; 3395 } 3396 3397 return cnt; 3398 } 3399 3400 static bool 3401 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, 3402 uint32_t rx_all) 3403 { 3404 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 3405 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3406 struct hns3_pf *pf = &hns->pf; 3407 uint32_t shared_buf, aligned_mps; 3408 uint32_t rx_priv; 3409 uint8_t tc_num; 3410 uint8_t i; 3411 3412 tc_num = hns3_get_tc_num(hw); 3413 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3414 3415 if (hns3_dev_dcb_supported(hw)) 3416 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + 3417 pf->dv_buf_size; 3418 else 3419 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF 3420 + pf->dv_buf_size; 3421 3422 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 3423 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc), 3424 HNS3_BUF_SIZE_UNIT); 3425 3426 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); 3427 if (rx_all < rx_priv + shared_std) 3428 return false; 3429 3430 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); 3431 buf_alloc->s_buf.buf_size = shared_buf; 3432 if (hns3_dev_dcb_supported(hw)) { 3433 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; 3434 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 3435 - roundup(aligned_mps / HNS3_BUF_DIV_BY, 3436 HNS3_BUF_SIZE_UNIT); 3437 } else { 3438 buf_alloc->s_buf.self.high = 3439 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3440 buf_alloc->s_buf.self.low = aligned_mps; 3441 } 3442 3443 if (hns3_dev_dcb_supported(hw)) { 3444 hi_thrd = shared_buf - pf->dv_buf_size; 3445 3446 if (tc_num <= NEED_RESERVE_TC_NUM) 3447 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT 3448 / BUF_MAX_PERCENT; 3449 3450 if (tc_num) 3451 hi_thrd = hi_thrd / tc_num; 3452 3453 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps); 3454 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); 3455 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; 3456 } else { 3457 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; 3458 lo_thrd = aligned_mps; 3459 } 3460 3461 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3462 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 3463 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 3464 } 3465 3466 return true; 3467 } 3468 3469 static bool 3470 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, 3471 struct hns3_pkt_buf_alloc *buf_alloc) 3472 { 3473 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3474 struct hns3_pf *pf = &hns->pf; 3475 struct hns3_priv_buf *priv; 3476 uint32_t aligned_mps; 3477 uint32_t rx_all; 3478 uint8_t i; 3479 3480 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3481 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); 3482 3483 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3484 priv = &buf_alloc->priv_buf[i]; 3485 3486 priv->enable = 0; 3487 priv->wl.low = 0; 3488 priv->wl.high = 0; 3489 priv->buf_size = 0; 3490 3491 if (!(hw->hw_tc_map & BIT(i))) 3492 continue; 3493 3494 priv->enable = 1; 3495 if (hw->dcb_info.hw_pfc_map & BIT(i)) { 3496 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT; 3497 priv->wl.high = roundup(priv->wl.low + aligned_mps, 3498 HNS3_BUF_SIZE_UNIT); 3499 } else { 3500 priv->wl.low = 0; 3501 priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) : 3502 aligned_mps; 3503 } 3504 3505 priv->buf_size = priv->wl.high + pf->dv_buf_size; 3506 } 3507 3508 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3509 } 3510 3511 static bool 3512 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw, 3513 struct hns3_pkt_buf_alloc *buf_alloc) 3514 { 3515 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3516 struct hns3_pf *pf = &hns->pf; 3517 struct hns3_priv_buf *priv; 3518 int no_pfc_priv_num; 3519 uint32_t rx_all; 3520 uint8_t mask; 3521 int i; 3522 3523 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3524 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc); 3525 3526 /* let the last to be cleared first */ 3527 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3528 priv = &buf_alloc->priv_buf[i]; 3529 mask = BIT((uint8_t)i); 3530 3531 if (hw->hw_tc_map & mask && 3532 !(hw->dcb_info.hw_pfc_map & mask)) { 3533 /* Clear the no pfc TC private buffer */ 3534 priv->wl.low = 0; 3535 priv->wl.high = 0; 3536 priv->buf_size = 0; 3537 priv->enable = 0; 3538 no_pfc_priv_num--; 3539 } 3540 3541 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3542 no_pfc_priv_num == 0) 3543 break; 3544 } 3545 3546 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3547 } 3548 3549 static bool 3550 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, 3551 struct hns3_pkt_buf_alloc *buf_alloc) 3552 { 3553 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3554 struct hns3_pf *pf = &hns->pf; 3555 struct hns3_priv_buf *priv; 3556 uint32_t rx_all; 3557 int pfc_priv_num; 3558 uint8_t mask; 3559 int i; 3560 3561 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3562 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc); 3563 3564 /* let the last to be cleared first */ 3565 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { 3566 priv = &buf_alloc->priv_buf[i]; 3567 mask = BIT((uint8_t)i); 3568 3569 if (hw->hw_tc_map & mask && 3570 hw->dcb_info.hw_pfc_map & mask) { 3571 /* Reduce the number of pfc TC with private buffer */ 3572 priv->wl.low = 0; 3573 priv->enable = 0; 3574 priv->wl.high = 0; 3575 priv->buf_size = 0; 3576 pfc_priv_num--; 3577 } 3578 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || 3579 pfc_priv_num == 0) 3580 break; 3581 } 3582 3583 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); 3584 } 3585 3586 static bool 3587 hns3_only_alloc_priv_buff(struct hns3_hw *hw, 3588 struct hns3_pkt_buf_alloc *buf_alloc) 3589 { 3590 #define COMPENSATE_BUFFER 0x3C00 3591 #define COMPENSATE_HALF_MPS_NUM 5 3592 #define PRIV_WL_GAP 0x1800 3593 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3594 struct hns3_pf *pf = &hns->pf; 3595 uint32_t tc_num = hns3_get_tc_num(hw); 3596 uint32_t half_mps = pf->mps >> 1; 3597 struct hns3_priv_buf *priv; 3598 uint32_t min_rx_priv; 3599 uint32_t rx_priv; 3600 uint8_t i; 3601 3602 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); 3603 if (tc_num) 3604 rx_priv = rx_priv / tc_num; 3605 3606 if (tc_num <= NEED_RESERVE_TC_NUM) 3607 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 3608 3609 /* 3610 * Minimum value of private buffer in rx direction (min_rx_priv) is 3611 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private 3612 * buffer if rx_priv is greater than min_rx_priv. 3613 */ 3614 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER + 3615 COMPENSATE_HALF_MPS_NUM * half_mps; 3616 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT); 3617 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT); 3618 3619 if (rx_priv < min_rx_priv) 3620 return false; 3621 3622 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3623 priv = &buf_alloc->priv_buf[i]; 3624 3625 priv->enable = 0; 3626 priv->wl.low = 0; 3627 priv->wl.high = 0; 3628 priv->buf_size = 0; 3629 3630 if (!(hw->hw_tc_map & BIT(i))) 3631 continue; 3632 3633 priv->enable = 1; 3634 priv->buf_size = rx_priv; 3635 priv->wl.high = rx_priv - pf->dv_buf_size; 3636 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 3637 } 3638 3639 buf_alloc->s_buf.buf_size = 0; 3640 3641 return true; 3642 } 3643 3644 /* 3645 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs 3646 * @hw: pointer to struct hns3_hw 3647 * @buf_alloc: pointer to buffer calculation data 3648 * @return: 0: calculate sucessful, negative: fail 3649 */ 3650 static int 3651 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3652 { 3653 /* When DCB is not supported, rx private buffer is not allocated. */ 3654 if (!hns3_dev_dcb_supported(hw)) { 3655 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3656 struct hns3_pf *pf = &hns->pf; 3657 uint32_t rx_all = pf->pkt_buf_size; 3658 3659 rx_all -= hns3_get_tx_buff_alloced(buf_alloc); 3660 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all)) 3661 return -ENOMEM; 3662 3663 return 0; 3664 } 3665 3666 /* 3667 * Try to allocate privated packet buffer for all TCs without share 3668 * buffer. 3669 */ 3670 if (hns3_only_alloc_priv_buff(hw, buf_alloc)) 3671 return 0; 3672 3673 /* 3674 * Try to allocate privated packet buffer for all TCs with share 3675 * buffer. 3676 */ 3677 if (hns3_rx_buf_calc_all(hw, true, buf_alloc)) 3678 return 0; 3679 3680 /* 3681 * For different application scenes, the enabled port number, TC number 3682 * and no_drop TC number are different. In order to obtain the better 3683 * performance, software could allocate the buffer size and configure 3684 * the waterline by tring to decrease the private buffer size according 3685 * to the order, namely, waterline of valided tc, pfc disabled tc, pfc 3686 * enabled tc. 3687 */ 3688 if (hns3_rx_buf_calc_all(hw, false, buf_alloc)) 3689 return 0; 3690 3691 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc)) 3692 return 0; 3693 3694 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc)) 3695 return 0; 3696 3697 return -ENOMEM; 3698 } 3699 3700 static int 3701 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3702 { 3703 struct hns3_rx_priv_buff_cmd *req; 3704 struct hns3_cmd_desc desc; 3705 uint32_t buf_size; 3706 int ret; 3707 int i; 3708 3709 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false); 3710 req = (struct hns3_rx_priv_buff_cmd *)desc.data; 3711 3712 /* Alloc private buffer TCs */ 3713 for (i = 0; i < HNS3_MAX_TC_NUM; i++) { 3714 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i]; 3715 3716 req->buf_num[i] = 3717 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S); 3718 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B); 3719 } 3720 3721 buf_size = buf_alloc->s_buf.buf_size; 3722 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) | 3723 (1 << HNS3_TC0_PRI_BUF_EN_B)); 3724 3725 ret = hns3_cmd_send(hw, &desc, 1); 3726 if (ret) 3727 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret); 3728 3729 return ret; 3730 } 3731 3732 static int 3733 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3734 { 3735 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2 3736 struct hns3_rx_priv_wl_buf *req; 3737 struct hns3_priv_buf *priv; 3738 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM]; 3739 int i, j; 3740 int ret; 3741 3742 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) { 3743 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC, 3744 false); 3745 req = (struct hns3_rx_priv_wl_buf *)desc[i].data; 3746 3747 /* The first descriptor set the NEXT bit to 1 */ 3748 if (i == 0) 3749 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3750 else 3751 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3752 3753 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 3754 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j; 3755 3756 priv = &buf_alloc->priv_buf[idx]; 3757 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >> 3758 HNS3_BUF_UNIT_S); 3759 req->tc_wl[j].high |= 3760 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3761 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >> 3762 HNS3_BUF_UNIT_S); 3763 req->tc_wl[j].low |= 3764 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3765 } 3766 } 3767 3768 /* Send 2 descriptor at one time */ 3769 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM); 3770 if (ret) 3771 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d", 3772 ret); 3773 return ret; 3774 } 3775 3776 static int 3777 hns3_common_thrd_config(struct hns3_hw *hw, 3778 struct hns3_pkt_buf_alloc *buf_alloc) 3779 { 3780 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2 3781 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf; 3782 struct hns3_rx_com_thrd *req; 3783 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM]; 3784 struct hns3_tc_thrd *tc; 3785 int tc_idx; 3786 int i, j; 3787 int ret; 3788 3789 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) { 3790 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC, 3791 false); 3792 req = (struct hns3_rx_com_thrd *)&desc[i].data; 3793 3794 /* The first descriptor set the NEXT bit to 1 */ 3795 if (i == 0) 3796 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3797 else 3798 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); 3799 3800 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { 3801 tc_idx = i * HNS3_TC_NUM_ONE_DESC + j; 3802 tc = &s_buf->tc_thrd[tc_idx]; 3803 3804 req->com_thrd[j].high = 3805 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); 3806 req->com_thrd[j].high |= 3807 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3808 req->com_thrd[j].low = 3809 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); 3810 req->com_thrd[j].low |= 3811 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3812 } 3813 } 3814 3815 /* Send 2 descriptors at one time */ 3816 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); 3817 if (ret) 3818 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); 3819 3820 return ret; 3821 } 3822 3823 static int 3824 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) 3825 { 3826 struct hns3_shared_buf *buf = &buf_alloc->s_buf; 3827 struct hns3_rx_com_wl *req; 3828 struct hns3_cmd_desc desc; 3829 int ret; 3830 3831 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); 3832 3833 req = (struct hns3_rx_com_wl *)desc.data; 3834 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); 3835 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3836 3837 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); 3838 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); 3839 3840 ret = hns3_cmd_send(hw, &desc, 1); 3841 if (ret) 3842 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); 3843 3844 return ret; 3845 } 3846 3847 int 3848 hns3_buffer_alloc(struct hns3_hw *hw) 3849 { 3850 struct hns3_pkt_buf_alloc pkt_buf; 3851 int ret; 3852 3853 memset(&pkt_buf, 0, sizeof(pkt_buf)); 3854 ret = hns3_tx_buffer_calc(hw, &pkt_buf); 3855 if (ret) { 3856 PMD_INIT_LOG(ERR, 3857 "could not calc tx buffer size for all TCs %d", 3858 ret); 3859 return ret; 3860 } 3861 3862 ret = hns3_tx_buffer_alloc(hw, &pkt_buf); 3863 if (ret) { 3864 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); 3865 return ret; 3866 } 3867 3868 ret = hns3_rx_buffer_calc(hw, &pkt_buf); 3869 if (ret) { 3870 PMD_INIT_LOG(ERR, 3871 "could not calc rx priv buffer size for all TCs %d", 3872 ret); 3873 return ret; 3874 } 3875 3876 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); 3877 if (ret) { 3878 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); 3879 return ret; 3880 } 3881 3882 if (hns3_dev_dcb_supported(hw)) { 3883 ret = hns3_rx_priv_wl_config(hw, &pkt_buf); 3884 if (ret) { 3885 PMD_INIT_LOG(ERR, 3886 "could not configure rx private waterline %d", 3887 ret); 3888 return ret; 3889 } 3890 3891 ret = hns3_common_thrd_config(hw, &pkt_buf); 3892 if (ret) { 3893 PMD_INIT_LOG(ERR, 3894 "could not configure common threshold %d", 3895 ret); 3896 return ret; 3897 } 3898 } 3899 3900 ret = hns3_common_wl_config(hw, &pkt_buf); 3901 if (ret) 3902 PMD_INIT_LOG(ERR, "could not configure common waterline %d", 3903 ret); 3904 3905 return ret; 3906 } 3907 3908 static int 3909 hns3_mac_init(struct hns3_hw *hw) 3910 { 3911 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 3912 struct hns3_mac *mac = &hw->mac; 3913 struct hns3_pf *pf = &hns->pf; 3914 int ret; 3915 3916 pf->support_sfp_query = true; 3917 mac->link_duplex = ETH_LINK_FULL_DUPLEX; 3918 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); 3919 if (ret) { 3920 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); 3921 return ret; 3922 } 3923 3924 mac->link_status = ETH_LINK_DOWN; 3925 3926 return hns3_config_mtu(hw, pf->mps); 3927 } 3928 3929 static int 3930 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) 3931 { 3932 #define HNS3_ETHERTYPE_SUCCESS_ADD 0 3933 #define HNS3_ETHERTYPE_ALREADY_ADD 1 3934 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 3935 #define HNS3_ETHERTYPE_KEY_CONFLICT 3 3936 int return_status; 3937 3938 if (cmdq_resp) { 3939 PMD_INIT_LOG(ERR, 3940 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", 3941 cmdq_resp); 3942 return -EIO; 3943 } 3944 3945 switch (resp_code) { 3946 case HNS3_ETHERTYPE_SUCCESS_ADD: 3947 case HNS3_ETHERTYPE_ALREADY_ADD: 3948 return_status = 0; 3949 break; 3950 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: 3951 PMD_INIT_LOG(ERR, 3952 "add mac ethertype failed for manager table overflow."); 3953 return_status = -EIO; 3954 break; 3955 case HNS3_ETHERTYPE_KEY_CONFLICT: 3956 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); 3957 return_status = -EIO; 3958 break; 3959 default: 3960 PMD_INIT_LOG(ERR, 3961 "add mac ethertype failed for undefined, code=%d.", 3962 resp_code); 3963 return_status = -EIO; 3964 break; 3965 } 3966 3967 return return_status; 3968 } 3969 3970 static int 3971 hns3_add_mgr_tbl(struct hns3_hw *hw, 3972 const struct hns3_mac_mgr_tbl_entry_cmd *req) 3973 { 3974 struct hns3_cmd_desc desc; 3975 uint8_t resp_code; 3976 uint16_t retval; 3977 int ret; 3978 3979 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); 3980 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); 3981 3982 ret = hns3_cmd_send(hw, &desc, 1); 3983 if (ret) { 3984 PMD_INIT_LOG(ERR, 3985 "add mac ethertype failed for cmd_send, ret =%d.", 3986 ret); 3987 return ret; 3988 } 3989 3990 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; 3991 retval = rte_le_to_cpu_16(desc.retval); 3992 3993 return hns3_get_mac_ethertype_cmd_status(retval, resp_code); 3994 } 3995 3996 static void 3997 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, 3998 int *table_item_num) 3999 { 4000 struct hns3_mac_mgr_tbl_entry_cmd *tbl; 4001 4002 /* 4003 * In current version, we add one item in management table as below: 4004 * 0x0180C200000E -- LLDP MC address 4005 */ 4006 tbl = mgr_table; 4007 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; 4008 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); 4009 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); 4010 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); 4011 tbl->i_port_bitmap = 0x1; 4012 *table_item_num = 1; 4013 } 4014 4015 static int 4016 hns3_init_mgr_tbl(struct hns3_hw *hw) 4017 { 4018 #define HNS_MAC_MGR_TBL_MAX_SIZE 16 4019 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; 4020 int table_item_num; 4021 int ret; 4022 int i; 4023 4024 memset(mgr_table, 0, sizeof(mgr_table)); 4025 hns3_prepare_mgr_tbl(mgr_table, &table_item_num); 4026 for (i = 0; i < table_item_num; i++) { 4027 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); 4028 if (ret) { 4029 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d", 4030 ret); 4031 return ret; 4032 } 4033 } 4034 4035 return 0; 4036 } 4037 4038 static void 4039 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, 4040 bool en_mc, bool en_bc, int vport_id) 4041 { 4042 if (!param) 4043 return; 4044 4045 memset(param, 0, sizeof(struct hns3_promisc_param)); 4046 if (en_uc) 4047 param->enable = HNS3_PROMISC_EN_UC; 4048 if (en_mc) 4049 param->enable |= HNS3_PROMISC_EN_MC; 4050 if (en_bc) 4051 param->enable |= HNS3_PROMISC_EN_BC; 4052 param->vf_id = vport_id; 4053 } 4054 4055 static int 4056 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) 4057 { 4058 struct hns3_promisc_cfg_cmd *req; 4059 struct hns3_cmd_desc desc; 4060 int ret; 4061 4062 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false); 4063 4064 req = (struct hns3_promisc_cfg_cmd *)desc.data; 4065 req->vf_id = param->vf_id; 4066 req->flag = (param->enable << HNS3_PROMISC_EN_B) | 4067 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B; 4068 4069 ret = hns3_cmd_send(hw, &desc, 1); 4070 if (ret) 4071 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret); 4072 4073 return ret; 4074 } 4075 4076 static int 4077 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) 4078 { 4079 struct hns3_promisc_param param; 4080 bool en_bc_pmc = true; 4081 uint8_t vf_id; 4082 4083 /* 4084 * In current version VF is not supported when PF is driven by DPDK 4085 * driver, just need to configure parameters for PF vport. 4086 */ 4087 vf_id = HNS3_PF_FUNC_ID; 4088 4089 hns3_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); 4090 return hns3_cmd_set_promisc_mode(hw, ¶m); 4091 } 4092 4093 static int 4094 hns3_promisc_init(struct hns3_hw *hw) 4095 { 4096 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4097 struct hns3_pf *pf = &hns->pf; 4098 struct hns3_promisc_param param; 4099 uint16_t func_id; 4100 int ret; 4101 4102 ret = hns3_set_promisc_mode(hw, false, false); 4103 if (ret) { 4104 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret); 4105 return ret; 4106 } 4107 4108 /* 4109 * In current version VFs are not supported when PF is driven by DPDK 4110 * driver. After PF has been taken over by DPDK, the original VF will 4111 * be invalid. So, there is a possibility of entry residues. It should 4112 * clear VFs's promisc mode to avoid unnecessary bandwidth usage 4113 * during init. 4114 */ 4115 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) { 4116 hns3_promisc_param_init(¶m, false, false, false, func_id); 4117 ret = hns3_cmd_set_promisc_mode(hw, ¶m); 4118 if (ret) { 4119 PMD_INIT_LOG(ERR, "failed to clear vf:%d promisc mode," 4120 " ret = %d", func_id, ret); 4121 return ret; 4122 } 4123 } 4124 4125 return 0; 4126 } 4127 4128 static void 4129 hns3_promisc_uninit(struct hns3_hw *hw) 4130 { 4131 struct hns3_promisc_param param; 4132 uint16_t func_id; 4133 int ret; 4134 4135 func_id = HNS3_PF_FUNC_ID; 4136 4137 /* 4138 * In current version VFs are not supported when PF is driven by 4139 * DPDK driver, and VFs' promisc mode status has been cleared during 4140 * init and their status will not change. So just clear PF's promisc 4141 * mode status during uninit. 4142 */ 4143 hns3_promisc_param_init(¶m, false, false, false, func_id); 4144 ret = hns3_cmd_set_promisc_mode(hw, ¶m); 4145 if (ret) 4146 PMD_INIT_LOG(ERR, "failed to clear promisc status during" 4147 " uninit, ret = %d", ret); 4148 } 4149 4150 static int 4151 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) 4152 { 4153 bool allmulti = dev->data->all_multicast ? true : false; 4154 struct hns3_adapter *hns = dev->data->dev_private; 4155 struct hns3_hw *hw = &hns->hw; 4156 uint64_t offloads; 4157 int err; 4158 int ret; 4159 4160 rte_spinlock_lock(&hw->lock); 4161 ret = hns3_set_promisc_mode(hw, true, true); 4162 if (ret) { 4163 rte_spinlock_unlock(&hw->lock); 4164 hns3_err(hw, "failed to enable promiscuous mode, ret = %d", 4165 ret); 4166 return ret; 4167 } 4168 4169 /* 4170 * When promiscuous mode was enabled, disable the vlan filter to let 4171 * all packets coming in in the receiving direction. 4172 */ 4173 offloads = dev->data->dev_conf.rxmode.offloads; 4174 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4175 ret = hns3_enable_vlan_filter(hns, false); 4176 if (ret) { 4177 hns3_err(hw, "failed to enable promiscuous mode due to " 4178 "failure to disable vlan filter, ret = %d", 4179 ret); 4180 err = hns3_set_promisc_mode(hw, false, allmulti); 4181 if (err) 4182 hns3_err(hw, "failed to restore promiscuous " 4183 "status after disable vlan filter " 4184 "failed during enabling promiscuous " 4185 "mode, ret = %d", ret); 4186 } 4187 } 4188 4189 rte_spinlock_unlock(&hw->lock); 4190 4191 return ret; 4192 } 4193 4194 static int 4195 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) 4196 { 4197 bool allmulti = dev->data->all_multicast ? true : false; 4198 struct hns3_adapter *hns = dev->data->dev_private; 4199 struct hns3_hw *hw = &hns->hw; 4200 uint64_t offloads; 4201 int err; 4202 int ret; 4203 4204 /* If now in all_multicast mode, must remain in all_multicast mode. */ 4205 rte_spinlock_lock(&hw->lock); 4206 ret = hns3_set_promisc_mode(hw, false, allmulti); 4207 if (ret) { 4208 rte_spinlock_unlock(&hw->lock); 4209 hns3_err(hw, "failed to disable promiscuous mode, ret = %d", 4210 ret); 4211 return ret; 4212 } 4213 /* when promiscuous mode was disabled, restore the vlan filter status */ 4214 offloads = dev->data->dev_conf.rxmode.offloads; 4215 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 4216 ret = hns3_enable_vlan_filter(hns, true); 4217 if (ret) { 4218 hns3_err(hw, "failed to disable promiscuous mode due to" 4219 " failure to restore vlan filter, ret = %d", 4220 ret); 4221 err = hns3_set_promisc_mode(hw, true, true); 4222 if (err) 4223 hns3_err(hw, "failed to restore promiscuous " 4224 "status after enabling vlan filter " 4225 "failed during disabling promiscuous " 4226 "mode, ret = %d", ret); 4227 } 4228 } 4229 rte_spinlock_unlock(&hw->lock); 4230 4231 return ret; 4232 } 4233 4234 static int 4235 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) 4236 { 4237 struct hns3_adapter *hns = dev->data->dev_private; 4238 struct hns3_hw *hw = &hns->hw; 4239 int ret; 4240 4241 if (dev->data->promiscuous) 4242 return 0; 4243 4244 rte_spinlock_lock(&hw->lock); 4245 ret = hns3_set_promisc_mode(hw, false, true); 4246 rte_spinlock_unlock(&hw->lock); 4247 if (ret) 4248 hns3_err(hw, "failed to enable allmulticast mode, ret = %d", 4249 ret); 4250 4251 return ret; 4252 } 4253 4254 static int 4255 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) 4256 { 4257 struct hns3_adapter *hns = dev->data->dev_private; 4258 struct hns3_hw *hw = &hns->hw; 4259 int ret; 4260 4261 /* If now in promiscuous mode, must remain in all_multicast mode. */ 4262 if (dev->data->promiscuous) 4263 return 0; 4264 4265 rte_spinlock_lock(&hw->lock); 4266 ret = hns3_set_promisc_mode(hw, false, false); 4267 rte_spinlock_unlock(&hw->lock); 4268 if (ret) 4269 hns3_err(hw, "failed to disable allmulticast mode, ret = %d", 4270 ret); 4271 4272 return ret; 4273 } 4274 4275 static int 4276 hns3_dev_promisc_restore(struct hns3_adapter *hns) 4277 { 4278 struct hns3_hw *hw = &hns->hw; 4279 bool allmulti = hw->data->all_multicast ? true : false; 4280 int ret; 4281 4282 if (hw->data->promiscuous) { 4283 ret = hns3_set_promisc_mode(hw, true, true); 4284 if (ret) 4285 hns3_err(hw, "failed to restore promiscuous mode, " 4286 "ret = %d", ret); 4287 return ret; 4288 } 4289 4290 ret = hns3_set_promisc_mode(hw, false, allmulti); 4291 if (ret) 4292 hns3_err(hw, "failed to restore allmulticast mode, ret = %d", 4293 ret); 4294 return ret; 4295 } 4296 4297 static int 4298 hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed) 4299 { 4300 struct hns3_sfp_speed_cmd *resp; 4301 struct hns3_cmd_desc desc; 4302 int ret; 4303 4304 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SFP_GET_SPEED, true); 4305 resp = (struct hns3_sfp_speed_cmd *)desc.data; 4306 ret = hns3_cmd_send(hw, &desc, 1); 4307 if (ret == -EOPNOTSUPP) { 4308 hns3_err(hw, "IMP do not support get SFP speed %d", ret); 4309 return ret; 4310 } else if (ret) { 4311 hns3_err(hw, "get sfp speed failed %d", ret); 4312 return ret; 4313 } 4314 4315 *speed = resp->sfp_speed; 4316 4317 return 0; 4318 } 4319 4320 static uint8_t 4321 hns3_check_speed_dup(uint8_t duplex, uint32_t speed) 4322 { 4323 if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M)) 4324 duplex = ETH_LINK_FULL_DUPLEX; 4325 4326 return duplex; 4327 } 4328 4329 static int 4330 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) 4331 { 4332 struct hns3_mac *mac = &hw->mac; 4333 int ret; 4334 4335 duplex = hns3_check_speed_dup(duplex, speed); 4336 if (mac->link_speed == speed && mac->link_duplex == duplex) 4337 return 0; 4338 4339 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex); 4340 if (ret) 4341 return ret; 4342 4343 mac->link_speed = speed; 4344 mac->link_duplex = duplex; 4345 4346 return 0; 4347 } 4348 4349 static int 4350 hns3_update_speed_duplex(struct rte_eth_dev *eth_dev) 4351 { 4352 struct hns3_adapter *hns = eth_dev->data->dev_private; 4353 struct hns3_hw *hw = &hns->hw; 4354 struct hns3_pf *pf = &hns->pf; 4355 uint32_t speed; 4356 int ret; 4357 4358 /* If IMP do not support get SFP/qSFP speed, return directly */ 4359 if (!pf->support_sfp_query) 4360 return 0; 4361 4362 ret = hns3_get_sfp_speed(hw, &speed); 4363 if (ret == -EOPNOTSUPP) { 4364 pf->support_sfp_query = false; 4365 return ret; 4366 } else if (ret) 4367 return ret; 4368 4369 if (speed == ETH_SPEED_NUM_NONE) 4370 return 0; /* do nothing if no SFP */ 4371 4372 /* Config full duplex for SFP */ 4373 return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX); 4374 } 4375 4376 static int 4377 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) 4378 { 4379 struct hns3_config_mac_mode_cmd *req; 4380 struct hns3_cmd_desc desc; 4381 uint32_t loop_en = 0; 4382 uint8_t val = 0; 4383 int ret; 4384 4385 req = (struct hns3_config_mac_mode_cmd *)desc.data; 4386 4387 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); 4388 if (enable) 4389 val = 1; 4390 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); 4391 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); 4392 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); 4393 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); 4394 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); 4395 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); 4396 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); 4397 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); 4398 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); 4399 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); 4400 4401 /* 4402 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC 4403 * when receiving frames. Otherwise, CRC will be stripped. 4404 */ 4405 if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) 4406 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0); 4407 else 4408 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); 4409 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); 4410 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); 4411 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); 4412 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); 4413 4414 ret = hns3_cmd_send(hw, &desc, 1); 4415 if (ret) 4416 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret); 4417 4418 return ret; 4419 } 4420 4421 static int 4422 hns3_get_mac_link_status(struct hns3_hw *hw) 4423 { 4424 struct hns3_link_status_cmd *req; 4425 struct hns3_cmd_desc desc; 4426 int link_status; 4427 int ret; 4428 4429 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); 4430 ret = hns3_cmd_send(hw, &desc, 1); 4431 if (ret) { 4432 hns3_err(hw, "get link status cmd failed %d", ret); 4433 return ETH_LINK_DOWN; 4434 } 4435 4436 req = (struct hns3_link_status_cmd *)desc.data; 4437 link_status = req->status & HNS3_LINK_STATUS_UP_M; 4438 4439 return !!link_status; 4440 } 4441 4442 void 4443 hns3_update_link_status(struct hns3_hw *hw) 4444 { 4445 int state; 4446 4447 state = hns3_get_mac_link_status(hw); 4448 if (state != hw->mac.link_status) { 4449 hw->mac.link_status = state; 4450 hns3_warn(hw, "Link status change to %s!", state ? "up" : "down"); 4451 } 4452 } 4453 4454 static void 4455 hns3_service_handler(void *param) 4456 { 4457 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 4458 struct hns3_adapter *hns = eth_dev->data->dev_private; 4459 struct hns3_hw *hw = &hns->hw; 4460 4461 if (!hns3_is_reset_pending(hns)) { 4462 hns3_update_speed_duplex(eth_dev); 4463 hns3_update_link_status(hw); 4464 } else 4465 hns3_warn(hw, "Cancel the query when reset is pending"); 4466 4467 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); 4468 } 4469 4470 static int 4471 hns3_init_hardware(struct hns3_adapter *hns) 4472 { 4473 struct hns3_hw *hw = &hns->hw; 4474 int ret; 4475 4476 ret = hns3_map_tqp(hw); 4477 if (ret) { 4478 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); 4479 return ret; 4480 } 4481 4482 ret = hns3_init_umv_space(hw); 4483 if (ret) { 4484 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); 4485 return ret; 4486 } 4487 4488 ret = hns3_mac_init(hw); 4489 if (ret) { 4490 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); 4491 goto err_mac_init; 4492 } 4493 4494 ret = hns3_init_mgr_tbl(hw); 4495 if (ret) { 4496 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); 4497 goto err_mac_init; 4498 } 4499 4500 ret = hns3_promisc_init(hw); 4501 if (ret) { 4502 PMD_INIT_LOG(ERR, "Failed to init promisc: %d", 4503 ret); 4504 goto err_mac_init; 4505 } 4506 4507 ret = hns3_init_vlan_config(hns); 4508 if (ret) { 4509 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); 4510 goto err_mac_init; 4511 } 4512 4513 ret = hns3_dcb_init(hw); 4514 if (ret) { 4515 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); 4516 goto err_mac_init; 4517 } 4518 4519 ret = hns3_init_fd_config(hns); 4520 if (ret) { 4521 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); 4522 goto err_mac_init; 4523 } 4524 4525 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); 4526 if (ret) { 4527 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); 4528 goto err_mac_init; 4529 } 4530 4531 ret = hns3_config_gro(hw, false); 4532 if (ret) { 4533 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); 4534 goto err_mac_init; 4535 } 4536 4537 /* 4538 * In the initialization clearing the all hardware mapping relationship 4539 * configurations between queues and interrupt vectors is needed, so 4540 * some error caused by the residual configurations, such as the 4541 * unexpected interrupt, can be avoid. 4542 */ 4543 ret = hns3_init_ring_with_vector(hw); 4544 if (ret) { 4545 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); 4546 goto err_mac_init; 4547 } 4548 4549 return 0; 4550 4551 err_mac_init: 4552 hns3_uninit_umv_space(hw); 4553 return ret; 4554 } 4555 4556 static int 4557 hns3_clear_hw(struct hns3_hw *hw) 4558 { 4559 struct hns3_cmd_desc desc; 4560 int ret; 4561 4562 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false); 4563 4564 ret = hns3_cmd_send(hw, &desc, 1); 4565 if (ret && ret != -EOPNOTSUPP) 4566 return ret; 4567 4568 return 0; 4569 } 4570 4571 static void 4572 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable) 4573 { 4574 uint32_t val; 4575 4576 /* 4577 * The new firmware support report more hardware error types by 4578 * msix mode. These errors are defined as RAS errors in hardware 4579 * and belong to a different type from the MSI-x errors processed 4580 * by the network driver. 4581 * 4582 * Network driver should open the new error report on initialition 4583 */ 4584 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 4585 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0); 4586 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); 4587 } 4588 4589 static int 4590 hns3_init_pf(struct rte_eth_dev *eth_dev) 4591 { 4592 struct rte_device *dev = eth_dev->device; 4593 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 4594 struct hns3_adapter *hns = eth_dev->data->dev_private; 4595 struct hns3_hw *hw = &hns->hw; 4596 int ret; 4597 4598 PMD_INIT_FUNC_TRACE(); 4599 4600 /* Get hardware io base address from pcie BAR2 IO space */ 4601 hw->io_base = pci_dev->mem_resource[2].addr; 4602 4603 /* Firmware command queue initialize */ 4604 ret = hns3_cmd_init_queue(hw); 4605 if (ret) { 4606 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); 4607 goto err_cmd_init_queue; 4608 } 4609 4610 hns3_clear_all_event_cause(hw); 4611 4612 /* Firmware command initialize */ 4613 ret = hns3_cmd_init(hw); 4614 if (ret) { 4615 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); 4616 goto err_cmd_init; 4617 } 4618 4619 /* 4620 * To ensure that the hardware environment is clean during 4621 * initialization, the driver actively clear the hardware environment 4622 * during initialization, including PF and corresponding VFs' vlan, mac, 4623 * flow table configurations, etc. 4624 */ 4625 ret = hns3_clear_hw(hw); 4626 if (ret) { 4627 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret); 4628 goto err_cmd_init; 4629 } 4630 4631 hns3_config_all_msix_error(hw, true); 4632 4633 ret = rte_intr_callback_register(&pci_dev->intr_handle, 4634 hns3_interrupt_handler, 4635 eth_dev); 4636 if (ret) { 4637 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); 4638 goto err_intr_callback_register; 4639 } 4640 4641 /* Enable interrupt */ 4642 rte_intr_enable(&pci_dev->intr_handle); 4643 hns3_pf_enable_irq0(hw); 4644 4645 /* Get configuration */ 4646 ret = hns3_get_configuration(hw); 4647 if (ret) { 4648 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); 4649 goto err_get_config; 4650 } 4651 4652 ret = hns3_tqp_stats_init(hw); 4653 if (ret) 4654 goto err_get_config; 4655 4656 ret = hns3_init_hardware(hns); 4657 if (ret) { 4658 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret); 4659 goto err_init_hw; 4660 } 4661 4662 /* Initialize flow director filter list & hash */ 4663 ret = hns3_fdir_filter_init(hns); 4664 if (ret) { 4665 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret); 4666 goto err_fdir; 4667 } 4668 4669 hns3_set_default_rss_args(hw); 4670 4671 ret = hns3_enable_hw_error_intr(hns, true); 4672 if (ret) { 4673 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", 4674 ret); 4675 goto err_enable_intr; 4676 } 4677 4678 return 0; 4679 4680 err_enable_intr: 4681 hns3_fdir_filter_uninit(hns); 4682 err_fdir: 4683 hns3_uninit_umv_space(hw); 4684 err_init_hw: 4685 hns3_tqp_stats_uninit(hw); 4686 err_get_config: 4687 hns3_pf_disable_irq0(hw); 4688 rte_intr_disable(&pci_dev->intr_handle); 4689 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 4690 eth_dev); 4691 err_intr_callback_register: 4692 err_cmd_init: 4693 hns3_cmd_uninit(hw); 4694 hns3_cmd_destroy_queue(hw); 4695 err_cmd_init_queue: 4696 hw->io_base = NULL; 4697 4698 return ret; 4699 } 4700 4701 static void 4702 hns3_uninit_pf(struct rte_eth_dev *eth_dev) 4703 { 4704 struct hns3_adapter *hns = eth_dev->data->dev_private; 4705 struct rte_device *dev = eth_dev->device; 4706 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 4707 struct hns3_hw *hw = &hns->hw; 4708 4709 PMD_INIT_FUNC_TRACE(); 4710 4711 hns3_enable_hw_error_intr(hns, false); 4712 hns3_rss_uninit(hns); 4713 (void)hns3_config_gro(hw, false); 4714 hns3_promisc_uninit(hw); 4715 hns3_fdir_filter_uninit(hns); 4716 hns3_uninit_umv_space(hw); 4717 hns3_tqp_stats_uninit(hw); 4718 hns3_pf_disable_irq0(hw); 4719 rte_intr_disable(&pci_dev->intr_handle); 4720 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, 4721 eth_dev); 4722 hns3_config_all_msix_error(hw, false); 4723 hns3_cmd_uninit(hw); 4724 hns3_cmd_destroy_queue(hw); 4725 hw->io_base = NULL; 4726 } 4727 4728 static int 4729 hns3_do_start(struct hns3_adapter *hns, bool reset_queue) 4730 { 4731 struct hns3_hw *hw = &hns->hw; 4732 int ret; 4733 4734 ret = hns3_dcb_cfg_update(hns); 4735 if (ret) 4736 return ret; 4737 4738 ret = hns3_init_queues(hns, reset_queue); 4739 if (ret) { 4740 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret); 4741 return ret; 4742 } 4743 4744 ret = hns3_cfg_mac_mode(hw, true); 4745 if (ret) { 4746 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret); 4747 goto err_config_mac_mode; 4748 } 4749 return 0; 4750 4751 err_config_mac_mode: 4752 hns3_dev_release_mbufs(hns); 4753 hns3_reset_all_tqps(hns); 4754 return ret; 4755 } 4756 4757 static int 4758 hns3_map_rx_interrupt(struct rte_eth_dev *dev) 4759 { 4760 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4761 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4762 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4763 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; 4764 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; 4765 uint32_t intr_vector; 4766 uint16_t q_id; 4767 int ret; 4768 4769 if (dev->data->dev_conf.intr_conf.rxq == 0) 4770 return 0; 4771 4772 /* disable uio/vfio intr/eventfd mapping */ 4773 rte_intr_disable(intr_handle); 4774 4775 /* check and configure queue intr-vector mapping */ 4776 if (rte_intr_cap_multiple(intr_handle) || 4777 !RTE_ETH_DEV_SRIOV(dev).active) { 4778 intr_vector = hw->used_rx_queues; 4779 /* creates event fd for each intr vector when MSIX is used */ 4780 if (rte_intr_efd_enable(intr_handle, intr_vector)) 4781 return -EINVAL; 4782 } 4783 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 4784 intr_handle->intr_vec = 4785 rte_zmalloc("intr_vec", 4786 hw->used_rx_queues * sizeof(int), 0); 4787 if (intr_handle->intr_vec == NULL) { 4788 hns3_err(hw, "Failed to allocate %d rx_queues" 4789 " intr_vec", hw->used_rx_queues); 4790 ret = -ENOMEM; 4791 goto alloc_intr_vec_error; 4792 } 4793 } 4794 4795 if (rte_intr_allow_others(intr_handle)) { 4796 vec = RTE_INTR_VEC_RXTX_OFFSET; 4797 base = RTE_INTR_VEC_RXTX_OFFSET; 4798 } 4799 if (rte_intr_dp_is_en(intr_handle)) { 4800 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 4801 ret = hns3_bind_ring_with_vector(hw, vec, true, 4802 HNS3_RING_TYPE_RX, 4803 q_id); 4804 if (ret) 4805 goto bind_vector_error; 4806 intr_handle->intr_vec[q_id] = vec; 4807 if (vec < base + intr_handle->nb_efd - 1) 4808 vec++; 4809 } 4810 } 4811 rte_intr_enable(intr_handle); 4812 return 0; 4813 4814 bind_vector_error: 4815 rte_intr_efd_disable(intr_handle); 4816 if (intr_handle->intr_vec) { 4817 free(intr_handle->intr_vec); 4818 intr_handle->intr_vec = NULL; 4819 } 4820 return ret; 4821 alloc_intr_vec_error: 4822 rte_intr_efd_disable(intr_handle); 4823 return ret; 4824 } 4825 4826 static int 4827 hns3_restore_rx_interrupt(struct hns3_hw *hw) 4828 { 4829 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 4830 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4831 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4832 uint16_t q_id; 4833 int ret; 4834 4835 if (dev->data->dev_conf.intr_conf.rxq == 0) 4836 return 0; 4837 4838 if (rte_intr_dp_is_en(intr_handle)) { 4839 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 4840 ret = hns3_bind_ring_with_vector(hw, 4841 intr_handle->intr_vec[q_id], true, 4842 HNS3_RING_TYPE_RX, q_id); 4843 if (ret) 4844 return ret; 4845 } 4846 } 4847 4848 return 0; 4849 } 4850 4851 static void 4852 hns3_restore_filter(struct rte_eth_dev *dev) 4853 { 4854 hns3_restore_rss_filter(dev); 4855 } 4856 4857 static int 4858 hns3_dev_start(struct rte_eth_dev *dev) 4859 { 4860 struct hns3_adapter *hns = dev->data->dev_private; 4861 struct hns3_hw *hw = &hns->hw; 4862 int ret; 4863 4864 PMD_INIT_FUNC_TRACE(); 4865 if (rte_atomic16_read(&hw->reset.resetting)) 4866 return -EBUSY; 4867 4868 rte_spinlock_lock(&hw->lock); 4869 hw->adapter_state = HNS3_NIC_STARTING; 4870 4871 ret = hns3_do_start(hns, true); 4872 if (ret) { 4873 hw->adapter_state = HNS3_NIC_CONFIGURED; 4874 rte_spinlock_unlock(&hw->lock); 4875 return ret; 4876 } 4877 ret = hns3_map_rx_interrupt(dev); 4878 if (ret) { 4879 hw->adapter_state = HNS3_NIC_CONFIGURED; 4880 rte_spinlock_unlock(&hw->lock); 4881 return ret; 4882 } 4883 4884 /* 4885 * There are three register used to control the status of a TQP 4886 * (contains a pair of Tx queue and Rx queue) in the new version network 4887 * engine. One is used to control the enabling of Tx queue, the other is 4888 * used to control the enabling of Rx queue, and the last is the master 4889 * switch used to control the enabling of the tqp. The Tx register and 4890 * TQP register must be enabled at the same time to enable a Tx queue. 4891 * The same applies to the Rx queue. For the older network engine, this 4892 * function only refresh the enabled flag, and it is used to update the 4893 * status of queue in the dpdk framework. 4894 */ 4895 ret = hns3_start_all_txqs(dev); 4896 if (ret) { 4897 hw->adapter_state = HNS3_NIC_CONFIGURED; 4898 rte_spinlock_unlock(&hw->lock); 4899 return ret; 4900 } 4901 4902 ret = hns3_start_all_rxqs(dev); 4903 if (ret) { 4904 hns3_stop_all_txqs(dev); 4905 hw->adapter_state = HNS3_NIC_CONFIGURED; 4906 rte_spinlock_unlock(&hw->lock); 4907 return ret; 4908 } 4909 4910 hw->adapter_state = HNS3_NIC_STARTED; 4911 rte_spinlock_unlock(&hw->lock); 4912 4913 hns3_rx_scattered_calc(dev); 4914 hns3_set_rxtx_function(dev); 4915 hns3_mp_req_start_rxtx(dev); 4916 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); 4917 4918 hns3_restore_filter(dev); 4919 4920 /* Enable interrupt of all rx queues before enabling queues */ 4921 hns3_dev_all_rx_queue_intr_enable(hw, true); 4922 4923 /* 4924 * After finished the initialization, enable tqps to receive/transmit 4925 * packets and refresh all queue status. 4926 */ 4927 hns3_start_tqps(hw); 4928 4929 hns3_info(hw, "hns3 dev start successful!"); 4930 return 0; 4931 } 4932 4933 static int 4934 hns3_do_stop(struct hns3_adapter *hns) 4935 { 4936 struct hns3_hw *hw = &hns->hw; 4937 int ret; 4938 4939 ret = hns3_cfg_mac_mode(hw, false); 4940 if (ret) 4941 return ret; 4942 hw->mac.link_status = ETH_LINK_DOWN; 4943 4944 if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) { 4945 hns3_configure_all_mac_addr(hns, true); 4946 ret = hns3_reset_all_tqps(hns); 4947 if (ret) { 4948 hns3_err(hw, "failed to reset all queues ret = %d.", 4949 ret); 4950 return ret; 4951 } 4952 } 4953 hw->mac.default_addr_setted = false; 4954 return 0; 4955 } 4956 4957 static void 4958 hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) 4959 { 4960 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4961 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4962 struct hns3_adapter *hns = dev->data->dev_private; 4963 struct hns3_hw *hw = &hns->hw; 4964 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; 4965 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; 4966 uint16_t q_id; 4967 4968 if (dev->data->dev_conf.intr_conf.rxq == 0) 4969 return; 4970 4971 /* unmap the ring with vector */ 4972 if (rte_intr_allow_others(intr_handle)) { 4973 vec = RTE_INTR_VEC_RXTX_OFFSET; 4974 base = RTE_INTR_VEC_RXTX_OFFSET; 4975 } 4976 if (rte_intr_dp_is_en(intr_handle)) { 4977 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { 4978 (void)hns3_bind_ring_with_vector(hw, vec, false, 4979 HNS3_RING_TYPE_RX, 4980 q_id); 4981 if (vec < base + intr_handle->nb_efd - 1) 4982 vec++; 4983 } 4984 } 4985 /* Clean datapath event and queue/vec mapping */ 4986 rte_intr_efd_disable(intr_handle); 4987 if (intr_handle->intr_vec) { 4988 rte_free(intr_handle->intr_vec); 4989 intr_handle->intr_vec = NULL; 4990 } 4991 } 4992 4993 static int 4994 hns3_dev_stop(struct rte_eth_dev *dev) 4995 { 4996 struct hns3_adapter *hns = dev->data->dev_private; 4997 struct hns3_hw *hw = &hns->hw; 4998 4999 PMD_INIT_FUNC_TRACE(); 5000 dev->data->dev_started = 0; 5001 5002 hw->adapter_state = HNS3_NIC_STOPPING; 5003 hns3_set_rxtx_function(dev); 5004 rte_wmb(); 5005 /* Disable datapath on secondary process. */ 5006 hns3_mp_req_stop_rxtx(dev); 5007 /* Prevent crashes when queues are still in use. */ 5008 rte_delay_ms(hw->tqps_num); 5009 5010 rte_spinlock_lock(&hw->lock); 5011 if (rte_atomic16_read(&hw->reset.resetting) == 0) { 5012 hns3_stop_tqps(hw); 5013 hns3_do_stop(hns); 5014 hns3_unmap_rx_interrupt(dev); 5015 hns3_dev_release_mbufs(hns); 5016 hw->adapter_state = HNS3_NIC_CONFIGURED; 5017 } 5018 hns3_rx_scattered_reset(dev); 5019 rte_eal_alarm_cancel(hns3_service_handler, dev); 5020 rte_spinlock_unlock(&hw->lock); 5021 5022 return 0; 5023 } 5024 5025 static int 5026 hns3_dev_close(struct rte_eth_dev *eth_dev) 5027 { 5028 struct hns3_adapter *hns = eth_dev->data->dev_private; 5029 struct hns3_hw *hw = &hns->hw; 5030 int ret = 0; 5031 5032 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 5033 rte_free(eth_dev->process_private); 5034 eth_dev->process_private = NULL; 5035 return 0; 5036 } 5037 5038 if (hw->adapter_state == HNS3_NIC_STARTED) 5039 ret = hns3_dev_stop(eth_dev); 5040 5041 hw->adapter_state = HNS3_NIC_CLOSING; 5042 hns3_reset_abort(hns); 5043 hw->adapter_state = HNS3_NIC_CLOSED; 5044 5045 hns3_configure_all_mc_mac_addr(hns, true); 5046 hns3_remove_all_vlan_table(hns); 5047 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0); 5048 hns3_uninit_pf(eth_dev); 5049 hns3_free_all_queues(eth_dev); 5050 rte_free(hw->reset.wait_data); 5051 rte_free(eth_dev->process_private); 5052 eth_dev->process_private = NULL; 5053 hns3_mp_uninit_primary(); 5054 hns3_warn(hw, "Close port %d finished", hw->data->port_id); 5055 5056 return ret; 5057 } 5058 5059 static int 5060 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5061 { 5062 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5063 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5064 5065 fc_conf->pause_time = pf->pause_time; 5066 5067 /* return fc current mode */ 5068 switch (hw->current_mode) { 5069 case HNS3_FC_FULL: 5070 fc_conf->mode = RTE_FC_FULL; 5071 break; 5072 case HNS3_FC_TX_PAUSE: 5073 fc_conf->mode = RTE_FC_TX_PAUSE; 5074 break; 5075 case HNS3_FC_RX_PAUSE: 5076 fc_conf->mode = RTE_FC_RX_PAUSE; 5077 break; 5078 case HNS3_FC_NONE: 5079 default: 5080 fc_conf->mode = RTE_FC_NONE; 5081 break; 5082 } 5083 5084 return 0; 5085 } 5086 5087 static void 5088 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode) 5089 { 5090 switch (mode) { 5091 case RTE_FC_NONE: 5092 hw->requested_mode = HNS3_FC_NONE; 5093 break; 5094 case RTE_FC_RX_PAUSE: 5095 hw->requested_mode = HNS3_FC_RX_PAUSE; 5096 break; 5097 case RTE_FC_TX_PAUSE: 5098 hw->requested_mode = HNS3_FC_TX_PAUSE; 5099 break; 5100 case RTE_FC_FULL: 5101 hw->requested_mode = HNS3_FC_FULL; 5102 break; 5103 default: 5104 hw->requested_mode = HNS3_FC_NONE; 5105 hns3_warn(hw, "fc_mode(%u) exceeds member scope and is " 5106 "configured to RTE_FC_NONE", mode); 5107 break; 5108 } 5109 } 5110 5111 static int 5112 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 5113 { 5114 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5115 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5116 int ret; 5117 5118 if (fc_conf->high_water || fc_conf->low_water || 5119 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) { 5120 hns3_err(hw, "Unsupported flow control settings specified, " 5121 "high_water(%u), low_water(%u), send_xon(%u) and " 5122 "mac_ctrl_frame_fwd(%u) must be set to '0'", 5123 fc_conf->high_water, fc_conf->low_water, 5124 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd); 5125 return -EINVAL; 5126 } 5127 if (fc_conf->autoneg) { 5128 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 5129 return -EINVAL; 5130 } 5131 if (!fc_conf->pause_time) { 5132 hns3_err(hw, "Invalid pause time %d setting.", 5133 fc_conf->pause_time); 5134 return -EINVAL; 5135 } 5136 5137 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 5138 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) { 5139 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. " 5140 "current_fc_status = %d", hw->current_fc_status); 5141 return -EOPNOTSUPP; 5142 } 5143 5144 hns3_get_fc_mode(hw, fc_conf->mode); 5145 if (hw->requested_mode == hw->current_mode && 5146 pf->pause_time == fc_conf->pause_time) 5147 return 0; 5148 5149 rte_spinlock_lock(&hw->lock); 5150 ret = hns3_fc_enable(dev, fc_conf); 5151 rte_spinlock_unlock(&hw->lock); 5152 5153 return ret; 5154 } 5155 5156 static int 5157 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, 5158 struct rte_eth_pfc_conf *pfc_conf) 5159 { 5160 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5161 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5162 uint8_t priority; 5163 int ret; 5164 5165 if (!hns3_dev_dcb_supported(hw)) { 5166 hns3_err(hw, "This port does not support dcb configurations."); 5167 return -EOPNOTSUPP; 5168 } 5169 5170 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water || 5171 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) { 5172 hns3_err(hw, "Unsupported flow control settings specified, " 5173 "high_water(%u), low_water(%u), send_xon(%u) and " 5174 "mac_ctrl_frame_fwd(%u) must be set to '0'", 5175 pfc_conf->fc.high_water, pfc_conf->fc.low_water, 5176 pfc_conf->fc.send_xon, 5177 pfc_conf->fc.mac_ctrl_frame_fwd); 5178 return -EINVAL; 5179 } 5180 if (pfc_conf->fc.autoneg) { 5181 hns3_err(hw, "Unsupported fc auto-negotiation setting."); 5182 return -EINVAL; 5183 } 5184 if (pfc_conf->fc.pause_time == 0) { 5185 hns3_err(hw, "Invalid pause time %d setting.", 5186 pfc_conf->fc.pause_time); 5187 return -EINVAL; 5188 } 5189 5190 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || 5191 hw->current_fc_status == HNS3_FC_STATUS_PFC)) { 5192 hns3_err(hw, "MAC pause is enabled. Cannot set PFC." 5193 "current_fc_status = %d", hw->current_fc_status); 5194 return -EOPNOTSUPP; 5195 } 5196 5197 priority = pfc_conf->priority; 5198 hns3_get_fc_mode(hw, pfc_conf->fc.mode); 5199 if (hw->dcb_info.pfc_en & BIT(priority) && 5200 hw->requested_mode == hw->current_mode && 5201 pfc_conf->fc.pause_time == pf->pause_time) 5202 return 0; 5203 5204 rte_spinlock_lock(&hw->lock); 5205 ret = hns3_dcb_pfc_enable(dev, pfc_conf); 5206 rte_spinlock_unlock(&hw->lock); 5207 5208 return ret; 5209 } 5210 5211 static int 5212 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) 5213 { 5214 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5215 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 5216 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; 5217 int i; 5218 5219 rte_spinlock_lock(&hw->lock); 5220 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) 5221 dcb_info->nb_tcs = pf->local_max_tc; 5222 else 5223 dcb_info->nb_tcs = 1; 5224 5225 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) 5226 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; 5227 for (i = 0; i < dcb_info->nb_tcs; i++) 5228 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; 5229 5230 for (i = 0; i < hw->num_tc; i++) { 5231 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; 5232 dcb_info->tc_queue.tc_txq[0][i].base = 5233 hw->tc_queue[i].tqp_offset; 5234 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; 5235 dcb_info->tc_queue.tc_txq[0][i].nb_queue = 5236 hw->tc_queue[i].tqp_count; 5237 } 5238 rte_spinlock_unlock(&hw->lock); 5239 5240 return 0; 5241 } 5242 5243 static int 5244 hns3_reinit_dev(struct hns3_adapter *hns) 5245 { 5246 struct hns3_hw *hw = &hns->hw; 5247 int ret; 5248 5249 ret = hns3_cmd_init(hw); 5250 if (ret) { 5251 hns3_err(hw, "Failed to init cmd: %d", ret); 5252 return ret; 5253 } 5254 5255 ret = hns3_reset_all_tqps(hns); 5256 if (ret) { 5257 hns3_err(hw, "Failed to reset all queues: %d", ret); 5258 return ret; 5259 } 5260 5261 ret = hns3_init_hardware(hns); 5262 if (ret) { 5263 hns3_err(hw, "Failed to init hardware: %d", ret); 5264 return ret; 5265 } 5266 5267 ret = hns3_enable_hw_error_intr(hns, true); 5268 if (ret) { 5269 hns3_err(hw, "fail to enable hw error interrupts: %d", 5270 ret); 5271 return ret; 5272 } 5273 hns3_info(hw, "Reset done, driver initialization finished."); 5274 5275 return 0; 5276 } 5277 5278 static bool 5279 is_pf_reset_done(struct hns3_hw *hw) 5280 { 5281 uint32_t val, reg, reg_bit; 5282 5283 switch (hw->reset.level) { 5284 case HNS3_IMP_RESET: 5285 reg = HNS3_GLOBAL_RESET_REG; 5286 reg_bit = HNS3_IMP_RESET_BIT; 5287 break; 5288 case HNS3_GLOBAL_RESET: 5289 reg = HNS3_GLOBAL_RESET_REG; 5290 reg_bit = HNS3_GLOBAL_RESET_BIT; 5291 break; 5292 case HNS3_FUNC_RESET: 5293 reg = HNS3_FUN_RST_ING; 5294 reg_bit = HNS3_FUN_RST_ING_B; 5295 break; 5296 case HNS3_FLR_RESET: 5297 default: 5298 hns3_err(hw, "Wait for unsupported reset level: %d", 5299 hw->reset.level); 5300 return true; 5301 } 5302 val = hns3_read_dev(hw, reg); 5303 if (hns3_get_bit(val, reg_bit)) 5304 return false; 5305 else 5306 return true; 5307 } 5308 5309 bool 5310 hns3_is_reset_pending(struct hns3_adapter *hns) 5311 { 5312 struct hns3_hw *hw = &hns->hw; 5313 enum hns3_reset_level reset; 5314 5315 hns3_check_event_cause(hns, NULL); 5316 reset = hns3_get_reset_level(hns, &hw->reset.pending); 5317 if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { 5318 hns3_warn(hw, "High level reset %d is pending", reset); 5319 return true; 5320 } 5321 reset = hns3_get_reset_level(hns, &hw->reset.request); 5322 if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { 5323 hns3_warn(hw, "High level reset %d is request", reset); 5324 return true; 5325 } 5326 return false; 5327 } 5328 5329 static int 5330 hns3_wait_hardware_ready(struct hns3_adapter *hns) 5331 { 5332 struct hns3_hw *hw = &hns->hw; 5333 struct hns3_wait_data *wait_data = hw->reset.wait_data; 5334 struct timeval tv; 5335 5336 if (wait_data->result == HNS3_WAIT_SUCCESS) 5337 return 0; 5338 else if (wait_data->result == HNS3_WAIT_TIMEOUT) { 5339 gettimeofday(&tv, NULL); 5340 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", 5341 tv.tv_sec, tv.tv_usec); 5342 return -ETIME; 5343 } else if (wait_data->result == HNS3_WAIT_REQUEST) 5344 return -EAGAIN; 5345 5346 wait_data->hns = hns; 5347 wait_data->check_completion = is_pf_reset_done; 5348 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT * 5349 HNS3_RESET_WAIT_MS + get_timeofday_ms(); 5350 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC; 5351 wait_data->count = HNS3_RESET_WAIT_CNT; 5352 wait_data->result = HNS3_WAIT_REQUEST; 5353 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); 5354 return -EAGAIN; 5355 } 5356 5357 static int 5358 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) 5359 { 5360 struct hns3_cmd_desc desc; 5361 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data; 5362 5363 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); 5364 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1); 5365 req->fun_reset_vfid = func_id; 5366 5367 return hns3_cmd_send(hw, &desc, 1); 5368 } 5369 5370 static int 5371 hns3_imp_reset_cmd(struct hns3_hw *hw) 5372 { 5373 struct hns3_cmd_desc desc; 5374 5375 hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); 5376 desc.data[0] = 0xeedd; 5377 5378 return hns3_cmd_send(hw, &desc, 1); 5379 } 5380 5381 static void 5382 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) 5383 { 5384 struct hns3_hw *hw = &hns->hw; 5385 struct timeval tv; 5386 uint32_t val; 5387 5388 gettimeofday(&tv, NULL); 5389 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) || 5390 hns3_read_dev(hw, HNS3_FUN_RST_ING)) { 5391 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld", 5392 tv.tv_sec, tv.tv_usec); 5393 return; 5394 } 5395 5396 switch (reset_level) { 5397 case HNS3_IMP_RESET: 5398 hns3_imp_reset_cmd(hw); 5399 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", 5400 tv.tv_sec, tv.tv_usec); 5401 break; 5402 case HNS3_GLOBAL_RESET: 5403 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG); 5404 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1); 5405 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val); 5406 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld", 5407 tv.tv_sec, tv.tv_usec); 5408 break; 5409 case HNS3_FUNC_RESET: 5410 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld", 5411 tv.tv_sec, tv.tv_usec); 5412 /* schedule again to check later */ 5413 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending); 5414 hns3_schedule_reset(hns); 5415 break; 5416 default: 5417 hns3_warn(hw, "Unsupported reset level: %d", reset_level); 5418 return; 5419 } 5420 hns3_atomic_clear_bit(reset_level, &hw->reset.request); 5421 } 5422 5423 static enum hns3_reset_level 5424 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) 5425 { 5426 struct hns3_hw *hw = &hns->hw; 5427 enum hns3_reset_level reset_level = HNS3_NONE_RESET; 5428 5429 /* Return the highest priority reset level amongst all */ 5430 if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels)) 5431 reset_level = HNS3_IMP_RESET; 5432 else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels)) 5433 reset_level = HNS3_GLOBAL_RESET; 5434 else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels)) 5435 reset_level = HNS3_FUNC_RESET; 5436 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels)) 5437 reset_level = HNS3_FLR_RESET; 5438 5439 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level) 5440 return HNS3_NONE_RESET; 5441 5442 return reset_level; 5443 } 5444 5445 static void 5446 hns3_record_imp_error(struct hns3_adapter *hns) 5447 { 5448 struct hns3_hw *hw = &hns->hw; 5449 uint32_t reg_val; 5450 5451 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 5452 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) { 5453 hns3_warn(hw, "Detected IMP RD poison!"); 5454 hns3_error_int_stats_add(hns, "IMP_RD_POISON_INT_STS"); 5455 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0); 5456 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 5457 } 5458 5459 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) { 5460 hns3_warn(hw, "Detected IMP CMDQ error!"); 5461 hns3_error_int_stats_add(hns, "CMDQ_MEM_ECC_INT_STS"); 5462 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0); 5463 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val); 5464 } 5465 } 5466 5467 static int 5468 hns3_prepare_reset(struct hns3_adapter *hns) 5469 { 5470 struct hns3_hw *hw = &hns->hw; 5471 uint32_t reg_val; 5472 int ret; 5473 5474 switch (hw->reset.level) { 5475 case HNS3_FUNC_RESET: 5476 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID); 5477 if (ret) 5478 return ret; 5479 5480 /* 5481 * After performaning pf reset, it is not necessary to do the 5482 * mailbox handling or send any command to firmware, because 5483 * any mailbox handling or command to firmware is only valid 5484 * after hns3_cmd_init is called. 5485 */ 5486 rte_atomic16_set(&hw->reset.disable_cmd, 1); 5487 hw->reset.stats.request_cnt++; 5488 break; 5489 case HNS3_IMP_RESET: 5490 hns3_record_imp_error(hns); 5491 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); 5492 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val | 5493 BIT(HNS3_VECTOR0_IMP_RESET_INT_B)); 5494 break; 5495 default: 5496 break; 5497 } 5498 return 0; 5499 } 5500 5501 static int 5502 hns3_set_rst_done(struct hns3_hw *hw) 5503 { 5504 struct hns3_pf_rst_done_cmd *req; 5505 struct hns3_cmd_desc desc; 5506 5507 req = (struct hns3_pf_rst_done_cmd *)desc.data; 5508 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false); 5509 req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT; 5510 return hns3_cmd_send(hw, &desc, 1); 5511 } 5512 5513 static int 5514 hns3_stop_service(struct hns3_adapter *hns) 5515 { 5516 struct hns3_hw *hw = &hns->hw; 5517 struct rte_eth_dev *eth_dev; 5518 5519 eth_dev = &rte_eth_devices[hw->data->port_id]; 5520 if (hw->adapter_state == HNS3_NIC_STARTED) 5521 rte_eal_alarm_cancel(hns3_service_handler, eth_dev); 5522 hw->mac.link_status = ETH_LINK_DOWN; 5523 5524 hns3_set_rxtx_function(eth_dev); 5525 rte_wmb(); 5526 /* Disable datapath on secondary process. */ 5527 hns3_mp_req_stop_rxtx(eth_dev); 5528 rte_delay_ms(hw->tqps_num); 5529 5530 rte_spinlock_lock(&hw->lock); 5531 if (hns->hw.adapter_state == HNS3_NIC_STARTED || 5532 hw->adapter_state == HNS3_NIC_STOPPING) { 5533 hns3_enable_all_queues(hw, false); 5534 hns3_do_stop(hns); 5535 hw->reset.mbuf_deferred_free = true; 5536 } else 5537 hw->reset.mbuf_deferred_free = false; 5538 5539 /* 5540 * It is cumbersome for hardware to pick-and-choose entries for deletion 5541 * from table space. Hence, for function reset software intervention is 5542 * required to delete the entries 5543 */ 5544 if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) 5545 hns3_configure_all_mc_mac_addr(hns, true); 5546 rte_spinlock_unlock(&hw->lock); 5547 5548 return 0; 5549 } 5550 5551 static int 5552 hns3_start_service(struct hns3_adapter *hns) 5553 { 5554 struct hns3_hw *hw = &hns->hw; 5555 struct rte_eth_dev *eth_dev; 5556 5557 if (hw->reset.level == HNS3_IMP_RESET || 5558 hw->reset.level == HNS3_GLOBAL_RESET) 5559 hns3_set_rst_done(hw); 5560 eth_dev = &rte_eth_devices[hw->data->port_id]; 5561 hns3_set_rxtx_function(eth_dev); 5562 hns3_mp_req_start_rxtx(eth_dev); 5563 if (hw->adapter_state == HNS3_NIC_STARTED) { 5564 hns3_service_handler(eth_dev); 5565 5566 /* Enable interrupt of all rx queues before enabling queues */ 5567 hns3_dev_all_rx_queue_intr_enable(hw, true); 5568 /* 5569 * When finished the initialization, enable queues to receive 5570 * and transmit packets. 5571 */ 5572 hns3_enable_all_queues(hw, true); 5573 } 5574 5575 return 0; 5576 } 5577 5578 static int 5579 hns3_restore_conf(struct hns3_adapter *hns) 5580 { 5581 struct hns3_hw *hw = &hns->hw; 5582 int ret; 5583 5584 ret = hns3_configure_all_mac_addr(hns, false); 5585 if (ret) 5586 return ret; 5587 5588 ret = hns3_configure_all_mc_mac_addr(hns, false); 5589 if (ret) 5590 goto err_mc_mac; 5591 5592 ret = hns3_dev_promisc_restore(hns); 5593 if (ret) 5594 goto err_promisc; 5595 5596 ret = hns3_restore_vlan_table(hns); 5597 if (ret) 5598 goto err_promisc; 5599 5600 ret = hns3_restore_vlan_conf(hns); 5601 if (ret) 5602 goto err_promisc; 5603 5604 ret = hns3_restore_all_fdir_filter(hns); 5605 if (ret) 5606 goto err_promisc; 5607 5608 ret = hns3_restore_rx_interrupt(hw); 5609 if (ret) 5610 goto err_promisc; 5611 5612 ret = hns3_restore_gro_conf(hw); 5613 if (ret) 5614 goto err_promisc; 5615 5616 ret = hns3_restore_fec(hw); 5617 if (ret) 5618 goto err_promisc; 5619 5620 if (hns->hw.adapter_state == HNS3_NIC_STARTED) { 5621 ret = hns3_do_start(hns, false); 5622 if (ret) 5623 goto err_promisc; 5624 hns3_info(hw, "hns3 dev restart successful!"); 5625 } else if (hw->adapter_state == HNS3_NIC_STOPPING) 5626 hw->adapter_state = HNS3_NIC_CONFIGURED; 5627 return 0; 5628 5629 err_promisc: 5630 hns3_configure_all_mc_mac_addr(hns, true); 5631 err_mc_mac: 5632 hns3_configure_all_mac_addr(hns, true); 5633 return ret; 5634 } 5635 5636 static void 5637 hns3_reset_service(void *param) 5638 { 5639 struct hns3_adapter *hns = (struct hns3_adapter *)param; 5640 struct hns3_hw *hw = &hns->hw; 5641 enum hns3_reset_level reset_level; 5642 struct timeval tv_delta; 5643 struct timeval tv_start; 5644 struct timeval tv; 5645 uint64_t msec; 5646 int ret; 5647 5648 /* 5649 * The interrupt is not triggered within the delay time. 5650 * The interrupt may have been lost. It is necessary to handle 5651 * the interrupt to recover from the error. 5652 */ 5653 if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) { 5654 rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED); 5655 hns3_err(hw, "Handling interrupts in delayed tasks"); 5656 hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]); 5657 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 5658 if (reset_level == HNS3_NONE_RESET) { 5659 hns3_err(hw, "No reset level is set, try IMP reset"); 5660 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); 5661 } 5662 } 5663 rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE); 5664 5665 /* 5666 * Check if there is any ongoing reset in the hardware. This status can 5667 * be checked from reset_pending. If there is then, we need to wait for 5668 * hardware to complete reset. 5669 * a. If we are able to figure out in reasonable time that hardware 5670 * has fully resetted then, we can proceed with driver, client 5671 * reset. 5672 * b. else, we can come back later to check this status so re-sched 5673 * now. 5674 */ 5675 reset_level = hns3_get_reset_level(hns, &hw->reset.pending); 5676 if (reset_level != HNS3_NONE_RESET) { 5677 gettimeofday(&tv_start, NULL); 5678 ret = hns3_reset_process(hns, reset_level); 5679 gettimeofday(&tv, NULL); 5680 timersub(&tv, &tv_start, &tv_delta); 5681 msec = tv_delta.tv_sec * MSEC_PER_SEC + 5682 tv_delta.tv_usec / USEC_PER_MSEC; 5683 if (msec > HNS3_RESET_PROCESS_MS) 5684 hns3_err(hw, "%d handle long time delta %" PRIx64 5685 " ms time=%ld.%.6ld", 5686 hw->reset.level, msec, 5687 tv.tv_sec, tv.tv_usec); 5688 if (ret == -EAGAIN) 5689 return; 5690 } 5691 5692 /* Check if we got any *new* reset requests to be honored */ 5693 reset_level = hns3_get_reset_level(hns, &hw->reset.request); 5694 if (reset_level != HNS3_NONE_RESET) 5695 hns3_msix_process(hns, reset_level); 5696 } 5697 5698 static unsigned int 5699 hns3_get_speed_capa_num(uint16_t device_id) 5700 { 5701 unsigned int num; 5702 5703 switch (device_id) { 5704 case HNS3_DEV_ID_25GE: 5705 case HNS3_DEV_ID_25GE_RDMA: 5706 num = 2; 5707 break; 5708 case HNS3_DEV_ID_100G_RDMA_MACSEC: 5709 case HNS3_DEV_ID_200G_RDMA: 5710 num = 1; 5711 break; 5712 default: 5713 num = 0; 5714 break; 5715 } 5716 5717 return num; 5718 } 5719 5720 static int 5721 hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa, 5722 uint16_t device_id) 5723 { 5724 switch (device_id) { 5725 case HNS3_DEV_ID_25GE: 5726 /* fallthrough */ 5727 case HNS3_DEV_ID_25GE_RDMA: 5728 speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed; 5729 speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa; 5730 5731 /* In HNS3 device, the 25G NIC is compatible with 10G rate */ 5732 speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed; 5733 speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa; 5734 break; 5735 case HNS3_DEV_ID_100G_RDMA_MACSEC: 5736 speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed; 5737 speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa; 5738 break; 5739 case HNS3_DEV_ID_200G_RDMA: 5740 speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed; 5741 speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa; 5742 break; 5743 default: 5744 return -ENOTSUP; 5745 } 5746 5747 return 0; 5748 } 5749 5750 static int 5751 hns3_fec_get_capability(struct rte_eth_dev *dev, 5752 struct rte_eth_fec_capa *speed_fec_capa, 5753 unsigned int num) 5754 { 5755 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5756 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5757 uint16_t device_id = pci_dev->id.device_id; 5758 unsigned int capa_num; 5759 int ret; 5760 5761 capa_num = hns3_get_speed_capa_num(device_id); 5762 if (capa_num == 0) { 5763 hns3_err(hw, "device(0x%x) is not supported by hns3 PMD", 5764 device_id); 5765 return -ENOTSUP; 5766 } 5767 5768 if (speed_fec_capa == NULL || num < capa_num) 5769 return capa_num; 5770 5771 ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id); 5772 if (ret) 5773 return -ENOTSUP; 5774 5775 return capa_num; 5776 } 5777 5778 static int 5779 get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) 5780 { 5781 struct hns3_config_fec_cmd *req; 5782 struct hns3_cmd_desc desc; 5783 int ret; 5784 5785 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true); 5786 req = (struct hns3_config_fec_cmd *)desc.data; 5787 ret = hns3_cmd_send(hw, &desc, 1); 5788 if (ret) { 5789 hns3_err(hw, "get current fec auto state failed, ret = %d", 5790 ret); 5791 return ret; 5792 } 5793 5794 *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B); 5795 return 0; 5796 } 5797 5798 static int 5799 hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa) 5800 { 5801 #define QUERY_ACTIVE_SPEED 1 5802 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5803 struct hns3_sfp_speed_cmd *resp; 5804 uint32_t tmp_fec_capa; 5805 uint8_t auto_state; 5806 struct hns3_cmd_desc desc; 5807 int ret; 5808 5809 /* 5810 * If link is down and AUTO is enabled, AUTO is returned, otherwise, 5811 * configured FEC mode is returned. 5812 * If link is up, current FEC mode is returned. 5813 */ 5814 if (hw->mac.link_status == ETH_LINK_DOWN) { 5815 ret = get_current_fec_auto_state(hw, &auto_state); 5816 if (ret) 5817 return ret; 5818 5819 if (auto_state == 0x1) { 5820 *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO); 5821 return 0; 5822 } 5823 } 5824 5825 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SFP_GET_SPEED, true); 5826 resp = (struct hns3_sfp_speed_cmd *)desc.data; 5827 resp->query_type = QUERY_ACTIVE_SPEED; 5828 5829 ret = hns3_cmd_send(hw, &desc, 1); 5830 if (ret == -EOPNOTSUPP) { 5831 hns3_err(hw, "IMP do not support get FEC, ret = %d", ret); 5832 return ret; 5833 } else if (ret) { 5834 hns3_err(hw, "get FEC failed, ret = %d", ret); 5835 return ret; 5836 } 5837 5838 /* 5839 * FEC mode order defined in hns3 hardware is inconsistend with 5840 * that defined in the ethdev library. So the sequence needs 5841 * to be converted. 5842 */ 5843 switch (resp->active_fec) { 5844 case HNS3_HW_FEC_MODE_NOFEC: 5845 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 5846 break; 5847 case HNS3_HW_FEC_MODE_BASER: 5848 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER); 5849 break; 5850 case HNS3_HW_FEC_MODE_RS: 5851 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS); 5852 break; 5853 default: 5854 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC); 5855 break; 5856 } 5857 5858 *fec_capa = tmp_fec_capa; 5859 return 0; 5860 } 5861 5862 static int 5863 hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) 5864 { 5865 struct hns3_config_fec_cmd *req; 5866 struct hns3_cmd_desc desc; 5867 int ret; 5868 5869 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false); 5870 5871 req = (struct hns3_config_fec_cmd *)desc.data; 5872 switch (mode) { 5873 case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC): 5874 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 5875 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF); 5876 break; 5877 case RTE_ETH_FEC_MODE_CAPA_MASK(BASER): 5878 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 5879 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER); 5880 break; 5881 case RTE_ETH_FEC_MODE_CAPA_MASK(RS): 5882 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M, 5883 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS); 5884 break; 5885 case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO): 5886 hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1); 5887 break; 5888 default: 5889 return 0; 5890 } 5891 ret = hns3_cmd_send(hw, &desc, 1); 5892 if (ret) 5893 hns3_err(hw, "set fec mode failed, ret = %d", ret); 5894 5895 return ret; 5896 } 5897 5898 static uint32_t 5899 get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa) 5900 { 5901 struct hns3_mac *mac = &hw->mac; 5902 uint32_t cur_capa; 5903 5904 switch (mac->link_speed) { 5905 case ETH_SPEED_NUM_10G: 5906 cur_capa = fec_capa[1].capa; 5907 break; 5908 case ETH_SPEED_NUM_25G: 5909 case ETH_SPEED_NUM_100G: 5910 case ETH_SPEED_NUM_200G: 5911 cur_capa = fec_capa[0].capa; 5912 break; 5913 default: 5914 cur_capa = 0; 5915 break; 5916 } 5917 5918 return cur_capa; 5919 } 5920 5921 static bool 5922 is_fec_mode_one_bit_set(uint32_t mode) 5923 { 5924 int cnt = 0; 5925 uint8_t i; 5926 5927 for (i = 0; i < sizeof(mode); i++) 5928 if (mode >> i & 0x1) 5929 cnt++; 5930 5931 return cnt == 1 ? true : false; 5932 } 5933 5934 static int 5935 hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) 5936 { 5937 #define FEC_CAPA_NUM 2 5938 struct hns3_adapter *hns = dev->data->dev_private; 5939 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 5940 struct hns3_pf *pf = &hns->pf; 5941 5942 struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM]; 5943 uint32_t cur_capa; 5944 uint32_t num = FEC_CAPA_NUM; 5945 int ret; 5946 5947 ret = hns3_fec_get_capability(dev, fec_capa, num); 5948 if (ret < 0) 5949 return ret; 5950 5951 /* HNS3 PMD driver only support one bit set mode, e.g. 0x1, 0x4 */ 5952 if (!is_fec_mode_one_bit_set(mode)) 5953 hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD," 5954 "FEC mode should be only one bit set", mode); 5955 5956 /* 5957 * Check whether the configured mode is within the FEC capability. 5958 * If not, the configured mode will not be supported. 5959 */ 5960 cur_capa = get_current_speed_fec_cap(hw, fec_capa); 5961 if (!(cur_capa & mode)) { 5962 hns3_err(hw, "unsupported FEC mode = 0x%x", mode); 5963 return -EINVAL; 5964 } 5965 5966 ret = hns3_set_fec_hw(hw, mode); 5967 if (ret) 5968 return ret; 5969 5970 pf->fec_mode = mode; 5971 return 0; 5972 } 5973 5974 static int 5975 hns3_restore_fec(struct hns3_hw *hw) 5976 { 5977 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 5978 struct hns3_pf *pf = &hns->pf; 5979 uint32_t mode = pf->fec_mode; 5980 int ret; 5981 5982 ret = hns3_set_fec_hw(hw, mode); 5983 if (ret) 5984 hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d", 5985 mode, ret); 5986 5987 return ret; 5988 } 5989 5990 static int 5991 hns3_query_dev_fec_info(struct rte_eth_dev *dev) 5992 { 5993 struct hns3_adapter *hns = dev->data->dev_private; 5994 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); 5995 struct hns3_pf *pf = &hns->pf; 5996 int ret; 5997 5998 ret = hns3_fec_get(dev, &pf->fec_mode); 5999 if (ret) 6000 hns3_err(hw, "query device FEC info failed, ret = %d", ret); 6001 6002 return ret; 6003 } 6004 6005 static const struct eth_dev_ops hns3_eth_dev_ops = { 6006 .dev_configure = hns3_dev_configure, 6007 .dev_start = hns3_dev_start, 6008 .dev_stop = hns3_dev_stop, 6009 .dev_close = hns3_dev_close, 6010 .promiscuous_enable = hns3_dev_promiscuous_enable, 6011 .promiscuous_disable = hns3_dev_promiscuous_disable, 6012 .allmulticast_enable = hns3_dev_allmulticast_enable, 6013 .allmulticast_disable = hns3_dev_allmulticast_disable, 6014 .mtu_set = hns3_dev_mtu_set, 6015 .stats_get = hns3_stats_get, 6016 .stats_reset = hns3_stats_reset, 6017 .xstats_get = hns3_dev_xstats_get, 6018 .xstats_get_names = hns3_dev_xstats_get_names, 6019 .xstats_reset = hns3_dev_xstats_reset, 6020 .xstats_get_by_id = hns3_dev_xstats_get_by_id, 6021 .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, 6022 .dev_infos_get = hns3_dev_infos_get, 6023 .fw_version_get = hns3_fw_version_get, 6024 .rx_queue_setup = hns3_rx_queue_setup, 6025 .tx_queue_setup = hns3_tx_queue_setup, 6026 .rx_queue_release = hns3_dev_rx_queue_release, 6027 .tx_queue_release = hns3_dev_tx_queue_release, 6028 .rx_queue_start = hns3_dev_rx_queue_start, 6029 .rx_queue_stop = hns3_dev_rx_queue_stop, 6030 .tx_queue_start = hns3_dev_tx_queue_start, 6031 .tx_queue_stop = hns3_dev_tx_queue_stop, 6032 .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, 6033 .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, 6034 .rxq_info_get = hns3_rxq_info_get, 6035 .txq_info_get = hns3_txq_info_get, 6036 .rx_burst_mode_get = hns3_rx_burst_mode_get, 6037 .tx_burst_mode_get = hns3_tx_burst_mode_get, 6038 .flow_ctrl_get = hns3_flow_ctrl_get, 6039 .flow_ctrl_set = hns3_flow_ctrl_set, 6040 .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set, 6041 .mac_addr_add = hns3_add_mac_addr, 6042 .mac_addr_remove = hns3_remove_mac_addr, 6043 .mac_addr_set = hns3_set_default_mac_addr, 6044 .set_mc_addr_list = hns3_set_mc_mac_addr_list, 6045 .link_update = hns3_dev_link_update, 6046 .rss_hash_update = hns3_dev_rss_hash_update, 6047 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, 6048 .reta_update = hns3_dev_rss_reta_update, 6049 .reta_query = hns3_dev_rss_reta_query, 6050 .filter_ctrl = hns3_dev_filter_ctrl, 6051 .vlan_filter_set = hns3_vlan_filter_set, 6052 .vlan_tpid_set = hns3_vlan_tpid_set, 6053 .vlan_offload_set = hns3_vlan_offload_set, 6054 .vlan_pvid_set = hns3_vlan_pvid_set, 6055 .get_reg = hns3_get_regs, 6056 .get_dcb_info = hns3_get_dcb_info, 6057 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, 6058 .fec_get_capability = hns3_fec_get_capability, 6059 .fec_get = hns3_fec_get, 6060 .fec_set = hns3_fec_set, 6061 }; 6062 6063 static const struct hns3_reset_ops hns3_reset_ops = { 6064 .reset_service = hns3_reset_service, 6065 .stop_service = hns3_stop_service, 6066 .prepare_reset = hns3_prepare_reset, 6067 .wait_hardware_ready = hns3_wait_hardware_ready, 6068 .reinit_dev = hns3_reinit_dev, 6069 .restore_conf = hns3_restore_conf, 6070 .start_service = hns3_start_service, 6071 }; 6072 6073 static int 6074 hns3_dev_init(struct rte_eth_dev *eth_dev) 6075 { 6076 struct hns3_adapter *hns = eth_dev->data->dev_private; 6077 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 6078 struct rte_ether_addr *eth_addr; 6079 struct hns3_hw *hw = &hns->hw; 6080 int ret; 6081 6082 PMD_INIT_FUNC_TRACE(); 6083 6084 eth_dev->process_private = (struct hns3_process_private *) 6085 rte_zmalloc_socket("hns3_filter_list", 6086 sizeof(struct hns3_process_private), 6087 RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node); 6088 if (eth_dev->process_private == NULL) { 6089 PMD_INIT_LOG(ERR, "Failed to alloc memory for process private"); 6090 return -ENOMEM; 6091 } 6092 /* initialize flow filter lists */ 6093 hns3_filterlist_init(eth_dev); 6094 6095 hns3_set_rxtx_function(eth_dev); 6096 eth_dev->dev_ops = &hns3_eth_dev_ops; 6097 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 6098 ret = hns3_mp_init_secondary(); 6099 if (ret) { 6100 PMD_INIT_LOG(ERR, "Failed to init for secondary " 6101 "process, ret = %d", ret); 6102 goto err_mp_init_secondary; 6103 } 6104 6105 hw->secondary_cnt++; 6106 return 0; 6107 } 6108 6109 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 6110 6111 ret = hns3_mp_init_primary(); 6112 if (ret) { 6113 PMD_INIT_LOG(ERR, 6114 "Failed to init for primary process, ret = %d", 6115 ret); 6116 goto err_mp_init_primary; 6117 } 6118 6119 hw->adapter_state = HNS3_NIC_UNINITIALIZED; 6120 hns->is_vf = false; 6121 hw->data = eth_dev->data; 6122 6123 /* 6124 * Set default max packet size according to the mtu 6125 * default vale in DPDK frame. 6126 */ 6127 hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD; 6128 6129 ret = hns3_reset_init(hw); 6130 if (ret) 6131 goto err_init_reset; 6132 hw->reset.ops = &hns3_reset_ops; 6133 6134 ret = hns3_init_pf(eth_dev); 6135 if (ret) { 6136 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret); 6137 goto err_init_pf; 6138 } 6139 6140 /* Allocate memory for storing MAC addresses */ 6141 eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac", 6142 sizeof(struct rte_ether_addr) * 6143 HNS3_UC_MACADDR_NUM, 0); 6144 if (eth_dev->data->mac_addrs == NULL) { 6145 PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed " 6146 "to store MAC addresses", 6147 sizeof(struct rte_ether_addr) * 6148 HNS3_UC_MACADDR_NUM); 6149 ret = -ENOMEM; 6150 goto err_rte_zmalloc; 6151 } 6152 6153 eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr; 6154 if (!rte_is_valid_assigned_ether_addr(eth_addr)) { 6155 rte_eth_random_addr(hw->mac.mac_addr); 6156 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, 6157 (struct rte_ether_addr *)hw->mac.mac_addr); 6158 hns3_warn(hw, "default mac_addr from firmware is an invalid " 6159 "unicast address, using random MAC address %s", 6160 mac_str); 6161 } 6162 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, 6163 ð_dev->data->mac_addrs[0]); 6164 6165 hw->adapter_state = HNS3_NIC_INITIALIZED; 6166 6167 if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) { 6168 hns3_err(hw, "Reschedule reset service after dev_init"); 6169 hns3_schedule_reset(hns); 6170 } else { 6171 /* IMP will wait ready flag before reset */ 6172 hns3_notify_reset_ready(hw, false); 6173 } 6174 6175 hns3_info(hw, "hns3 dev initialization successful!"); 6176 return 0; 6177 6178 err_rte_zmalloc: 6179 hns3_uninit_pf(eth_dev); 6180 6181 err_init_pf: 6182 rte_free(hw->reset.wait_data); 6183 6184 err_init_reset: 6185 hns3_mp_uninit_primary(); 6186 6187 err_mp_init_primary: 6188 err_mp_init_secondary: 6189 eth_dev->dev_ops = NULL; 6190 eth_dev->rx_pkt_burst = NULL; 6191 eth_dev->tx_pkt_burst = NULL; 6192 eth_dev->tx_pkt_prepare = NULL; 6193 rte_free(eth_dev->process_private); 6194 eth_dev->process_private = NULL; 6195 return ret; 6196 } 6197 6198 static int 6199 hns3_dev_uninit(struct rte_eth_dev *eth_dev) 6200 { 6201 struct hns3_adapter *hns = eth_dev->data->dev_private; 6202 struct hns3_hw *hw = &hns->hw; 6203 6204 PMD_INIT_FUNC_TRACE(); 6205 6206 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 6207 return -EPERM; 6208 6209 if (hw->adapter_state < HNS3_NIC_CLOSING) 6210 hns3_dev_close(eth_dev); 6211 6212 hw->adapter_state = HNS3_NIC_REMOVED; 6213 return 0; 6214 } 6215 6216 static int 6217 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 6218 struct rte_pci_device *pci_dev) 6219 { 6220 return rte_eth_dev_pci_generic_probe(pci_dev, 6221 sizeof(struct hns3_adapter), 6222 hns3_dev_init); 6223 } 6224 6225 static int 6226 eth_hns3_pci_remove(struct rte_pci_device *pci_dev) 6227 { 6228 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit); 6229 } 6230 6231 static const struct rte_pci_id pci_id_hns3_map[] = { 6232 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) }, 6233 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) }, 6234 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) }, 6235 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, 6236 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, 6237 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, 6238 { .vendor_id = 0, /* sentinel */ }, 6239 }; 6240 6241 static struct rte_pci_driver rte_hns3_pmd = { 6242 .id_table = pci_id_hns3_map, 6243 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 6244 .probe = eth_hns3_pci_probe, 6245 .remove = eth_hns3_pci_remove, 6246 }; 6247 6248 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd); 6249 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map); 6250 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); 6251 RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE); 6252 RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE); 6253