1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018-2021 HiSilicon Limited. 3 */ 4 5 #include <ethdev_pci.h> 6 #include <rte_io.h> 7 8 #include "hns3_common.h" 9 #include "hns3_regs.h" 10 #include "hns3_intr.h" 11 #include "hns3_logs.h" 12 13 static int 14 hns3_ring_space(struct hns3_cmq_ring *ring) 15 { 16 int ntu = ring->next_to_use; 17 int ntc = ring->next_to_clean; 18 int used = (ntu - ntc + ring->desc_num) % ring->desc_num; 19 20 return ring->desc_num - used - 1; 21 } 22 23 static bool 24 is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head) 25 { 26 int ntu = ring->next_to_use; 27 int ntc = ring->next_to_clean; 28 29 if (ntu > ntc) 30 return head >= ntc && head <= ntu; 31 32 return head >= ntc || head <= ntu; 33 } 34 35 /* 36 * hns3_allocate_dma_mem - Specific memory alloc for command function. 37 * Malloc a memzone, which is a contiguous portion of physical memory identified 38 * by a name. 39 * @ring: pointer to the ring structure 40 * @size: size of memory requested 41 * @alignment: what to align the allocation to 42 */ 43 static int 44 hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring, 45 uint64_t size, uint32_t alignment) 46 { 47 static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id; 48 const struct rte_memzone *mz = NULL; 49 char z_name[RTE_MEMZONE_NAMESIZE]; 50 51 snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, 52 rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1, rte_memory_order_relaxed)); 53 mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 54 RTE_MEMZONE_IOVA_CONTIG, alignment, 55 RTE_PGSIZE_2M); 56 if (mz == NULL) 57 return -ENOMEM; 58 59 ring->buf_size = size; 60 ring->desc = mz->addr; 61 ring->desc_dma_addr = mz->iova; 62 ring->zone = (const void *)mz; 63 hns3_dbg(hw, "cmd ring memzone name: %s", mz->name); 64 65 return 0; 66 } 67 68 static void 69 hns3_free_dma_mem(struct hns3_cmq_ring *ring) 70 { 71 rte_memzone_free((const struct rte_memzone *)ring->zone); 72 ring->buf_size = 0; 73 ring->desc = NULL; 74 ring->desc_dma_addr = 0; 75 ring->zone = NULL; 76 } 77 78 static int 79 hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring) 80 { 81 int size = ring->desc_num * sizeof(struct hns3_cmd_desc); 82 83 if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) { 84 hns3_err(hw, "allocate dma mem failed"); 85 return -ENOMEM; 86 } 87 88 return 0; 89 } 90 91 static void 92 hns3_free_cmd_desc(__rte_unused struct hns3_hw *hw, struct hns3_cmq_ring *ring) 93 { 94 if (ring->desc) 95 hns3_free_dma_mem(ring); 96 } 97 98 static int 99 hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type) 100 { 101 struct hns3_cmq_ring *ring = 102 (ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; 103 int ret; 104 105 ring->ring_type = ring_type; 106 ring->hw = hw; 107 108 ret = hns3_alloc_cmd_desc(hw, ring); 109 if (ret) 110 hns3_err(hw, "descriptor %s alloc error %d", 111 (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret); 112 113 return ret; 114 } 115 116 void 117 hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read) 118 { 119 desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN); 120 if (is_read) 121 desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR); 122 else 123 desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR); 124 } 125 126 void 127 hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc, 128 enum hns3_opcode_type opcode, bool is_read) 129 { 130 memset((void *)desc, 0, sizeof(struct hns3_cmd_desc)); 131 desc->opcode = rte_cpu_to_le_16(opcode); 132 desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN); 133 134 if (is_read) 135 desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR); 136 } 137 138 static void 139 hns3_cmd_clear_regs(struct hns3_hw *hw) 140 { 141 hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0); 142 hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0); 143 hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0); 144 hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0); 145 hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0); 146 hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0); 147 hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0); 148 hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0); 149 hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0); 150 hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0); 151 } 152 153 static void 154 hns3_cmd_config_regs(struct hns3_cmq_ring *ring) 155 { 156 uint64_t dma = ring->desc_dma_addr; 157 158 if (ring->ring_type == HNS3_TYPE_CSQ) { 159 hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG, 160 lower_32_bits(dma)); 161 hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG, 162 upper_32_bits(dma)); 163 hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG, 164 ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S | 165 HNS3_NIC_SW_RST_RDY); 166 hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0); 167 hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0); 168 } else { 169 hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG, 170 lower_32_bits(dma)); 171 hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG, 172 upper_32_bits(dma)); 173 hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG, 174 ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S); 175 hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0); 176 hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0); 177 } 178 } 179 180 static void 181 hns3_cmd_init_regs(struct hns3_hw *hw) 182 { 183 hns3_cmd_config_regs(&hw->cmq.csq); 184 hns3_cmd_config_regs(&hw->cmq.crq); 185 } 186 187 static int 188 hns3_cmd_csq_clean(struct hns3_hw *hw) 189 { 190 struct hns3_cmq_ring *csq = &hw->cmq.csq; 191 uint32_t head; 192 uint32_t addr; 193 int clean; 194 195 head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG); 196 addr = hns3_read_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG); 197 if (!is_valid_csq_clean_head(csq, head) || addr == 0) { 198 hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head, 199 csq->next_to_use, csq->next_to_clean); 200 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 201 rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, 202 rte_memory_order_relaxed); 203 hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw)); 204 } 205 206 return -EIO; 207 } 208 209 clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; 210 csq->next_to_clean = head; 211 return clean; 212 } 213 214 static int 215 hns3_cmd_csq_done(struct hns3_hw *hw) 216 { 217 uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG); 218 219 return head == hw->cmq.csq.next_to_use; 220 } 221 222 static bool 223 hns3_is_special_opcode(uint16_t opcode) 224 { 225 /* 226 * These commands have several descriptors, 227 * and use the first one to save opcode and return value. 228 */ 229 uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT, 230 HNS3_OPC_STATS_32_BIT, 231 HNS3_OPC_STATS_MAC, 232 HNS3_OPC_STATS_MAC_ALL, 233 HNS3_OPC_QUERY_32_BIT_REG, 234 HNS3_OPC_QUERY_64_BIT_REG, 235 HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT, 236 HNS3_OPC_QUERY_CLEAR_PF_RAS_INT, 237 HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT, 238 HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT, 239 HNS3_OPC_QUERY_ALL_ERR_INFO,}; 240 uint32_t i; 241 242 for (i = 0; i < RTE_DIM(spec_opcode); i++) 243 if (spec_opcode[i] == opcode) 244 return true; 245 246 return false; 247 } 248 249 static int 250 hns3_cmd_convert_err_code(uint16_t desc_ret) 251 { 252 static const struct { 253 uint16_t imp_errcode; 254 int linux_errcode; 255 } hns3_cmdq_status[] = { 256 {HNS3_CMD_EXEC_SUCCESS, 0}, 257 {HNS3_CMD_NO_AUTH, -EPERM}, 258 {HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP}, 259 {HNS3_CMD_QUEUE_FULL, -EXFULL}, 260 {HNS3_CMD_NEXT_ERR, -ENOSR}, 261 {HNS3_CMD_UNEXE_ERR, -ENOTBLK}, 262 {HNS3_CMD_PARA_ERR, -EINVAL}, 263 {HNS3_CMD_RESULT_ERR, -ERANGE}, 264 {HNS3_CMD_TIMEOUT, -ETIME}, 265 {HNS3_CMD_HILINK_ERR, -ENOLINK}, 266 {HNS3_CMD_QUEUE_ILLEGAL, -ENXIO}, 267 {HNS3_CMD_INVALID, -EBADR}, 268 {HNS3_CMD_ROH_CHECK_FAIL, -EINVAL} 269 }; 270 271 uint32_t i; 272 273 for (i = 0; i < RTE_DIM(hns3_cmdq_status); i++) 274 if (hns3_cmdq_status[i].imp_errcode == desc_ret) 275 return hns3_cmdq_status[i].linux_errcode; 276 277 return -EREMOTEIO; 278 } 279 280 static int 281 hns3_cmd_get_hardware_reply(struct hns3_hw *hw, 282 struct hns3_cmd_desc *desc, int num, int ntc) 283 { 284 uint16_t opcode, desc_ret; 285 int current_ntc = ntc; 286 int handle; 287 288 opcode = rte_le_to_cpu_16(desc[0].opcode); 289 for (handle = 0; handle < num; handle++) { 290 /* Get the result of hardware write back */ 291 desc[handle] = hw->cmq.csq.desc[current_ntc]; 292 293 current_ntc++; 294 if (current_ntc == hw->cmq.csq.desc_num) 295 current_ntc = 0; 296 } 297 298 if (likely(!hns3_is_special_opcode(opcode))) 299 desc_ret = rte_le_to_cpu_16(desc[num - 1].retval); 300 else 301 desc_ret = rte_le_to_cpu_16(desc[0].retval); 302 303 hw->cmq.last_status = desc_ret; 304 return hns3_cmd_convert_err_code(desc_ret); 305 } 306 307 static int hns3_cmd_poll_reply(struct hns3_hw *hw) 308 { 309 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 310 uint32_t timeout = 0; 311 312 do { 313 if (hns3_cmd_csq_done(hw)) 314 return 0; 315 316 if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) { 317 hns3_err(hw, 318 "Don't wait for reply because of disable_cmd"); 319 return -EBUSY; 320 } 321 322 if (is_reset_pending(hns)) { 323 hns3_err(hw, "Don't wait for reply because of reset pending"); 324 return -EIO; 325 } 326 327 rte_delay_us(1); 328 timeout++; 329 } while (timeout < hw->cmq.tx_timeout); 330 hns3_err(hw, "Wait for reply timeout"); 331 return -ETIME; 332 } 333 334 /* 335 * hns3_cmd_send - send command to command queue 336 * 337 * @param hw 338 * pointer to the hw struct 339 * @param desc 340 * prefilled descriptor for describing the command 341 * @param num 342 * the number of descriptors to be sent 343 * @return 344 * - -EBUSY if detect device is in resetting 345 * - -EIO if detect cmd csq corrupted (due to reset) or 346 * there is reset pending 347 * - -ENOMEM/-ETIME/...(Non-Zero) if other error case 348 * - Zero if operation completed successfully 349 * 350 * Note -BUSY/-EIO only used in reset case 351 * 352 * Note this is the main send command for command queue, it 353 * sends the queue, cleans the queue, etc 354 */ 355 int 356 hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num) 357 { 358 struct hns3_cmd_desc *desc_to_use; 359 int handle = 0; 360 int retval; 361 uint32_t ntc; 362 363 if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) 364 return -EBUSY; 365 366 rte_spinlock_lock(&hw->cmq.csq.lock); 367 368 /* Clean the command send queue */ 369 retval = hns3_cmd_csq_clean(hw); 370 if (retval < 0) { 371 rte_spinlock_unlock(&hw->cmq.csq.lock); 372 return retval; 373 } 374 375 if (num > hns3_ring_space(&hw->cmq.csq)) { 376 rte_spinlock_unlock(&hw->cmq.csq.lock); 377 return -ENOMEM; 378 } 379 380 /* 381 * Record the location of desc in the ring for this time 382 * which will be use for hardware to write back 383 */ 384 ntc = hw->cmq.csq.next_to_use; 385 386 while (handle < num) { 387 desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; 388 *desc_to_use = desc[handle]; 389 (hw->cmq.csq.next_to_use)++; 390 if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) 391 hw->cmq.csq.next_to_use = 0; 392 handle++; 393 } 394 395 /* Write to hardware */ 396 hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use); 397 398 /* 399 * If the command is sync, wait for the firmware to write back, 400 * if multi descriptors to be sent, use the first one to check. 401 */ 402 if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) { 403 retval = hns3_cmd_poll_reply(hw); 404 if (!retval) 405 retval = hns3_cmd_get_hardware_reply(hw, desc, num, 406 ntc); 407 } 408 409 rte_spinlock_unlock(&hw->cmq.csq.lock); 410 return retval; 411 } 412 413 static const char * 414 hns3_get_caps_name(uint32_t caps_id) 415 { 416 const struct { 417 enum HNS3_CAPS_BITS caps; 418 const char *name; 419 } dev_caps[] = { 420 { HNS3_CAPS_FD_QUEUE_REGION_B, "fd_queue_region" }, 421 { HNS3_CAPS_PTP_B, "ptp" }, 422 { HNS3_CAPS_SIMPLE_BD_B, "simple_bd" }, 423 { HNS3_CAPS_TX_PUSH_B, "tx_push" }, 424 { HNS3_CAPS_PHY_IMP_B, "phy_imp" }, 425 { HNS3_CAPS_TQP_TXRX_INDEP_B, "tqp_txrx_indep" }, 426 { HNS3_CAPS_HW_PAD_B, "hw_pad" }, 427 { HNS3_CAPS_STASH_B, "stash" }, 428 { HNS3_CAPS_UDP_TUNNEL_CSUM_B, "udp_tunnel_csum" }, 429 { HNS3_CAPS_RAS_IMP_B, "ras_imp" }, 430 { HNS3_CAPS_RXD_ADV_LAYOUT_B, "rxd_adv_layout" }, 431 { HNS3_CAPS_TM_B, "tm_capability" }, 432 { HNS3_CAPS_FC_AUTO_B, "fc_autoneg" } 433 }; 434 uint32_t i; 435 436 for (i = 0; i < RTE_DIM(dev_caps); i++) { 437 if (dev_caps[i].caps == caps_id) 438 return dev_caps[i].name; 439 } 440 441 return "unknown"; 442 } 443 444 static void 445 hns3_mask_capability(struct hns3_hw *hw, 446 struct hns3_query_version_cmd *cmd) 447 { 448 #define MAX_CAPS_BIT 64 449 450 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 451 uint64_t caps_org, caps_new, caps_masked; 452 uint32_t i; 453 454 if (hns->dev_caps_mask == 0) 455 return; 456 457 memcpy(&caps_org, &cmd->caps[0], sizeof(caps_org)); 458 caps_org = rte_le_to_cpu_64(caps_org); 459 caps_new = caps_org ^ (caps_org & hns->dev_caps_mask); 460 caps_masked = caps_org ^ caps_new; 461 caps_new = rte_cpu_to_le_64(caps_new); 462 memcpy(&cmd->caps[0], &caps_new, sizeof(caps_new)); 463 464 for (i = 0; i < MAX_CAPS_BIT; i++) { 465 if (!(caps_masked & BIT_ULL(i))) 466 continue; 467 hns3_info(hw, "mask capability: id-%u, name-%s.", 468 i, hns3_get_caps_name(i)); 469 } 470 } 471 472 static void 473 hns3_parse_capability(struct hns3_hw *hw, 474 struct hns3_query_version_cmd *cmd) 475 { 476 uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]); 477 478 if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B)) 479 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B, 480 1); 481 if (hns3_get_bit(caps, HNS3_CAPS_PTP_B)) { 482 /* 483 * PTP depends on special packet type reported by hardware which 484 * enabled rxd advanced layout, so if the hardware doesn't 485 * support rxd advanced layout, driver should ignore the PTP 486 * capability. 487 */ 488 if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B)) 489 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1); 490 else 491 hns3_warn(hw, "ignore PTP capability due to lack of " 492 "rxd advanced layout capability."); 493 } 494 if (hns3_get_bit(caps, HNS3_CAPS_SIMPLE_BD_B)) 495 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_SIMPLE_BD_B, 1); 496 if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B)) 497 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1); 498 if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B)) 499 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1); 500 if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B)) 501 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1); 502 if (hns3_get_bit(caps, HNS3_CAPS_STASH_B)) 503 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1); 504 if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B)) 505 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, 506 1); 507 if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B)) 508 hns3_set_bit(hw->capability, 509 HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1); 510 if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B)) 511 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1); 512 if (hns3_get_bit(caps, HNS3_CAPS_TM_B)) 513 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TM_B, 1); 514 if (hns3_get_bit(caps, HNS3_CAPS_FC_AUTO_B)) 515 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FC_AUTO_B, 1); 516 if (hns3_get_bit(caps, HNS3_CAPS_GRO_B)) 517 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_GRO_B, 1); 518 } 519 520 static uint32_t 521 hns3_build_api_caps(void) 522 { 523 uint32_t api_caps = 0; 524 525 hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1); 526 527 return rte_cpu_to_le_32(api_caps); 528 } 529 530 static void 531 hns3_set_dcb_capability(struct hns3_hw *hw) 532 { 533 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 534 struct rte_pci_device *pci_dev; 535 struct rte_eth_dev *eth_dev; 536 uint16_t device_id; 537 538 if (hns->is_vf) 539 return; 540 541 eth_dev = &rte_eth_devices[hw->data->port_id]; 542 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 543 device_id = pci_dev->id.device_id; 544 545 if (device_id == HNS3_DEV_ID_25GE_RDMA || 546 device_id == HNS3_DEV_ID_50GE_RDMA || 547 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || 548 device_id == HNS3_DEV_ID_200G_RDMA) 549 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); 550 } 551 552 static void 553 hns3_set_default_capability(struct hns3_hw *hw) 554 { 555 hns3_set_dcb_capability(hw); 556 557 /* 558 * The firmware of the network engines with HIP08 do not report some 559 * capabilities, like GRO. Set default capabilities for it. 560 */ 561 if (hw->revision < PCI_REVISION_ID_HIP09_A) 562 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_GRO_B, 1); 563 } 564 565 static int 566 hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) 567 { 568 struct hns3_query_version_cmd *resp; 569 struct hns3_cmd_desc desc; 570 int ret; 571 572 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1); 573 resp = (struct hns3_query_version_cmd *)desc.data; 574 resp->api_caps = hns3_build_api_caps(); 575 576 /* Initialize the cmd function */ 577 ret = hns3_cmd_send(hw, &desc, 1); 578 if (ret) 579 return ret; 580 581 hw->fw_version = rte_le_to_cpu_32(resp->firmware); 582 583 hns3_set_default_capability(hw); 584 585 /* 586 * Make sure mask the capability before parse capability because it 587 * may overwrite resp's data. 588 */ 589 hns3_mask_capability(hw, resp); 590 hns3_parse_capability(hw, resp); 591 592 return 0; 593 } 594 595 int 596 hns3_cmd_init_queue(struct hns3_hw *hw) 597 { 598 int ret; 599 600 /* Setup the lock for command queue */ 601 rte_spinlock_init(&hw->cmq.csq.lock); 602 rte_spinlock_init(&hw->cmq.crq.lock); 603 604 /* 605 * Clear up all command register, 606 * in case there are some residual values 607 */ 608 hns3_cmd_clear_regs(hw); 609 610 /* Setup the queue entries for use cmd queue */ 611 hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM; 612 hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM; 613 614 /* Setup Tx write back timeout */ 615 hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT; 616 617 /* Setup queue rings */ 618 ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ); 619 if (ret) { 620 PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret); 621 return ret; 622 } 623 624 ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ); 625 if (ret) { 626 PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret); 627 goto err_crq; 628 } 629 630 return 0; 631 632 err_crq: 633 hns3_free_cmd_desc(hw, &hw->cmq.csq); 634 635 return ret; 636 } 637 638 static void 639 hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result) 640 { 641 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; 642 643 if (hw->adapter_state != HNS3_NIC_UNINITIALIZED) 644 return; 645 646 if (fw_compact_cmd_result != 0) { 647 /* 648 * If fw_compact_cmd_result is not zero, it means firmware don't 649 * support link status change interrupt. 650 * Framework already set RTE_ETH_DEV_INTR_LSC bit because driver 651 * declared RTE_PCI_DRV_INTR_LSC in drv_flags. It need to clear 652 * the RTE_ETH_DEV_INTR_LSC capability when detect firmware 653 * don't support link status change interrupt. 654 */ 655 dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; 656 } 657 } 658 659 static void 660 hns3_set_fc_autoneg_cap(struct hns3_adapter *hns, int fw_compact_cmd_result) 661 { 662 struct hns3_hw *hw = &hns->hw; 663 struct hns3_mac *mac = &hw->mac; 664 665 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) { 666 hns->pf.support_fc_autoneg = true; 667 return; 668 } 669 670 /* 671 * Flow control auto-negotiation requires the cooperation of the driver 672 * and firmware. 673 */ 674 hns->pf.support_fc_autoneg = (hns3_dev_get_support(hw, FC_AUTO) && 675 fw_compact_cmd_result == 0) ? 676 true : false; 677 } 678 679 static int 680 hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result) 681 { 682 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 683 684 if (result != 0 && hns3_dev_get_support(hw, COPPER)) { 685 hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.", 686 result); 687 return result; 688 } 689 690 hns3_update_dev_lsc_cap(hw, result); 691 hns3_set_fc_autoneg_cap(hns, result); 692 693 return 0; 694 } 695 696 static int 697 hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) 698 { 699 struct hns3_firmware_compat_cmd *req; 700 struct hns3_cmd_desc desc; 701 uint32_t compat = 0; 702 703 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false); 704 req = (struct hns3_firmware_compat_cmd *)desc.data; 705 706 if (is_init) { 707 hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1); 708 hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0); 709 hns3_set_bit(compat, HNS3_LLRS_FEC_EN_B, 1); 710 if (hns3_dev_get_support(hw, COPPER)) 711 hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1); 712 if (hns3_dev_get_support(hw, FC_AUTO)) 713 hns3_set_bit(compat, HNS3_MAC_FC_AUTONEG_EN_B, 1); 714 } 715 req->compat = rte_cpu_to_le_32(compat); 716 717 return hns3_cmd_send(hw, &desc, 1); 718 } 719 720 int 721 hns3_cmd_init(struct hns3_hw *hw) 722 { 723 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 724 uint32_t version; 725 int ret; 726 727 rte_spinlock_lock(&hw->cmq.csq.lock); 728 rte_spinlock_lock(&hw->cmq.crq.lock); 729 730 hw->cmq.csq.next_to_clean = 0; 731 hw->cmq.csq.next_to_use = 0; 732 hw->cmq.crq.next_to_clean = 0; 733 hw->cmq.crq.next_to_use = 0; 734 hns3_cmd_init_regs(hw); 735 736 rte_spinlock_unlock(&hw->cmq.crq.lock); 737 rte_spinlock_unlock(&hw->cmq.csq.lock); 738 739 /* 740 * Check if there is new reset pending, because the higher level 741 * reset may happen when lower level reset is being processed. 742 */ 743 if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) { 744 PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd"); 745 ret = -EBUSY; 746 goto err_cmd_init; 747 } 748 rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed); 749 750 ret = hns3_cmd_query_firmware_version_and_capability(hw); 751 if (ret) { 752 PMD_INIT_LOG(ERR, "firmware version query failed %d", ret); 753 goto err_cmd_init; 754 } 755 756 version = hw->fw_version; 757 PMD_INIT_LOG(INFO, "The firmware version is %lu.%lu.%lu.%lu", 758 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, 759 HNS3_FW_VERSION_BYTE3_S), 760 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, 761 HNS3_FW_VERSION_BYTE2_S), 762 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, 763 HNS3_FW_VERSION_BYTE1_S), 764 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, 765 HNS3_FW_VERSION_BYTE0_S)); 766 767 if (hns->is_vf) 768 return 0; 769 770 /* 771 * Requiring firmware to enable some features, fiber port can still 772 * work without it, but copper port can't work because the firmware 773 * fails to take over the PHY. 774 */ 775 ret = hns3_firmware_compat_config(hw, true); 776 if (ret) 777 PMD_INIT_LOG(WARNING, "firmware compatible features not " 778 "supported, ret = %d.", ret); 779 780 /* 781 * Perform some corresponding operations based on the firmware 782 * compatibility configuration result. 783 */ 784 ret = hns3_apply_fw_compat_cmd_result(hw, ret); 785 if (ret) 786 goto err_cmd_init; 787 788 return 0; 789 790 err_cmd_init: 791 rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed); 792 return ret; 793 } 794 795 static void 796 hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring) 797 { 798 rte_spinlock_lock(&ring->lock); 799 800 hns3_free_cmd_desc(hw, ring); 801 802 rte_spinlock_unlock(&ring->lock); 803 } 804 805 void 806 hns3_cmd_destroy_queue(struct hns3_hw *hw) 807 { 808 hns3_destroy_queue(hw, &hw->cmq.csq); 809 hns3_destroy_queue(hw, &hw->cmq.crq); 810 } 811 812 void 813 hns3_cmd_uninit(struct hns3_hw *hw) 814 { 815 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 816 817 if (!hns->is_vf) 818 (void)hns3_firmware_compat_config(hw, false); 819 820 rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed); 821 822 /* 823 * A delay is added to ensure that the register cleanup operations 824 * will not be performed concurrently with the firmware command and 825 * ensure that all the reserved commands are executed. 826 * Concurrency may occur in two scenarios: asynchronous command and 827 * timeout command. If the command fails to be executed due to busy 828 * scheduling, the command will be processed in the next scheduling 829 * of the firmware. 830 */ 831 rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME); 832 833 rte_spinlock_lock(&hw->cmq.csq.lock); 834 rte_spinlock_lock(&hw->cmq.crq.lock); 835 hns3_cmd_clear_regs(hw); 836 rte_spinlock_unlock(&hw->cmq.crq.lock); 837 rte_spinlock_unlock(&hw->cmq.csq.lock); 838 } 839