/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>
#include <inttypes.h>
#include <unistd.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)

#define cmq_ring_to_dev(ring)	(&(ring)->dev->pdev->dev)

static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - Specific memory alloc for command function.
 * Malloc a memzone, which is a contiguous portion of physical memory identified
 * by a name.
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
		      uint64_t size, uint32_t alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (mz == NULL)
		return -ENOMEM;

	ring->buf_size = size;
	ring->desc = mz->addr;
	ring->desc_dma_addr = mz->iova;
	ring->zone = (const void *)mz;
	hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
		 mz->name, ring->desc_dma_addr);

	return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
		 ((const struct rte_memzone *)ring->zone)->name,
		 ring->desc_dma_addr);
	rte_memzone_free((const struct rte_memzone *)ring->zone);
	ring->buf_size = 0;
	ring->desc = NULL;
	ring->desc_dma_addr = 0;
	ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns3_cmd_desc);

	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
		hns3_err(hw, "allocate dma mem failed");
		return -ENOMEM;
	}

	return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	if (ring->desc)
		hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
	struct hns3_cmq_ring *ring =
		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->hw = hw;

	ret = hns3_alloc_cmd_desc(hw, ring);
	if (ret)
		hns3_err(hw, "descriptor %s alloc error %d",
			 (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

	return ret;
}

void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
	else
		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
			  enum hns3_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
	desc->opcode = rte_cpu_to_le_16(opcode);
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
	uint64_t dma = ring->desc_dma_addr;

	if (ring->ring_type == HNS3_TYPE_CSQ) {
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
			       HNS3_NIC_SW_RST_RDY);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	} else {
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
	}
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}

static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *csq = &hw->cmq.csq;
	uint32_t head;
	int clean;

	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	if (!is_valid_csq_clean_head(csq, head)) {
		hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
			 csq->next_to_use, csq->next_to_clean);
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_atomic16_set(&hw->reset.disable_cmd, 1);
			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
		}

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
	/*
	 * These commands have several descriptors,
	 * and use the first one to save opcode and return value.
	 */
	uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
				  HNS3_OPC_STATS_32_BIT,
				  HNS3_OPC_STATS_MAC,
				  HNS3_OPC_STATS_MAC_ALL,
				  HNS3_OPC_QUERY_32_BIT_REG,
				  HNS3_OPC_QUERY_64_BIT_REG};
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
	switch (desc_ret) {
	case HNS3_CMD_EXEC_SUCCESS:
		return 0;
	case HNS3_CMD_NO_AUTH:
		return -EPERM;
	case HNS3_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HNS3_CMD_QUEUE_FULL:
		return -EXFULL;
	case HNS3_CMD_NEXT_ERR:
		return -ENOSR;
	case HNS3_CMD_UNEXE_ERR:
		return -ENOTBLK;
	case HNS3_CMD_PARA_ERR:
		return -EINVAL;
	case HNS3_CMD_RESULT_ERR:
		return -ERANGE;
	case HNS3_CMD_TIMEOUT:
		return -ETIME;
	case HNS3_CMD_HILINK_ERR:
		return -ENOLINK;
	case HNS3_CMD_QUEUE_ILLEGAL:
		return -ENXIO;
	case HNS3_CMD_INVALID:
		return -EBADR;
	default:
		return -EREMOTEIO;
	}
}

static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
			    struct hns3_cmd_desc *desc, int num, int ntc)
{
	uint16_t opcode, desc_ret;
	int current_ntc = ntc;
	int handle;

	opcode = rte_le_to_cpu_16(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of hardware write back */
		desc[handle] = hw->cmq.csq.desc[current_ntc];

		current_ntc++;
		if (current_ntc == hw->cmq.csq.desc_num)
			current_ntc = 0;
	}

	if (likely(!hns3_is_special_opcode(opcode)))
		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
	else
		desc_ret = rte_le_to_cpu_16(desc[0].retval);

	hw->cmq.last_status = desc_ret;
	return hns3_cmd_convert_err_code(desc_ret);
}

static int hns3_cmd_poll_reply(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t timeout = 0;

	do {
		if (hns3_cmd_csq_done(hw))
			return 0;

		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
			hns3_err(hw,
				 "Don't wait for reply because of disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hns3_err(hw, "Don't wait for reply because of reset pending");
			return -EIO;
		}

		rte_delay_us(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
	hns3_err(hw, "Wait for reply timeout");
	return -ETIME;
}

/*
 * hns3_cmd_send - send command to the command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor for describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is resetting
 *   - -EIO if the command CSQ is corrupted (due to reset) or
 *     a reset is pending
 *   - -ENOMEM/-ETIME/...(non-zero) for other errors
 *   - Zero if the operation completed successfully
 *
 * Note -EBUSY/-EIO are only used in the reset case.
 *
 * Note this is the main send routine for the command queue: it posts the
 * descriptors to the CSQ and cleans completed ones.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
	struct hns3_cmd_desc *desc_to_use;
	int handle = 0;
	int retval;
	uint32_t ntc;

	if (rte_atomic16_read(&hw->reset.disable_cmd))
		return -EBUSY;

	rte_spinlock_lock(&hw->cmq.csq.lock);

	/* Clean the command send queue */
	retval = hns3_cmd_csq_clean(hw);
	if (retval < 0) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return retval;
	}

	if (num > hns3_ring_space(&hw->cmq.csq)) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return -ENOMEM;
	}

	/*
	 * Record the location of the descriptors in the ring for this
	 * submission; it will be used to fetch the hardware write-back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

	/*
	 * If the command is synchronous, wait for the firmware write-back;
	 * if multiple descriptors are sent, use the first one to check.
	 */
	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
		retval = hns3_cmd_poll_reply(hw);
		if (!retval)
			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
							     ntc);
	}

	rte_spinlock_unlock(&hw->cmq.csq.lock);
	return retval;
}
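
/*
 * Usage sketch for hns3_cmd_send() (illustrative only; it mirrors the
 * firmware version query further below): a synchronous single-descriptor
 * command sets up the descriptor, sends it and reads the reply that the
 * firmware wrote back into desc.data, e.g.
 *
 *	struct hns3_cmd_desc desc;
 *	uint32_t fw_ver;
 *	int ret;
 *
 *	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
 *	ret = hns3_cmd_send(hw, &desc, 1);
 *	if (ret == 0)
 *		fw_ver = rte_le_to_cpu_32(desc.data[0]);
 */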

static void hns3_parse_capability(struct hns3_hw *hw,
				  struct hns3_query_version_cmd *cmd)
{
	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

	if (hns3_get_bit(caps, HNS3_CAPS_UDP_GSO_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_UDP_GSO_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
}

static int
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
	struct hns3_query_version_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
	resp = (struct hns3_query_version_cmd *)desc.data;

	/* Initialize the cmd function */
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	hw->fw_version = rte_le_to_cpu_32(resp->firmware);
	hns3_parse_capability(hw, resp);

	return 0;
}
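
/*
 * Note on the value stored in hw->fw_version above: hns3_cmd_init() below
 * logs it byte by byte via the HNS3_FW_VERSION_BYTE*_M/_S fields. Assuming
 * BYTE3 selects the most significant byte, a firmware word of 0x01080200
 * would be reported as version 1.8.2.0 (the concrete value is only an
 * illustration).
 */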

int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
	int ret;

	/* Setup the locks for the command queues */
	rte_spinlock_init(&hw->cmq.csq.lock);
	rte_spinlock_init(&hw->cmq.crq.lock);

	/*
	 * Clear all command registers,
	 * in case there are residual values.
	 */
	hns3_cmd_clear_regs(hw);

	/* Setup the number of queue entries used by the command queues */
	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
		return ret;
	}

	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
		goto err_crq;
	}

	return 0;

err_crq:
	hns3_free_cmd_desc(hw, &hw->cmq.csq);

	return ret;
}

int
hns3_cmd_init(struct hns3_hw *hw)
{
	uint32_t version;
	int ret;

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);

	hw->cmq.csq.next_to_clean = 0;
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hw->mbx_resp.head = 0;
	hw->mbx_resp.tail = 0;
	hw->mbx_resp.lost = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);

	/*
	 * Check whether a new reset is pending, because a higher level
	 * reset may be triggered while a lower level reset is being
	 * processed.
	 */
	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
		ret = -EBUSY;
		goto err_cmd_init;
	}
	rte_atomic16_clear(&hw->reset.disable_cmd);

	ret = hns3_cmd_query_firmware_version_and_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
		goto err_cmd_init;
	}

	version = hw->fw_version;
	PMD_INIT_LOG(INFO, "The firmware version is %u.%u.%u.%u",
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				    HNS3_FW_VERSION_BYTE3_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				    HNS3_FW_VERSION_BYTE2_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				    HNS3_FW_VERSION_BYTE1_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				    HNS3_FW_VERSION_BYTE0_S));

	return 0;

err_cmd_init:
	rte_atomic16_set(&hw->reset.disable_cmd, 1);
	return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	rte_spinlock_lock(&ring->lock);

	hns3_free_cmd_desc(hw, ring);

	rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
	hns3_destroy_queue(hw, &hw->cmq.csq);
	hns3_destroy_queue(hw, &hw->cmq.crq);
}

void
hns3_cmd_uninit(struct hns3_hw *hw)
{
	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);
	rte_atomic16_set(&hw->reset.disable_cmd, 1);
	hns3_cmd_clear_regs(hw);
	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);
}