/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <bus_pci_driver.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_geneve.h>
#include <rte_vxlan.h>
#include <ethdev_driver.h>
#include <rte_io.h>
#include <rte_net.h>
#include <rte_malloc.h>
#if defined(RTE_ARCH_ARM64)
#include <rte_cpuflags.h>
#include <rte_vect.h>
#endif

#include "hns3_common.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_mp.h"
#include "hns3_rxtx.h"

#define HNS3_CFG_DESC_NUM(num)		((num) / 8 - 1)
#define HNS3_RX_RING_PREFETCTH_MASK	3

static void
hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
{
	uint16_t i;

	/* Note: Fake rx queue will not enter here */
	if (rxq->sw_ring == NULL)
		return;

	if (rxq->rx_rearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
	} else {
		for (i = rxq->next_to_use;
		     i != rxq->rx_rearm_start;
		     i = (i + 1) % rxq->nb_rx_desc) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
		for (i = 0; i < rxq->rx_rearm_nb; i++)
			rxq->sw_ring[(rxq->rx_rearm_start + i) % rxq->nb_rx_desc].mbuf = NULL;
	}

	for (i = 0; i < rxq->bulk_mbuf_num; i++)
		rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);
	rxq->bulk_mbuf_num = 0;

	if (rxq->pkt_first_seg) {
		rte_pktmbuf_free(rxq->pkt_first_seg);
		rxq->pkt_first_seg = NULL;
	}
}

static void
hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
{
	uint16_t i;

	/* Note: Fake tx queue will not enter here */
	if (txq->sw_ring) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
hns3_rx_queue_release(void *queue)
{
	struct hns3_rx_queue *rxq = queue;
	if (rxq) {
		hns3_rx_queue_release_mbufs(rxq);
		if (rxq->mz)
			rte_memzone_free(rxq->mz);
		rte_free(rxq->sw_ring);
		rte_free(rxq);
	}
}

static void
hns3_tx_queue_release(void *queue)
{
	struct hns3_tx_queue *txq = queue;
	if (txq) {
		hns3_tx_queue_release_mbufs(txq);
		if (txq->mz)
			rte_memzone_free(txq->mz);
		rte_free(txq->sw_ring);
		rte_free(txq->free);
		rte_free(txq);
	}
}

static void
hns3_rx_queue_release_lock(void *queue)
{
	struct hns3_rx_queue *rxq = queue;
	struct hns3_adapter *hns;

	if (rxq == NULL)
		return;

	hns = rxq->hns;
	rte_spinlock_lock(&hns->hw.lock);
	hns3_rx_queue_release(queue);
	rte_spinlock_unlock(&hns->hw.lock);
}

void
hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
{
	hns3_rx_queue_release_lock(dev->data->rx_queues[queue_id]);
}

static void
hns3_tx_queue_release_lock(void *queue)
{
	struct hns3_tx_queue *txq = queue;
	struct hns3_adapter *hns;

	if (txq == NULL)
		return;

	hns = txq->hns;
	rte_spinlock_lock(&hns->hw.lock);
	hns3_tx_queue_release(queue);
	rte_spinlock_unlock(&hns->hw.lock);
}

void
hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
{
	hns3_tx_queue_release_lock(dev->data->tx_queues[queue_id]);
}
static void
hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
{
	struct hns3_rx_queue *rxq = queue;
	struct hns3_adapter *hns;
	struct hns3_hw *hw;
	uint16_t idx;

	if (rxq == NULL)
		return;

	hns = rxq->hns;
	hw = &hns->hw;
	idx = rxq->queue_id;
	if (hw->fkq_data.rx_queues[idx]) {
		hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
		hw->fkq_data.rx_queues[idx] = NULL;
	}

	/* free fake rx queue arrays */
	if (idx == hw->fkq_data.nb_fake_rx_queues - 1) {
		hw->fkq_data.nb_fake_rx_queues = 0;
		rte_free(hw->fkq_data.rx_queues);
		hw->fkq_data.rx_queues = NULL;
	}
}

static void
hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
{
	struct hns3_tx_queue *txq = queue;
	struct hns3_adapter *hns;
	struct hns3_hw *hw;
	uint16_t idx;

	if (txq == NULL)
		return;

	hns = txq->hns;
	hw = &hns->hw;
	idx = txq->queue_id;
	if (hw->fkq_data.tx_queues[idx]) {
		hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
		hw->fkq_data.tx_queues[idx] = NULL;
	}

	/* free fake tx queue arrays */
	if (idx == hw->fkq_data.nb_fake_tx_queues - 1) {
		hw->fkq_data.nb_fake_tx_queues = 0;
		rte_free(hw->fkq_data.tx_queues);
		hw->fkq_data.tx_queues = NULL;
	}
}

static void
hns3_free_rx_queues(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_fake_queue_data *fkq_data;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_rx_q;
	uint16_t i;

	nb_rx_q = hw->data->nb_rx_queues;
	for (i = 0; i < nb_rx_q; i++) {
		if (dev->data->rx_queues[i]) {
			hns3_rx_queue_release(dev->data->rx_queues[i]);
			dev->data->rx_queues[i] = NULL;
		}
	}

	/* Free fake Rx queues */
	fkq_data = &hw->fkq_data;
	for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
		if (fkq_data->rx_queues[i])
			hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
	}
}

static void
hns3_free_tx_queues(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_fake_queue_data *fkq_data;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_tx_q;
	uint16_t i;

	nb_tx_q = hw->data->nb_tx_queues;
	for (i = 0; i < nb_tx_q; i++) {
		if (dev->data->tx_queues[i]) {
			hns3_tx_queue_release(dev->data->tx_queues[i]);
			dev->data->tx_queues[i] = NULL;
		}
	}

	/* Free fake Tx queues */
	fkq_data = &hw->fkq_data;
	for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
		if (fkq_data->tx_queues[i])
			hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
	}
}

void
hns3_free_all_queues(struct rte_eth_dev *dev)
{
	hns3_free_rx_queues(dev);
	hns3_free_tx_queues(dev);
}

static int
hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
{
	struct rte_mbuf *mbuf;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(mbuf == NULL)) {
			hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
				 i);
			hns3_rx_queue_release_mbufs(rxq);
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		rxq->sw_ring[i].mbuf = mbuf;
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
		rxq->rx_ring[i].addr = dma_addr;
		rxq->rx_ring[i].rx.bd_base_info = 0;
	}

	return 0;
}
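/*
 * Editorial note (derived from the code below, not from hardware docs): the
 * ring-init helpers that follow translate driver configuration into register
 * encodings. hns3_buf_size2type() maps an Rx buffer size to one of four BD
 * size types, falling back to the 2K type for any other size, and
 * HNS3_CFG_DESC_NUM() (defined at the top of this file) encodes a descriptor
 * count as (num / 8 - 1), e.g. HNS3_CFG_DESC_NUM(1024) == 127.
 */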
static int
hns3_buf_size2type(uint32_t buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

static void
hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
{
	uint32_t rx_buf_len = rxq->rx_buf_len;
	uint64_t dma_addr = rxq->rx_ring_phys_addr;

	hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
	hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
		       (uint32_t)(dma_addr >> 32));

	hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
		       hns3_buf_size2type(rx_buf_len));
	hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
		       HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
}

static void
hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
{
	uint64_t dma_addr = txq->tx_ring_phys_addr;

	hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
	hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
		       (uint32_t)(dma_addr >> 32));

	hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
		       HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
}

void
hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
{
	uint16_t nb_rx_q = hw->data->nb_rx_queues;
	uint16_t nb_tx_q = hw->data->nb_tx_queues;
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	bool pvid_en;
	int i;

	pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
	for (i = 0; i < hw->cfg_max_queues; i++) {
		if (i < nb_rx_q) {
			rxq = hw->data->rx_queues[i];
			if (rxq != NULL)
				rxq->pvid_sw_discard_en = pvid_en;
		}
		if (i < nb_tx_q) {
			txq = hw->data->tx_queues[i];
			if (txq != NULL)
				txq->pvid_sw_shift_en = pvid_en;
		}
	}
}

static void
hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
{
	uint32_t reg_offset;
	uint32_t reg;

	reg_offset = queue_type == HNS3_RING_TYPE_TX ?
		     HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
	reg = hns3_read_reg(tqp_base, reg_offset);
	reg &= ~BIT(HNS3_RING_EN_B);
	hns3_write_reg(tqp_base, reg_offset, reg);
}
void
hns3_enable_all_queues(struct hns3_hw *hw, bool en)
{
	uint16_t nb_rx_q = hw->data->nb_rx_queues;
	uint16_t nb_tx_q = hw->data->nb_tx_queues;
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	uint32_t rcb_reg;
	void *tqp_base;
	uint16_t i;

	for (i = 0; i < hw->cfg_max_queues; i++) {
		if (hns3_dev_get_support(hw, INDEP_TXRX)) {
			rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
			txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;

			tqp_base = (void *)((char *)hw->io_base +
					    hns3_get_tqp_reg_offset(i));
			/*
			 * If the queue struct is not initialized, the related
			 * HW ring has not been initialized yet. So these
			 * queues should be disabled before enabling the tqps,
			 * to avoid a HW exception since the queues are
			 * enabled by default.
			 */
			if (rxq == NULL)
				hns3_stop_unused_queue(tqp_base,
						       HNS3_RING_TYPE_RX);
			if (txq == NULL)
				hns3_stop_unused_queue(tqp_base,
						       HNS3_RING_TYPE_TX);
		} else {
			rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
			      hw->fkq_data.rx_queues[i - nb_rx_q];

			tqp_base = rxq->io_base;
		}
		/*
		 * This is the master switch used to control the enabling of
		 * a pair of Tx and Rx queues. Both the Rx and Tx queues point
		 * to the same register.
		 */
		rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
		if (en)
			rcb_reg |= BIT(HNS3_RING_EN_B);
		else
			rcb_reg &= ~BIT(HNS3_RING_EN_B);
		hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
	}
}

static void
hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
{
	struct hns3_hw *hw = &txq->hns->hw;
	uint32_t reg;

	if (hns3_dev_get_support(hw, INDEP_TXRX)) {
		reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
		if (en)
			reg |= BIT(HNS3_RING_EN_B);
		else
			reg &= ~BIT(HNS3_RING_EN_B);
		hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
	}
	txq->enabled = en;
}

static void
hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
{
	struct hns3_hw *hw = &rxq->hns->hw;
	uint32_t reg;

	if (hns3_dev_get_support(hw, INDEP_TXRX)) {
		reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
		if (en)
			reg |= BIT(HNS3_RING_EN_B);
		else
			reg &= ~BIT(HNS3_RING_EN_B);
		hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
	}
	rxq->enabled = en;
}

int
hns3_start_all_txqs(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_tx_queue *txq;
	uint16_t i, j;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = hw->data->tx_queues[i];
		if (!txq) {
			hns3_err(hw, "Tx queue %u not available or setup.", i);
			goto start_txqs_fail;
		}
		/*
		 * A Tx queue is enabled by default, so it needs to be
		 * disabled when deferred_start is set. There is another
		 * master switch used to control the enabling of a pair of
		 * Tx and Rx queues, and that master switch is disabled by
		 * default.
		 */
		if (txq->tx_deferred_start)
			hns3_enable_txq(txq, false);
		else
			hns3_enable_txq(txq, true);
	}
	return 0;

start_txqs_fail:
	for (j = 0; j < i; j++) {
		txq = hw->data->tx_queues[j];
		hns3_enable_txq(txq, false);
	}
	return -EINVAL;
}
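/*
 * Usage sketch (editorial, not part of the driver): with tx_deferred_start
 * set in struct rte_eth_txconf, hns3_start_all_txqs() above leaves the queue
 * disabled after rte_eth_dev_start() and the application starts it
 * explicitly, e.g.:
 *
 *	struct rte_eth_txconf txconf = { .tx_deferred_start = 1 };
 *
 *	rte_eth_tx_queue_setup(port_id, 0, nb_desc, socket_id, &txconf);
 *	rte_eth_dev_start(port_id);
 *	rte_eth_dev_tx_queue_start(port_id, 0);	// enables the Tx queue
 */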
int
hns3_start_all_rxqs(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_rx_queue *rxq;
	uint16_t i, j;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = hw->data->rx_queues[i];
		if (!rxq) {
			hns3_err(hw, "Rx queue %u not available or setup.", i);
			goto start_rxqs_fail;
		}
		/*
		 * An Rx queue is enabled by default, so it needs to be
		 * disabled when deferred_start is set. There is another
		 * master switch used to control the enabling of a pair of
		 * Tx and Rx queues, and that master switch is disabled by
		 * default.
		 */
		if (rxq->rx_deferred_start)
			hns3_enable_rxq(rxq, false);
		else
			hns3_enable_rxq(rxq, true);
	}
	return 0;

start_rxqs_fail:
	for (j = 0; j < i; j++) {
		rxq = hw->data->rx_queues[j];
		hns3_enable_rxq(rxq, false);
	}
	return -EINVAL;
}

void
hns3_restore_tqp_enable_state(struct hns3_hw *hw)
{
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	uint16_t i;

	for (i = 0; i < hw->data->nb_rx_queues; i++) {
		rxq = hw->data->rx_queues[i];
		if (rxq != NULL)
			hns3_enable_rxq(rxq, rxq->enabled);
	}

	for (i = 0; i < hw->data->nb_tx_queues; i++) {
		txq = hw->data->tx_queues[i];
		if (txq != NULL)
			hns3_enable_txq(txq, txq->enabled);
	}
}

void
hns3_stop_all_txqs(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_tx_queue *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = hw->data->tx_queues[i];
		if (!txq)
			continue;
		hns3_enable_txq(txq, false);
	}
}

static int
hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
{
	struct hns3_cfg_com_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = rte_cpu_to_le_16(queue_id);
	req->stream_id = 0;
	hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "TQP %s fail, ret = %d", enable ? "enable" : "disable", ret);

	return ret;
}

static int
hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id);
	hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "send tqp reset cmd error, queue_id = %u, ret = %d",
			 queue_id, ret);

	return ret;
}

static int
hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
			  uint8_t *reset_status)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "get tqp reset status error, queue_id = %u, ret = %d.",
			 queue_id, ret);
		return ret;
	}
	*reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
	return ret;
}
static int
hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
#define HNS3_TQP_RESET_TRY_MS	200
	uint16_t wait_time = 0;
	uint8_t reset_status;
	int ret;

	/*
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver: all task queue pairs are mapped to the PF
	 * function, so the PF's queue id is equal to the global queue id in
	 * the PF range.
	 */
	ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
	if (ret) {
		hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
		return ret;
	}

	do {
		/* Wait for tqp hw reset */
		rte_delay_ms(HNS3_POLL_RESPONE_MS);
		wait_time += HNS3_POLL_RESPONE_MS;
		ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
		if (ret)
			goto tqp_reset_fail;

		if (reset_status)
			break;
	} while (wait_time < HNS3_TQP_RESET_TRY_MS);

	if (!reset_status) {
		ret = -ETIMEDOUT;
		hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
			 queue_id, ret);
		goto tqp_reset_fail;
	}

	ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
	if (ret)
		hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);

	return ret;

tqp_reset_fail:
	hns3_send_reset_tqp_cmd(hw, queue_id, false);
	return ret;
}

static int
hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
	struct hns3_vf_to_pf_msg req;
	int ret;

	hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0);
	memcpy(req.data, &queue_id, sizeof(uint16_t));
	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
	if (ret)
		hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
			 queue_id, ret);
	return ret;
}

static int
hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status)
{
	struct hns3_reset_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
	req = (struct hns3_reset_cmd *)desc.data;
	hns3_set_bit(req->fun_reset_rcb, HNS3_CFG_RESET_RCB_B, 1);

	/*
	 * The start qid should be the global qid of the first tqp of the
	 * function which should be reset in this port. Since the PF does
	 * not support taking over VFs, we only need to reset function 0,
	 * and its start qid is always 0.
	 */
	req->fun_reset_rcb_vqid_start = rte_cpu_to_le_16(0);
	req->fun_reset_rcb_vqid_num = rte_cpu_to_le_16(hw->cfg_max_queues);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "fail to send rcb reset cmd, ret = %d.", ret);
		return ret;
	}

	*reset_status = req->fun_reset_rcb_return_status;
	return 0;
}
static int
hns3pf_reset_all_tqps(struct hns3_hw *hw)
{
#define HNS3_RESET_RCB_NOT_SUPPORT	0U
#define HNS3_RESET_ALL_TQP_SUCCESS	1U
	uint8_t reset_status;
	uint16_t i;
	int ret;

	ret = hns3_reset_rcb_cmd(hw, &reset_status);
	if (ret)
		return ret;

	/*
	 * If the firmware version is low, it may not support the rcb reset,
	 * which resets all the tqps at a time. In this case, reset the tqps
	 * one by one.
	 */
	if (reset_status == HNS3_RESET_RCB_NOT_SUPPORT) {
		for (i = 0; i < hw->cfg_max_queues; i++) {
			ret = hns3pf_reset_tqp(hw, i);
			if (ret) {
				hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
					 i, ret);
				return ret;
			}
		}
	} else if (reset_status != HNS3_RESET_ALL_TQP_SUCCESS) {
		hns3_err(hw, "fail to reset all tqps, reset_status = %u.",
			 reset_status);
		return -EIO;
	}

	return 0;
}

static int
hns3vf_reset_all_tqps(struct hns3_hw *hw)
{
#define HNS3VF_RESET_ALL_TQP_DONE	1U
	struct hns3_vf_to_pf_msg req;
	uint8_t reset_status;
	int ret;
	uint16_t i;

	hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0);
	ret = hns3vf_mbx_send(hw, &req, true,
			      &reset_status, sizeof(reset_status));
	if (ret) {
		hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret);
		return ret;
	}

	if (reset_status == HNS3VF_RESET_ALL_TQP_DONE)
		return 0;

	/*
	 * If the firmware version or the kernel PF version is low, it may
	 * not support the rcb reset, which resets all the tqps at a time.
	 * In this case, reset the tqps one by one.
	 */
	for (i = 1; i < hw->cfg_max_queues; i++) {
		ret = hns3vf_reset_tqp(hw, i);
		if (ret)
			return ret;
	}

	return 0;
}

int
hns3_reset_all_tqps(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t i;
	int ret;

	/* Disable all queues before resetting all queues */
	for (i = 0; i < hw->cfg_max_queues; i++) {
		ret = hns3_tqp_enable(hw, i, false);
		if (ret) {
			hns3_err(hw, "fail to disable tqps before tqps reset, ret = %d.",
				 ret);
			return ret;
		}
	}

	if (hns->is_vf)
		return hns3vf_reset_all_tqps(hw);
	else
		return hns3pf_reset_all_tqps(hw);
}

static int
hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
			  enum hns3_ring_type queue_type, bool enable)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id);
	req->queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
	hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
			 "queue_type = %s, ret = %d.", queue_id,
			 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
	return ret;
}

static int
hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
			    enum hns3_ring_type queue_type,
			    uint8_t *reset_status)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id);
	req->queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "get queue reset status error, queue_id = %u "
			 "queue_type = %s, ret = %d.", queue_id,
			 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
		return ret;
	}

	*reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
	return ret;
}

static int
hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
		 enum hns3_ring_type queue_type)
{
#define HNS3_QUEUE_RESET_TRY_MS	200
	struct hns3_tx_queue *txq;
	struct hns3_rx_queue *rxq;
	uint32_t reset_wait_times;
	uint32_t max_wait_times;
	uint8_t reset_status;
	int ret;

	if (queue_type == HNS3_RING_TYPE_TX) {
		txq = hw->data->tx_queues[queue_id];
		hns3_enable_txq(txq, false);
	} else {
		rxq = hw->data->rx_queues[queue_id];
		hns3_enable_rxq(rxq, false);
	}

	ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
	if (ret) {
		hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
		return ret;
	}

	reset_wait_times = 0;
	max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
	while (reset_wait_times < max_wait_times) {
		/* Wait for queue hw reset */
		rte_delay_ms(HNS3_POLL_RESPONE_MS);
		ret = hns3_get_queue_reset_status(hw, queue_id,
						  queue_type, &reset_status);
		if (ret)
			goto queue_reset_fail;

		if (reset_status)
			break;
		reset_wait_times++;
	}

	if (!reset_status) {
		hns3_err(hw, "reset queue timeout, queue_id = %u, queue_type = %s",
			 queue_id,
			 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
		ret = -ETIMEDOUT;
		goto queue_reset_fail;
	}

	ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
	if (ret)
		hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);

	return ret;

queue_reset_fail:
	hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
	return ret;
}
"Tx" : "Rx", ret); 867 return ret; 868 } 869 870 *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B); 871 return ret; 872 } 873 874 static int 875 hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id, 876 enum hns3_ring_type queue_type) 877 { 878 #define HNS3_QUEUE_RESET_TRY_MS 200 879 struct hns3_tx_queue *txq; 880 struct hns3_rx_queue *rxq; 881 uint32_t reset_wait_times; 882 uint32_t max_wait_times; 883 uint8_t reset_status; 884 int ret; 885 886 if (queue_type == HNS3_RING_TYPE_TX) { 887 txq = hw->data->tx_queues[queue_id]; 888 hns3_enable_txq(txq, false); 889 } else { 890 rxq = hw->data->rx_queues[queue_id]; 891 hns3_enable_rxq(rxq, false); 892 } 893 894 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true); 895 if (ret) { 896 hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret); 897 return ret; 898 } 899 900 reset_wait_times = 0; 901 max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS; 902 while (reset_wait_times < max_wait_times) { 903 /* Wait for queue hw reset */ 904 rte_delay_ms(HNS3_POLL_RESPONE_MS); 905 ret = hns3_get_queue_reset_status(hw, queue_id, 906 queue_type, &reset_status); 907 if (ret) 908 goto queue_reset_fail; 909 910 if (reset_status) 911 break; 912 reset_wait_times++; 913 } 914 915 if (!reset_status) { 916 hns3_err(hw, "reset queue timeout, queue_id = %u, queue_type = %s", 917 queue_id, 918 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx"); 919 ret = -ETIMEDOUT; 920 goto queue_reset_fail; 921 } 922 923 ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false); 924 if (ret) 925 hns3_err(hw, "deassert queue reset fail, ret = %d.", ret); 926 927 return ret; 928 929 queue_reset_fail: 930 hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false); 931 return ret; 932 } 933 934 uint32_t 935 hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id) 936 { 937 uint32_t reg_offset; 938 939 /* Need an extend offset to config queues > 64 */ 940 if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID) 941 reg_offset = HNS3_TQP_INTR_REG_BASE + 942 tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET; 943 else 944 reg_offset = HNS3_TQP_INTR_EXT_REG_BASE + 945 tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID * 946 HNS3_TQP_INTR_HIGH_ORDER_OFFSET + 947 tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID * 948 HNS3_TQP_INTR_LOW_ORDER_OFFSET; 949 950 return reg_offset; 951 } 952 953 void 954 hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, 955 uint8_t gl_idx, uint16_t gl_value) 956 { 957 uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG, 958 HNS3_TQP_INTR_GL1_REG, 959 HNS3_TQP_INTR_GL2_REG}; 960 uint32_t addr, value; 961 962 if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX) 963 return; 964 965 addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id); 966 if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US) 967 value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US; 968 else 969 value = HNS3_GL_USEC_TO_REG(gl_value); 970 971 hns3_write_dev(hw, addr, value); 972 } 973 974 void 975 hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value) 976 { 977 uint32_t addr, value; 978 979 if (rl_value > HNS3_TQP_INTR_RL_MAX) 980 return; 981 982 addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id); 983 value = HNS3_RL_USEC_TO_REG(rl_value); 984 if (value > 0) 985 value |= HNS3_TQP_INTR_RL_ENABLE_MASK; 986 987 hns3_write_dev(hw, addr, value); 988 } 989 990 void 991 hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value) 992 { 993 uint32_t addr; 994 995 /* 996 * int_ql_max == 0 means the hardware does not support QL, 997 
/*
 * Enable all Rx queue interrupts when in interrupt Rx mode.
 * This API is called before enabling the Rx and Tx queues (in the normal
 * start or reset recovery scenarios) and restores the hardware Rx queue
 * interrupt enable state, which is cleared by FLR.
 */
void
hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
	uint16_t nb_rx_q = hw->data->nb_rx_queues;
	uint16_t i;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return;

	for (i = 0; i < nb_rx_q; i++)
		hns3_queue_intr_enable(hw, i, en);
}

int
hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return -ENOTSUP;

	hns3_queue_intr_enable(hw, queue_id, true);

	return rte_intr_ack(intr_handle);
}

int
hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return -ENOTSUP;

	hns3_queue_intr_enable(hw, queue_id, false);

	return 0;
}
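/*
 * Usage sketch (editorial, not part of the driver): the two callbacks above
 * back rte_eth_dev_rx_intr_enable()/_disable(). A typical interrupt-driven
 * Rx loop in an application looks roughly like (pkts, event and MAX_PKTS are
 * placeholders):
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	for (;;) {
 *		if (rte_eth_rx_burst(port_id, queue_id, pkts, MAX_PKTS) == 0) {
 *			rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *			rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
 *			rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *		}
 *	}
 */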
static int
hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rx_queue *rxq;
	int ret;

	PMD_INIT_FUNC_TRACE();

	rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
	ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
	if (ret) {
		hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.",
			 idx, ret);
		return ret;
	}

	rxq->next_to_use = 0;
	rxq->rx_rearm_start = 0;
	rxq->rx_free_hold = 0;
	rxq->rx_rearm_nb = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	hns3_init_rx_queue_hw(rxq);
	hns3_rxq_vec_setup(rxq);

	return 0;
}

static void
hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rx_queue *rxq;

	rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
	rxq->next_to_use = 0;
	rxq->rx_free_hold = 0;
	rxq->rx_rearm_start = 0;
	rxq->rx_rearm_nb = 0;
	hns3_init_rx_queue_hw(rxq);
}

static void
hns3_init_txq(struct hns3_tx_queue *txq)
{
	struct hns3_desc *desc;
	uint16_t i;

	/* Clear tx bd */
	desc = txq->tx_ring;
	for (i = 0; i < txq->nb_tx_desc; i++) {
		desc->tx.tp_fe_sc_vld_ra_ri = 0;
		desc++;
	}

	txq->next_to_use = 0;
	txq->next_to_clean = 0;
	txq->tx_bd_ready = txq->nb_tx_desc - 1;
	hns3_init_tx_queue_hw(txq);
}

static void
hns3_init_tx_ring_tc(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_queue *txq;
	int i, num;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
		uint16_t j;

		if (!tc_queue->enable)
			continue;

		for (j = 0; j < tc_queue->tqp_count; j++) {
			num = tc_queue->tqp_offset + j;
			txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
			if (txq == NULL)
				continue;

			hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
		}
	}
}

static int
hns3_init_rx_queues(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rx_queue *rxq;
	uint16_t i, j;
	int ret;

	/* Initialize RSS for queues */
	ret = hns3_config_rss(hns);
	if (ret) {
		hns3_err(hw, "failed to configure rss, ret = %d.", ret);
		return ret;
	}

	for (i = 0; i < hw->data->nb_rx_queues; i++) {
		rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
		if (!rxq) {
			hns3_err(hw, "Rx queue %u not available or setup.", i);
			goto out;
		}

		if (rxq->rx_deferred_start)
			continue;

		ret = hns3_init_rxq(hns, i);
		if (ret) {
			hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
				 ret);
			goto out;
		}
	}

	for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
		hns3_init_fake_rxq(hns, i);

	return 0;

out:
	for (j = 0; j < i; j++) {
		rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
		if (rxq->rx_deferred_start)
			continue;

		hns3_rx_queue_release_mbufs(rxq);
	}

	return ret;
}

static int
hns3_init_tx_queues(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_queue *txq;
	uint16_t i;

	for (i = 0; i < hw->data->nb_tx_queues; i++) {
		txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
		if (!txq) {
			hns3_err(hw, "Tx queue %u not available or setup.", i);
			return -EINVAL;
		}

		if (txq->tx_deferred_start)
			continue;
		hns3_init_txq(txq);
	}

	for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
		txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
		hns3_init_txq(txq);
	}
	hns3_init_tx_ring_tc(hns);

	return 0;
}
/*
 * Init all queues.
 * Note: just init and setup queues, and don't enable tqps.
 */
int
hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (reset_queue) {
		ret = hns3_reset_all_tqps(hns);
		if (ret) {
			hns3_err(hw, "failed to reset all queues, ret = %d.",
				 ret);
			return ret;
		}
	}

	ret = hns3_init_rx_queues(hns);
	if (ret) {
		hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
		return ret;
	}

	ret = hns3_init_tx_queues(hns);
	if (ret) {
		hns3_dev_release_mbufs(hns);
		hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
	}

	return ret;
}

void
hns3_start_tqps(struct hns3_hw *hw)
{
	struct hns3_tx_queue *txq;
	struct hns3_rx_queue *rxq;
	uint16_t i;

	hns3_enable_all_queues(hw, true);

	for (i = 0; i < hw->data->nb_tx_queues; i++) {
		txq = hw->data->tx_queues[i];
		if (txq->enabled)
			hw->data->tx_queue_state[i] =
				RTE_ETH_QUEUE_STATE_STARTED;
	}

	for (i = 0; i < hw->data->nb_rx_queues; i++) {
		rxq = hw->data->rx_queues[i];
		if (rxq->enabled)
			hw->data->rx_queue_state[i] =
				RTE_ETH_QUEUE_STATE_STARTED;
	}
}

void
hns3_stop_tqps(struct hns3_hw *hw)
{
	uint16_t i;

	hns3_enable_all_queues(hw, false);

	for (i = 0; i < hw->data->nb_tx_queues; i++)
		hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	for (i = 0; i < hw->data->nb_rx_queues; i++)
		hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}

/*
 * Iterate over all Rx queues and call the callback() function for each one.
 *
 * @param[in] dev
 *   The target eth dev.
 * @param[in] callback
 *   The function to call for each queue. If the callback returns a nonzero
 *   value, iteration stops and that value is returned.
 * @param[in] arg
 *   The arguments to provide the callback function with.
 *
 * @return
 *   0 on success, otherwise a nonzero error value.
 */
int
hns3_rxq_iterate(struct rte_eth_dev *dev,
		 int (*callback)(struct hns3_rx_queue *, void *), void *arg)
{
	uint32_t i;
	int ret;

	if (dev->data->rx_queues == NULL)
		return -EINVAL;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		ret = callback(dev->data->rx_queues[i], arg);
		if (ret != 0)
			return ret;
	}

	return 0;
}
1364 */ 1365 rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) * 1366 sizeof(struct hns3_desc); 1367 rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx, 1368 rx_desc, HNS3_RING_BASE_ALIGN, 1369 q_info->socket_id); 1370 if (rx_mz == NULL) { 1371 hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!", 1372 q_info->idx); 1373 hns3_rx_queue_release(rxq); 1374 return NULL; 1375 } 1376 rxq->mz = rx_mz; 1377 rxq->rx_ring = (struct hns3_desc *)rx_mz->addr; 1378 rxq->rx_ring_phys_addr = rx_mz->iova; 1379 1380 return rxq; 1381 } 1382 1383 static int 1384 hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, 1385 uint16_t nb_desc, unsigned int socket_id) 1386 { 1387 struct hns3_adapter *hns = dev->data->dev_private; 1388 struct hns3_hw *hw = &hns->hw; 1389 struct hns3_queue_info q_info; 1390 struct hns3_rx_queue *rxq; 1391 uint16_t nb_rx_q; 1392 1393 if (hw->fkq_data.rx_queues[idx]) { 1394 hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]); 1395 hw->fkq_data.rx_queues[idx] = NULL; 1396 } 1397 1398 q_info.idx = idx; 1399 q_info.socket_id = socket_id; 1400 q_info.nb_desc = nb_desc; 1401 q_info.type = "hns3 fake RX queue"; 1402 q_info.ring_name = "rx_fake_ring"; 1403 rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info); 1404 if (rxq == NULL) { 1405 hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx); 1406 return -ENOMEM; 1407 } 1408 1409 /* Don't need alloc sw_ring, because upper applications don't use it */ 1410 rxq->sw_ring = NULL; 1411 1412 rxq->hns = hns; 1413 rxq->rx_deferred_start = false; 1414 rxq->port_id = dev->data->port_id; 1415 rxq->configured = true; 1416 nb_rx_q = dev->data->nb_rx_queues; 1417 rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET + 1418 (nb_rx_q + idx) * HNS3_TQP_REG_SIZE); 1419 rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE; 1420 1421 rte_spinlock_lock(&hw->lock); 1422 hw->fkq_data.rx_queues[idx] = rxq; 1423 rte_spinlock_unlock(&hw->lock); 1424 1425 return 0; 1426 } 1427 1428 static void* 1429 hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, 1430 struct hns3_queue_info *q_info) 1431 { 1432 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1433 const struct rte_memzone *tx_mz; 1434 struct hns3_tx_queue *txq; 1435 struct hns3_desc *desc; 1436 unsigned int tx_desc; 1437 uint16_t i; 1438 1439 txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue), 1440 RTE_CACHE_LINE_SIZE, q_info->socket_id); 1441 if (txq == NULL) { 1442 hns3_err(hw, "Failed to allocate memory for No.%u tx ring!", 1443 q_info->idx); 1444 return NULL; 1445 } 1446 1447 /* Allocate tx ring hardware descriptors. 
static void *
hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
			    struct hns3_queue_info *q_info)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	const struct rte_memzone *tx_mz;
	struct hns3_tx_queue *txq;
	struct hns3_desc *desc;
	unsigned int tx_desc;
	uint16_t i;

	txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
				 RTE_CACHE_LINE_SIZE, q_info->socket_id);
	if (txq == NULL) {
		hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
			 q_info->idx);
		return NULL;
	}

	/* Allocate tx ring hardware descriptors. */
	txq->queue_id = q_info->idx;
	txq->nb_tx_desc = q_info->nb_desc;
	tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
	tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
					 tx_desc, HNS3_RING_BASE_ALIGN,
					 q_info->socket_id);
	if (tx_mz == NULL) {
		hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
			 q_info->idx);
		hns3_tx_queue_release(txq);
		return NULL;
	}
	txq->mz = tx_mz;
	txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
	txq->tx_ring_phys_addr = tx_mz->iova;

	/* Clear tx bd */
	desc = txq->tx_ring;
	for (i = 0; i < txq->nb_tx_desc; i++) {
		desc->tx.tp_fe_sc_vld_ra_ri = 0;
		desc++;
	}

	return txq;
}

static int
hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			 uint16_t nb_desc, unsigned int socket_id)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_queue_info q_info;
	struct hns3_tx_queue *txq;
	uint16_t nb_tx_q;

	if (hw->fkq_data.tx_queues[idx] != NULL) {
		hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
		hw->fkq_data.tx_queues[idx] = NULL;
	}

	q_info.idx = idx;
	q_info.socket_id = socket_id;
	q_info.nb_desc = nb_desc;
	q_info.type = "hns3 fake TX queue";
	q_info.ring_name = "tx_fake_ring";
	txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
	if (txq == NULL) {
		hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
		return -ENOMEM;
	}

	/* No need to allocate sw_ring, because upper applications don't use it */
	txq->sw_ring = NULL;
	txq->free = NULL;

	txq->hns = hns;
	txq->tx_deferred_start = false;
	txq->port_id = dev->data->port_id;
	txq->configured = true;
	nb_tx_q = dev->data->nb_tx_queues;
	txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
				(nb_tx_q + idx) * HNS3_TQP_REG_SIZE);

	rte_spinlock_lock(&hw->lock);
	hw->fkq_data.tx_queues[idx] = txq;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
static int
hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
{
	uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
	void **rxq;
	uint16_t i;

	if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
		/* first time configuration */
		uint32_t size;
		size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
		hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
						     RTE_CACHE_LINE_SIZE);
		if (hw->fkq_data.rx_queues == NULL) {
			hw->fkq_data.nb_fake_rx_queues = 0;
			return -ENOMEM;
		}
	} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
		/* re-configure */
		rxq = hw->fkq_data.rx_queues;
		for (i = nb_queues; i < old_nb_queues; i++)
			hns3_rx_queue_release_lock(rxq[i]);

		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;
			memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
		}

		hw->fkq_data.rx_queues = rxq;
	} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
		rxq = hw->fkq_data.rx_queues;
		for (i = nb_queues; i < old_nb_queues; i++)
			hns3_rx_queue_release_lock(rxq[i]);

		rte_free(hw->fkq_data.rx_queues);
		hw->fkq_data.rx_queues = NULL;
	}

	hw->fkq_data.nb_fake_rx_queues = nb_queues;

	return 0;
}

static int
hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
{
	uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
	void **txq;
	uint16_t i;

	if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
		/* first time configuration */
		uint32_t size;
		size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
		hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
						     RTE_CACHE_LINE_SIZE);
		if (hw->fkq_data.tx_queues == NULL) {
			hw->fkq_data.nb_fake_tx_queues = 0;
			return -ENOMEM;
		}
	} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
		/* re-configure */
		txq = hw->fkq_data.tx_queues;
		for (i = nb_queues; i < old_nb_queues; i++)
			hns3_tx_queue_release_lock(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;
			memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
		}

		hw->fkq_data.tx_queues = txq;
	} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
		txq = hw->fkq_data.tx_queues;
		for (i = nb_queues; i < old_nb_queues; i++)
			hns3_tx_queue_release_lock(txq[i]);

		rte_free(hw->fkq_data.tx_queues);
		hw->fkq_data.tx_queues = NULL;
	}
	hw->fkq_data.nb_fake_tx_queues = nb_queues;

	return 0;
}
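/*
 * Editorial note: the "fake" queues managed above exist for hardware without
 * independent Tx/Rx support (INDEP_TXRX). On such hardware a TQP's Tx and Rx
 * rings share one enable switch, so when the application configures unequal
 * numbers of Rx and Tx queues, the function below pads the smaller side with
 * minimal placeholder rings up to hw->cfg_max_queues so that every enabled
 * TQP has both directions backed by a valid ring.
 */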
int
hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
			      uint16_t nb_tx_q)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t rx_need_add_nb_q;
	uint16_t tx_need_add_nb_q;
	uint16_t port_id;
	uint16_t q;
	int ret;

	if (hns3_dev_get_support(hw, INDEP_TXRX))
		return 0;

	/* Setup new number of fake RX/TX queues and reconfigure device. */
	rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
	tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
	ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
	if (ret) {
		hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
		return ret;
	}

	ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
	if (ret) {
		hns3_err(hw, "Fail to configure fake tx queues: %d", ret);
		goto cfg_fake_tx_q_fail;
	}

	/* Allocate and set up fake RX queue per Ethernet port. */
	port_id = hw->data->port_id;
	for (q = 0; q < rx_need_add_nb_q; q++) {
		ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
					       rte_eth_dev_socket_id(port_id));
		if (ret)
			goto setup_fake_rx_q_fail;
	}

	/* Allocate and set up fake TX queue per Ethernet port. */
	for (q = 0; q < tx_need_add_nb_q; q++) {
		ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
					       rte_eth_dev_socket_id(port_id));
		if (ret)
			goto setup_fake_tx_q_fail;
	}

	return 0;

setup_fake_tx_q_fail:
setup_fake_rx_q_fail:
	(void)hns3_fake_tx_queue_config(hw, 0);
cfg_fake_tx_q_fail:
	(void)hns3_fake_rx_queue_config(hw, 0);

	return ret;
}

void
hns3_dev_release_mbufs(struct hns3_adapter *hns)
{
	struct rte_eth_dev_data *dev_data = hns->hw.data;
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	uint16_t i;

	if (dev_data->rx_queues)
		for (i = 0; i < dev_data->nb_rx_queues; i++) {
			rxq = dev_data->rx_queues[i];
			if (rxq == NULL)
				continue;
			hns3_rx_queue_release_mbufs(rxq);
		}

	if (dev_data->tx_queues)
		for (i = 0; i < dev_data->nb_tx_queues; i++) {
			txq = dev_data->tx_queues[i];
			if (txq == NULL)
				continue;
			hns3_tx_queue_release_mbufs(txq);
		}
}

static int
hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
{
	uint16_t vld_buf_size;
	uint16_t num_hw_specs;
	uint16_t i;

	/*
	 * The hns3 network engine supports only four typical buffer size
	 * specifications. The buffer size affects the maximum packet length
	 * and the maximum number of segments when HW GRO is enabled on the
	 * receive side. The relationship between them is as follows:
	 *      rx_buf_size     |  max_gro_pkt_len  |  max_gro_nb_seg
	 * ---------------------|-------------------|----------------
	 * HNS3_4K_BD_BUF_SIZE  |        60KB       |       15
	 * HNS3_2K_BD_BUF_SIZE  |        62KB       |       31
	 * HNS3_1K_BD_BUF_SIZE  |        63KB       |       63
	 * HNS3_512_BD_BUF_SIZE |       31.5KB      |       63
	 */
	static const uint16_t hw_rx_buf_size[] = {
		HNS3_4K_BD_BUF_SIZE,
		HNS3_2K_BD_BUF_SIZE,
		HNS3_1K_BD_BUF_SIZE,
		HNS3_512_BD_BUF_SIZE
	};

	vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
			RTE_PKTMBUF_HEADROOM);
	if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
		return -EINVAL;

	num_hw_specs = RTE_DIM(hw_rx_buf_size);
	for (i = 0; i < num_hw_specs; i++) {
		if (vld_buf_size >= hw_rx_buf_size[i]) {
			*rx_buf_len = hw_rx_buf_size[i];
			break;
		}
	}
	return 0;
}
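/*
 * Worked example (editorial): for a mempool created with data room
 * RTE_MBUF_DEFAULT_BUF_SIZE (2048 + RTE_PKTMBUF_HEADROOM), the valid buffer
 * size computed above is 2048 bytes, so hns3_rx_buf_len_calc() selects
 * HNS3_2K_BD_BUF_SIZE. A data room smaller than
 * HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM is rejected with -EINVAL.
 */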
static int
hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
			    uint16_t nb_desc)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	uint32_t frame_size = dev->data->mtu + HNS3_ETH_OVERHEAD;
	uint16_t min_vec_bds;

	/*
	 * The HNS3 hardware network engine enables scatter by default. If
	 * the driver does not work in scattered mode, packets larger than
	 * buf_size but smaller than the frame size would be spread across
	 * multiple BDs, which the driver cannot handle.
	 */
	if (!hw->data->scattered_rx && frame_size > buf_size) {
		hns3_err(hw, "frame size is not allowed to be set greater "
			 "than rx_buf_len if scattered is off.");
		return -EINVAL;
	}

	if (pkt_burst == hns3_recv_pkts_vec ||
	    pkt_burst == hns3_recv_pkts_vec_sve) {
		min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
			      HNS3_DEFAULT_RX_BURST;
		if (nb_desc < min_vec_bds ||
		    nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
			hns3_err(hw, "if Rx burst mode is vector, "
				 "number of descriptor is required to be "
				 "bigger than min vector bds:%u, and could be "
				 "divided by rxq rearm thresh:%u.",
				 min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
			return -EINVAL;
		}
	}
	return 0;
}

static int
hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
			 struct rte_mempool *mp, uint16_t nb_desc,
			 uint16_t *buf_size)
{
	int ret;

	if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
	    nb_desc % HNS3_ALIGN_RING_DESC) {
		hns3_err(hw, "Number (%u) of rx descriptors is invalid",
			 nb_desc);
		return -EINVAL;
	}

	if (conf->rx_free_thresh >= nb_desc) {
		hns3_err(hw, "rx_free_thresh (%u) must be less than %u",
			 conf->rx_free_thresh, nb_desc);
		return -EINVAL;
	}

	if (conf->rx_drop_en == 0)
		hns3_warn(hw, "if no descriptors available, packets are always "
			  "dropped and rx_drop_en (1) is fixed on");

	if (hns3_rx_buf_len_calc(mp, buf_size)) {
		hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! "
			 "minimal data room size (%u).",
			 rte_pktmbuf_data_room_size(mp),
			 HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
		return -EINVAL;
	}

	if (hw->data->dev_started) {
		ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
		if (ret) {
			hns3_err(hw, "Rx queue runtime setup fail.");
			return ret;
		}
	}

	return 0;
}

uint32_t
hns3_get_tqp_reg_offset(uint16_t queue_id)
{
	uint32_t reg_offset;

	/* An extended offset is needed to configure queues beyond the first 1024. */
	if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
		reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
	else
		reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
			     (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
			     HNS3_TQP_REG_SIZE;

	return reg_offset;
}
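/*
 * Usage sketch (editorial, not part of the driver): hns3_rx_queue_setup()
 * below is reached through the generic ethdev API, and the checks in
 * hns3_rx_queue_conf_check() above then validate nb_desc and the mempool,
 * e.g.:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *			256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     NULL, mp);
 *
 * where a NULL rte_eth_rxconf means the port's default Rx configuration.
 */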
int
hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
		    unsigned int socket_id, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_queue_info q_info;
	struct hns3_rx_queue *rxq;
	uint16_t rx_buf_size;
	int rx_entry_len;
	int ret;

	ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
	if (ret)
		return ret;

	if (dev->data->rx_queues[idx]) {
		hns3_rx_queue_release(dev->data->rx_queues[idx]);
		dev->data->rx_queues[idx] = NULL;
	}

	q_info.idx = idx;
	q_info.socket_id = socket_id;
	q_info.nb_desc = nb_desc;
	q_info.type = "hns3 RX queue";
	q_info.ring_name = "rx_ring";

	rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
	if (rxq == NULL) {
		hns3_err(hw,
			 "Failed to alloc mem and reserve DMA mem for rx ring!");
		return -ENOMEM;
	}

	rxq->hns = hns;
	rxq->ptype_tbl = &hns->ptype_tbl;
	rxq->mb_pool = mp;
	rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
		conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;

	rxq->rx_deferred_start = conf->rx_deferred_start;
	if (rxq->rx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
		hns3_warn(hw, "deferred start is not supported.");
		rxq->rx_deferred_start = false;
	}

	rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
			sizeof(struct hns3_entry);
	rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		hns3_err(hw, "Failed to allocate memory for rx sw ring!");
		hns3_rx_queue_release(rxq);
		return -ENOMEM;
	}

	rxq->next_to_use = 0;
	rxq->rx_free_hold = 0;
	rxq->rx_rearm_start = 0;
	rxq->rx_rearm_nb = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->port_id = dev->data->port_id;
	/*
	 * For the hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
	 * the pvid_sw_discard_en in the queue struct should not be changed,
	 * because PVID-related operations do not need to be processed by the
	 * PMD. For the hns3 VF device, whether it needs to process PVID depends
	 * on the configuration of the PF kernel-mode netdevice driver. The
	 * related PF configuration is delivered through the mailbox and is
	 * finally reflected in port_base_vlan_cfg.
	 */
	if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
		rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
					  HNS3_PORT_BASE_VLAN_ENABLE;
	else
		rxq->pvid_sw_discard_en = false;
	rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false;
	rxq->configured = true;
	rxq->io_base = (void *)((char *)hw->io_base +
				hns3_get_tqp_reg_offset(idx));
	rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
					     HNS3_RING_RX_HEAD_REG);
	rxq->rx_buf_len = rx_buf_size;
	memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats));
	memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
	memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));

	/* CRC len set here is used for amending packet length */
	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	rxq->bulk_mbuf_num = 0;

	rte_spinlock_lock(&hw->lock);
	dev->data->rx_queues[idx] = rxq;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

void
hns3_rx_scattered_reset(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	hw->rx_buf_len = 0;
	dev->data->scattered_rx = false;
}

void
hns3_rx_scattered_calc(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rx_queue *rxq;
	uint32_t queue_id;

	if (dev->data->rx_queues == NULL)
		return;

	for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
		rxq = dev->data->rx_queues[queue_id];
		if (hw->rx_buf_len == 0)
			hw->rx_buf_len = rxq->rx_buf_len;
		else
			hw->rx_buf_len = RTE_MIN(hw->rx_buf_len,
						 rxq->rx_buf_len);
	}

	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
	    dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
		dev->data->scattered_rx = true;
}
const uint32_t *
hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_IGMP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV4_EXT,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_TUNNEL_NVGRE,
	};
	static const uint32_t adv_layout_ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_IGMP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_INNER_L4_ICMP,
	};
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
	    dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
	    dev->rx_pkt_burst == hns3_recv_pkts_vec ||
	    dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
		if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT)) {
			*no_of_elements = RTE_DIM(adv_layout_ptypes);
			return adv_layout_ptypes;
		} else {
			*no_of_elements = RTE_DIM(ptypes);
			return ptypes;
		}
	}

	return NULL;
}

static void
hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
{
	tbl->l3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	tbl->l3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;
	tbl->l3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
	tbl->l3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
	tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;

	tbl->l4table[0] = RTE_PTYPE_L4_UDP;
	tbl->l4table[1] = RTE_PTYPE_L4_TCP;
	tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;
	tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
	tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
	tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
}

static void
hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
{
	tbl->inner_l3table[0] = RTE_PTYPE_INNER_L2_ETHER |
				RTE_PTYPE_INNER_L3_IPV4;
	tbl->inner_l3table[1] = RTE_PTYPE_INNER_L2_ETHER |
				RTE_PTYPE_INNER_L3_IPV6;
	/* There is no ptype for inner ARP/RARP */
	tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN;
	tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN;
	tbl->inner_l3table[4] = RTE_PTYPE_INNER_L2_ETHER |
				RTE_PTYPE_INNER_L3_IPV4_EXT;
	tbl->inner_l3table[5] = RTE_PTYPE_INNER_L2_ETHER |
				RTE_PTYPE_INNER_L3_IPV6_EXT;

	tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
	tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
	/* There is no ptype for inner GRE */
	tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN;
	tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
	/* There is no ptype for inner IGMP */
	tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN;
	tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;

	tbl->ol3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	tbl->ol3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	tbl->ol3table[2] = RTE_PTYPE_UNKNOWN;
	tbl->ol3table[3] = RTE_PTYPE_UNKNOWN;
	tbl->ol3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
	tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;

	tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
	tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_GRENAT;
	tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
}
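/*
 * Editorial note: when RXD_ADV_LAYOUT is supported, the ptype[] table filled
 * in below appears to be indexed directly by the packet type value reported
 * in the Rx descriptor; indexes that are not assigned here stay 0, i.e.
 * RTE_PTYPE_UNKNOWN.
 */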
RTE_PTYPE_INNER_L3_IPV6_EXT; 2077 2078 tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP; 2079 tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP; 2080 /* There is not a ptype for inner GRE */ 2081 tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN; 2082 tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP; 2083 /* There is not a ptype for inner IGMP */ 2084 tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN; 2085 tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP; 2086 2087 tbl->ol3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; 2088 tbl->ol3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; 2089 tbl->ol3table[2] = RTE_PTYPE_UNKNOWN; 2090 tbl->ol3table[3] = RTE_PTYPE_UNKNOWN; 2091 tbl->ol3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT; 2092 tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT; 2093 2094 tbl->ol4table[0] = RTE_PTYPE_UNKNOWN; 2095 tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_GRENAT; 2096 tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE; 2097 } 2098 2099 static void 2100 hns3_init_adv_layout_ptype(struct hns3_ptype_table *tbl) 2101 { 2102 uint32_t *ptype = tbl->ptype; 2103 2104 /* Non-tunnel L2 */ 2105 ptype[1] = RTE_PTYPE_L2_ETHER_ARP; 2106 ptype[3] = RTE_PTYPE_L2_ETHER_LLDP; 2107 ptype[8] = RTE_PTYPE_L2_ETHER_TIMESYNC; 2108 2109 /* Non-tunnel IPv4 */ 2110 ptype[17] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2111 RTE_PTYPE_L4_FRAG; 2112 ptype[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2113 RTE_PTYPE_L4_NONFRAG; 2114 ptype[19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2115 RTE_PTYPE_L4_UDP; 2116 ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2117 RTE_PTYPE_L4_TCP; 2118 ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2119 RTE_PTYPE_TUNNEL_GRE; 2120 ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2121 RTE_PTYPE_L4_SCTP; 2122 ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2123 RTE_PTYPE_L4_IGMP; 2124 ptype[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2125 RTE_PTYPE_L4_ICMP; 2126 /* The next ptype is PTP over IPv4 + UDP */ 2127 ptype[25] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2128 RTE_PTYPE_L4_UDP; 2129 2130 /* IPv4 --> GRE/Teredo/VXLAN */ 2131 ptype[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2132 RTE_PTYPE_TUNNEL_GRENAT; 2133 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ 2134 ptype[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2135 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER; 2136 2137 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ 2138 ptype[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2139 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2140 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2141 RTE_PTYPE_INNER_L4_FRAG; 2142 ptype[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2143 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2144 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2145 RTE_PTYPE_INNER_L4_NONFRAG; 2146 ptype[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2147 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2148 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2149 RTE_PTYPE_INNER_L4_UDP; 2150 ptype[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2151 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2152 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2153 RTE_PTYPE_INNER_L4_TCP; 2154 ptype[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2155 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2156 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2157 RTE_PTYPE_INNER_L4_SCTP; 
2158 /* The next ptype's inner L4 is IGMP */ 2159 ptype[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2160 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2161 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; 2162 ptype[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2163 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2164 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2165 RTE_PTYPE_INNER_L4_ICMP; 2166 2167 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ 2168 ptype[39] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2169 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2170 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2171 RTE_PTYPE_INNER_L4_FRAG; 2172 ptype[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2173 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2174 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2175 RTE_PTYPE_INNER_L4_NONFRAG; 2176 ptype[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2177 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2178 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2179 RTE_PTYPE_INNER_L4_UDP; 2180 ptype[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2181 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2182 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2183 RTE_PTYPE_INNER_L4_TCP; 2184 ptype[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2185 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2186 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2187 RTE_PTYPE_INNER_L4_SCTP; 2188 /* The next ptype's inner L4 is IGMP */ 2189 ptype[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2190 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2191 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; 2192 ptype[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 2193 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2194 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2195 RTE_PTYPE_INNER_L4_ICMP; 2196 2197 /* Non-tunnel IPv6 */ 2198 ptype[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2199 RTE_PTYPE_L4_FRAG; 2200 ptype[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2201 RTE_PTYPE_L4_NONFRAG; 2202 ptype[113] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2203 RTE_PTYPE_L4_UDP; 2204 ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2205 RTE_PTYPE_L4_TCP; 2206 ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2207 RTE_PTYPE_TUNNEL_GRE; 2208 ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2209 RTE_PTYPE_L4_SCTP; 2210 ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2211 RTE_PTYPE_L4_IGMP; 2212 ptype[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2213 RTE_PTYPE_L4_ICMP; 2214 /* Special for PTP over IPv6 + UDP */ 2215 ptype[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2216 RTE_PTYPE_L4_UDP; 2217 2218 /* IPv6 --> GRE/Teredo/VXLAN */ 2219 ptype[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2220 RTE_PTYPE_TUNNEL_GRENAT; 2221 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ 2222 ptype[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2223 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER; 2224 2225 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ 2226 ptype[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2227 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2228 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2229 RTE_PTYPE_INNER_L4_FRAG; 2230 ptype[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2231 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2232 
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2233 RTE_PTYPE_INNER_L4_NONFRAG; 2234 ptype[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2235 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2236 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2237 RTE_PTYPE_INNER_L4_UDP; 2238 ptype[128] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2239 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2240 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2241 RTE_PTYPE_INNER_L4_TCP; 2242 ptype[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2243 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2244 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2245 RTE_PTYPE_INNER_L4_SCTP; 2246 /* The next ptype's inner L4 is IGMP */ 2247 ptype[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2248 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2249 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; 2250 ptype[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2251 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2252 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 2253 RTE_PTYPE_INNER_L4_ICMP; 2254 2255 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ 2256 ptype[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2257 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2258 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2259 RTE_PTYPE_INNER_L4_FRAG; 2260 ptype[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2261 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2262 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2263 RTE_PTYPE_INNER_L4_NONFRAG; 2264 ptype[135] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2265 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2266 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2267 RTE_PTYPE_INNER_L4_UDP; 2268 ptype[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2269 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2270 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2271 RTE_PTYPE_INNER_L4_TCP; 2272 ptype[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2273 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2274 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2275 RTE_PTYPE_INNER_L4_SCTP; 2276 /* The next ptype's inner L4 is IGMP */ 2277 ptype[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2278 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2279 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; 2280 ptype[139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 2281 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 2282 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 2283 RTE_PTYPE_INNER_L4_ICMP; 2284 } 2285 2286 void 2287 hns3_init_rx_ptype_tble(struct rte_eth_dev *dev) 2288 { 2289 struct hns3_adapter *hns = dev->data->dev_private; 2290 struct hns3_ptype_table *tbl = &hns->ptype_tbl; 2291 2292 memset(tbl, 0, sizeof(*tbl)); 2293 2294 hns3_init_non_tunnel_ptype_tbl(tbl); 2295 hns3_init_tunnel_ptype_tbl(tbl); 2296 hns3_init_adv_layout_ptype(tbl); 2297 } 2298 2299 static inline void 2300 hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb, 2301 uint32_t l234_info, const struct hns3_desc *rxd) 2302 { 2303 #define HNS3_STRP_STATUS_NUM 0x4 2304 2305 #define HNS3_NO_STRP_VLAN_VLD 0x0 2306 #define HNS3_INNER_STRP_VLAN_VLD 0x1 2307 #define HNS3_OUTER_STRP_VLAN_VLD 0x2 2308 uint32_t strip_status; 2309 uint32_t report_mode; 2310 2311 /* 2312 * Since HW limitation, the vlan tag will always be inserted into RX 2313 * descriptor when strip the tag from packet, driver needs to determine 2314 * reporting which tag to mbuf according to the PVID 
configuration 2315 * and vlan striped status. 2316 */ 2317 static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = { 2318 { 2319 HNS3_NO_STRP_VLAN_VLD, 2320 HNS3_OUTER_STRP_VLAN_VLD, 2321 HNS3_INNER_STRP_VLAN_VLD, 2322 HNS3_OUTER_STRP_VLAN_VLD 2323 }, 2324 { 2325 HNS3_NO_STRP_VLAN_VLD, 2326 HNS3_NO_STRP_VLAN_VLD, 2327 HNS3_NO_STRP_VLAN_VLD, 2328 HNS3_INNER_STRP_VLAN_VLD 2329 } 2330 }; 2331 strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M, 2332 HNS3_RXD_STRP_TAGP_S); 2333 report_mode = report_type[rxq->pvid_sw_discard_en][strip_status]; 2334 switch (report_mode) { 2335 case HNS3_NO_STRP_VLAN_VLD: 2336 mb->vlan_tci = 0; 2337 return; 2338 case HNS3_INNER_STRP_VLAN_VLD: 2339 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 2340 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag); 2341 return; 2342 case HNS3_OUTER_STRP_VLAN_VLD: 2343 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 2344 mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag); 2345 return; 2346 default: 2347 mb->vlan_tci = 0; 2348 return; 2349 } 2350 } 2351 2352 static inline void 2353 recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg, 2354 struct rte_mbuf *rxm, struct hns3_rx_queue *rxq, 2355 uint16_t data_len) 2356 { 2357 uint8_t crc_len = rxq->crc_len; 2358 2359 if (data_len <= crc_len) { 2360 rte_pktmbuf_free_seg(rxm); 2361 first_seg->nb_segs--; 2362 last_seg->data_len = (uint16_t)(last_seg->data_len - 2363 (crc_len - data_len)); 2364 last_seg->next = NULL; 2365 } else 2366 rxm->data_len = (uint16_t)(data_len - crc_len); 2367 } 2368 2369 static inline struct rte_mbuf * 2370 hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq) 2371 { 2372 int ret; 2373 2374 if (likely(rxq->bulk_mbuf_num > 0)) 2375 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num]; 2376 2377 ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf, 2378 HNS3_BULK_ALLOC_MBUF_NUM); 2379 if (likely(ret == 0)) { 2380 rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM; 2381 return rxq->bulk_mbuf[--rxq->bulk_mbuf_num]; 2382 } else 2383 return rte_mbuf_raw_alloc(rxq->mb_pool); 2384 } 2385 2386 static void 2387 hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf, 2388 uint64_t timestamp) 2389 { 2390 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns); 2391 2392 mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | 2393 RTE_MBUF_F_RX_IEEE1588_TMST; 2394 if (hns3_timestamp_rx_dynflag > 0) { 2395 *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset, 2396 rte_mbuf_timestamp_t *) = timestamp; 2397 mbuf->ol_flags |= hns3_timestamp_rx_dynflag; 2398 } 2399 2400 pf->rx_timestamp = timestamp; 2401 } 2402 2403 uint16_t 2404 hns3_recv_pkts_simple(void *rx_queue, 2405 struct rte_mbuf **rx_pkts, 2406 uint16_t nb_pkts) 2407 { 2408 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */ 2409 volatile struct hns3_desc *rxdp; /* pointer of the current desc */ 2410 struct hns3_rx_queue *rxq; /* RX queue */ 2411 struct hns3_entry *sw_ring; 2412 struct hns3_entry *rxe; 2413 struct hns3_desc rxd; 2414 struct rte_mbuf *nmb; /* pointer of the new mbuf */ 2415 struct rte_mbuf *rxm; 2416 uint32_t bd_base_info; 2417 uint32_t l234_info; 2418 uint32_t ol_info; 2419 uint64_t dma_addr; 2420 uint16_t nb_rx_bd; 2421 uint16_t nb_rx; 2422 uint16_t rx_id; 2423 int ret; 2424 2425 nb_rx = 0; 2426 nb_rx_bd = 0; 2427 rxq = rx_queue; 2428 rx_ring = rxq->rx_ring; 2429 sw_ring = rxq->sw_ring; 2430 rx_id = rxq->next_to_use; 2431 2432 while (nb_rx < nb_pkts) { 2433 rxdp = &rx_ring[rx_id]; 2434 bd_base_info = 
rte_le_to_cpu_32(rxdp->rx.bd_base_info); 2435 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) 2436 break; 2437 2438 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) - 2439 (1u << HNS3_RXD_VLD_B)]; 2440 2441 nmb = hns3_rx_alloc_buffer(rxq); 2442 if (unlikely(nmb == NULL)) { 2443 rte_eth_devices[rxq->port_id].data-> 2444 rx_mbuf_alloc_failed++; 2445 break; 2446 } 2447 2448 nb_rx_bd++; 2449 rxe = &sw_ring[rx_id]; 2450 rx_id++; 2451 if (unlikely(rx_id == rxq->nb_rx_desc)) 2452 rx_id = 0; 2453 2454 rte_prefetch0(sw_ring[rx_id].mbuf); 2455 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) { 2456 rte_prefetch0(&rx_ring[rx_id]); 2457 rte_prefetch0(&sw_ring[rx_id]); 2458 } 2459 2460 rxm = rxe->mbuf; 2461 rxm->ol_flags = 0; 2462 rxe->mbuf = nmb; 2463 2464 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) 2465 hns3_rx_ptp_timestamp_handle(rxq, rxm, 2466 rte_le_to_cpu_64(rxdp->timestamp)); 2467 2468 dma_addr = rte_mbuf_data_iova_default(nmb); 2469 rxdp->addr = rte_cpu_to_le_64(dma_addr); 2470 rxdp->rx.bd_base_info = 0; 2471 2472 rxm->data_off = RTE_PKTMBUF_HEADROOM; 2473 rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) - 2474 rxq->crc_len; 2475 rxm->data_len = rxm->pkt_len; 2476 rxm->port = rxq->port_id; 2477 rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash); 2478 rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 2479 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) { 2480 rxm->hash.fdir.hi = 2481 rte_le_to_cpu_16(rxd.rx.fd_id); 2482 rxm->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 2483 } 2484 rxm->nb_segs = 1; 2485 rxm->next = NULL; 2486 2487 /* Load remained descriptor data and extract necessary fields */ 2488 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info); 2489 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info); 2490 ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info); 2491 if (unlikely(ret)) 2492 goto pkt_err; 2493 2494 rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info); 2495 2496 if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) 2497 rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 2498 2499 hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd); 2500 2501 /* Increment bytes counter */ 2502 rxq->basic_stats.bytes += rxm->pkt_len; 2503 2504 rx_pkts[nb_rx++] = rxm; 2505 continue; 2506 pkt_err: 2507 rte_pktmbuf_free(rxm); 2508 } 2509 2510 rxq->next_to_use = rx_id; 2511 rxq->rx_free_hold += nb_rx_bd; 2512 if (rxq->rx_free_hold > rxq->rx_free_thresh) { 2513 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold); 2514 rxq->rx_free_hold = 0; 2515 } 2516 2517 return nb_rx; 2518 } 2519 2520 uint16_t 2521 hns3_recv_scattered_pkts(void *rx_queue, 2522 struct rte_mbuf **rx_pkts, 2523 uint16_t nb_pkts) 2524 { 2525 volatile struct hns3_desc *rx_ring; /* RX ring (desc) */ 2526 volatile struct hns3_desc *rxdp; /* pointer of the current desc */ 2527 struct hns3_rx_queue *rxq; /* RX queue */ 2528 struct hns3_entry *sw_ring; 2529 struct hns3_entry *rxe; 2530 struct rte_mbuf *first_seg; 2531 struct rte_mbuf *last_seg; 2532 struct hns3_desc rxd; 2533 struct rte_mbuf *nmb; /* pointer of the new mbuf */ 2534 struct rte_mbuf *rxm; 2535 struct rte_eth_dev *dev; 2536 uint32_t bd_base_info; 2537 uint64_t timestamp; 2538 uint32_t l234_info; 2539 uint32_t gro_size; 2540 uint32_t ol_info; 2541 uint64_t dma_addr; 2542 uint16_t nb_rx_bd; 2543 uint16_t nb_rx; 2544 uint16_t rx_id; 2545 int ret; 2546 2547 nb_rx = 0; 2548 nb_rx_bd = 0; 2549 rxq = rx_queue; 2550 2551 rx_id = rxq->next_to_use; 2552 rx_ring = rxq->rx_ring; 2553 sw_ring = rxq->sw_ring; 2554 first_seg = rxq->pkt_first_seg; 2555 last_seg = 
rxq->pkt_last_seg;

	while (nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];
		bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
		if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
			break;

		/*
		 * The interaction between software and hardware when
		 * receiving a new packet in the hns3 network engine:
		 * 1. The hardware network engine first writes the packet
		 *    content to the memory pointed to by the 'addr' field of
		 *    the Rx Buffer Descriptor, then fills the parsing result
		 *    of the packet, including the valid field, into the Rx
		 *    Buffer Descriptor in one write operation.
		 * 2. The driver polls the Rx BD's valid field; once it is
		 *    valid, the driver assigns a new address to the addr
		 *    field, clears the valid field, extracts the remaining
		 *    packet information from the other Rx BD fields, and
		 *    finally writes back the number of Rx BDs processed to
		 *    the HNS3_RING_RX_HEAD_REG register to inform hardware.
		 * In the above process the ordering is critical: the CPU must
		 * read the Rx BD's other fields only after the Rx BD is seen
		 * as valid.
		 *
		 * There are two types of re-ordering: compiler re-ordering
		 * and CPU re-ordering under the ARMv8 architecture.
		 * 1. volatile handles compiler re-ordering, which is why
		 *    rx_ring/rxdp are defined with volatile.
		 * 2. a memory barrier is the common way to handle CPU
		 *    re-ordering, but its cost is high.
		 *
		 * To avoid the high cost of a memory barrier, we rely on the
		 * data dependency ordering of the ARMv8 architecture, for
		 * example:
		 *   instr01: load A
		 *   instr02: load B <- A
		 * instr02 always executes after instr01.
		 *
		 * To construct the data dependency ordering, we use the
		 * following assignment:
		 *   rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
		 *		(1u << HNS3_RXD_VLD_B)]
		 * With gcc under the ARMv8 architecture, the related assembly
		 * code looks as follows:
		 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
		 *   instr01: ldr w26, [x22, #28]  --read bd_base_info
		 *   instr02: and w0, w26, #0x10   --calc bd_base_info & 0x10
		 *   instr03: sub w0, w0, #0x10    --calc (bd_base_info &
		 *				   0x10) - 0x10
		 *   instr04: add x0, x22, x0, lsl #5 --calc copy source addr
		 *   instr05: ldp x2, x3, [x0]
		 *   instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
		 *   instr07: ldp x4, x5, [x0, #16]
		 *   instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
		 * instr05~08 depend on x0's value, x0 depends on w26's value,
		 * and w26 holds bd_base_info; this forms the data dependency
		 * ordering.
		 * note: if the BD is valid, (bd_base_info & (1u << HNS3_RXD_VLD_B)) -
		 *	 (1u << HNS3_RXD_VLD_B) is always zero, so the
		 *	 assignment is correct.
		 *
		 * So we use data dependency ordering instead of a memory
		 * barrier to improve receive performance.
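		 *
		 * For reference, an equivalent but more costly sketch of the
		 * same check using an explicit barrier instead of the data
		 * dependency would be:
		 *   if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
		 *           break;
		 *   rte_io_rmb();   --keep later BD field loads after the check
		 *   rxd = *rxdp;
		 * The data dependency form below avoids that barrier cost.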
2621 */ 2622 rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) - 2623 (1u << HNS3_RXD_VLD_B)]; 2624 RX_BD_LOG(&rxq->hns->hw, DEBUG, &rxd); 2625 2626 nmb = hns3_rx_alloc_buffer(rxq); 2627 if (unlikely(nmb == NULL)) { 2628 dev = &rte_eth_devices[rxq->port_id]; 2629 dev->data->rx_mbuf_alloc_failed++; 2630 break; 2631 } 2632 2633 nb_rx_bd++; 2634 rxe = &sw_ring[rx_id]; 2635 rx_id++; 2636 if (unlikely(rx_id == rxq->nb_rx_desc)) 2637 rx_id = 0; 2638 2639 rte_prefetch0(sw_ring[rx_id].mbuf); 2640 if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) { 2641 rte_prefetch0(&rx_ring[rx_id]); 2642 rte_prefetch0(&sw_ring[rx_id]); 2643 } 2644 2645 rxm = rxe->mbuf; 2646 rxe->mbuf = nmb; 2647 2648 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) 2649 timestamp = rte_le_to_cpu_64(rxdp->timestamp); 2650 2651 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); 2652 rxdp->rx.bd_base_info = 0; 2653 rxdp->addr = dma_addr; 2654 2655 if (first_seg == NULL) { 2656 first_seg = rxm; 2657 first_seg->nb_segs = 1; 2658 } else { 2659 first_seg->nb_segs++; 2660 last_seg->next = rxm; 2661 } 2662 2663 rxm->data_off = RTE_PKTMBUF_HEADROOM; 2664 rxm->data_len = rte_le_to_cpu_16(rxd.rx.size); 2665 2666 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { 2667 last_seg = rxm; 2668 rxm->next = NULL; 2669 continue; 2670 } 2671 2672 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) 2673 hns3_rx_ptp_timestamp_handle(rxq, first_seg, timestamp); 2674 2675 /* 2676 * The last buffer of the received packet. packet len from 2677 * buffer description may contains CRC len, packet len should 2678 * subtract it, same as data len. 2679 */ 2680 first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len); 2681 2682 /* 2683 * This is the last buffer of the received packet. If the CRC 2684 * is not stripped by the hardware: 2685 * - Subtract the CRC length from the total packet length. 2686 * - If the last buffer only contains the whole CRC or a part 2687 * of it, free the mbuf associated to the last buffer. If part 2688 * of the CRC is also contained in the previous mbuf, subtract 2689 * the length of that CRC part from the data length of the 2690 * previous mbuf. 
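		 *
		 * For example, with crc_len = 4: if this last buffer holds
		 * only 2 bytes, both bytes belong to the CRC, so
		 * recalculate_data_len() frees this mbuf and trims the
		 * remaining 2 CRC bytes from the previous segment's data_len;
		 * if it holds more than 4 bytes, only its own data_len is
		 * reduced by 4.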
2691 */ 2692 rxm->next = NULL; 2693 if (unlikely(rxq->crc_len > 0)) { 2694 first_seg->pkt_len -= rxq->crc_len; 2695 recalculate_data_len(first_seg, last_seg, rxm, rxq, 2696 rxm->data_len); 2697 } 2698 2699 first_seg->port = rxq->port_id; 2700 first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash); 2701 first_seg->ol_flags = RTE_MBUF_F_RX_RSS_HASH; 2702 if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) { 2703 first_seg->hash.fdir.hi = 2704 rte_le_to_cpu_16(rxd.rx.fd_id); 2705 first_seg->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 2706 } 2707 2708 gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M, 2709 HNS3_RXD_GRO_SIZE_S); 2710 if (gro_size != 0) { 2711 first_seg->ol_flags |= RTE_MBUF_F_RX_LRO; 2712 first_seg->tso_segsz = gro_size; 2713 } 2714 2715 l234_info = rte_le_to_cpu_32(rxd.rx.l234_info); 2716 ol_info = rte_le_to_cpu_32(rxd.rx.ol_info); 2717 ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info, 2718 l234_info); 2719 if (unlikely(ret)) 2720 goto pkt_err; 2721 2722 first_seg->packet_type = hns3_rx_calc_ptype(rxq, 2723 l234_info, ol_info); 2724 2725 if (first_seg->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) 2726 rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 2727 2728 hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd); 2729 2730 /* Increment bytes counter */ 2731 rxq->basic_stats.bytes += first_seg->pkt_len; 2732 2733 rx_pkts[nb_rx++] = first_seg; 2734 first_seg = NULL; 2735 continue; 2736 pkt_err: 2737 rte_pktmbuf_free(first_seg); 2738 first_seg = NULL; 2739 } 2740 2741 rxq->next_to_use = rx_id; 2742 rxq->pkt_first_seg = first_seg; 2743 rxq->pkt_last_seg = last_seg; 2744 2745 rxq->rx_free_hold += nb_rx_bd; 2746 if (rxq->rx_free_hold > rxq->rx_free_thresh) { 2747 hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold); 2748 rxq->rx_free_hold = 0; 2749 } 2750 2751 return nb_rx; 2752 } 2753 2754 void __rte_weak 2755 hns3_rxq_vec_setup(__rte_unused struct hns3_rx_queue *rxq) 2756 { 2757 } 2758 2759 int __rte_weak 2760 hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev) 2761 { 2762 return -ENOTSUP; 2763 } 2764 2765 uint16_t __rte_weak 2766 hns3_recv_pkts_vec(__rte_unused void *rx_queue, 2767 __rte_unused struct rte_mbuf **rx_pkts, 2768 __rte_unused uint16_t nb_pkts) 2769 { 2770 return 0; 2771 } 2772 2773 uint16_t __rte_weak 2774 hns3_recv_pkts_vec_sve(__rte_unused void *rx_queue, 2775 __rte_unused struct rte_mbuf **rx_pkts, 2776 __rte_unused uint16_t nb_pkts) 2777 { 2778 return 0; 2779 } 2780 2781 int 2782 hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2783 struct rte_eth_burst_mode *mode) 2784 { 2785 static const struct { 2786 eth_rx_burst_t pkt_burst; 2787 const char *info; 2788 } burst_infos[] = { 2789 { hns3_recv_pkts_simple, "Scalar Simple" }, 2790 { hns3_recv_scattered_pkts, "Scalar Scattered" }, 2791 { hns3_recv_pkts_vec, "Vector Neon" }, 2792 { hns3_recv_pkts_vec_sve, "Vector Sve" }, 2793 { rte_eth_pkt_burst_dummy, "Dummy" }, 2794 }; 2795 2796 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2797 int ret = -EINVAL; 2798 unsigned int i; 2799 2800 for (i = 0; i < RTE_DIM(burst_infos); i++) { 2801 if (pkt_burst == burst_infos[i].pkt_burst) { 2802 snprintf(mode->info, sizeof(mode->info), "%s", 2803 burst_infos[i].info); 2804 ret = 0; 2805 break; 2806 } 2807 } 2808 2809 return ret; 2810 } 2811 2812 static bool 2813 hns3_get_default_vec_support(void) 2814 { 2815 #if defined(RTE_ARCH_ARM64) 2816 if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) 2817 return false; 2818 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) 
2819 return true; 2820 #endif 2821 return false; 2822 } 2823 2824 static bool 2825 hns3_get_sve_support(void) 2826 { 2827 #if defined(RTE_HAS_SVE_ACLE) 2828 if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_256) 2829 return false; 2830 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE)) 2831 return true; 2832 #endif 2833 return false; 2834 } 2835 2836 static eth_rx_burst_t 2837 hns3_get_rx_function(struct rte_eth_dev *dev) 2838 { 2839 struct hns3_adapter *hns = dev->data->dev_private; 2840 uint64_t offloads = dev->data->dev_conf.rxmode.offloads; 2841 bool vec_allowed, sve_allowed, simple_allowed; 2842 bool vec_support; 2843 2844 vec_support = hns3_rx_check_vec_support(dev) == 0; 2845 vec_allowed = vec_support && hns3_get_default_vec_support(); 2846 sve_allowed = vec_support && hns3_get_sve_support(); 2847 simple_allowed = !dev->data->scattered_rx && 2848 (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0; 2849 2850 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed) 2851 return hns3_recv_pkts_vec; 2852 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed) 2853 return hns3_recv_pkts_vec_sve; 2854 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed) 2855 return hns3_recv_pkts_simple; 2856 if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON) 2857 return hns3_recv_scattered_pkts; 2858 2859 if (vec_allowed) 2860 return hns3_recv_pkts_vec; 2861 if (simple_allowed) 2862 return hns3_recv_pkts_simple; 2863 2864 return hns3_recv_scattered_pkts; 2865 } 2866 2867 static int 2868 hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf, 2869 uint16_t nb_desc, uint16_t *tx_rs_thresh, 2870 uint16_t *tx_free_thresh, uint16_t idx) 2871 { 2872 #define HNS3_TX_RS_FREE_THRESH_GAP 8 2873 uint16_t rs_thresh, free_thresh, fast_free_thresh; 2874 2875 if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC || 2876 nb_desc % HNS3_ALIGN_RING_DESC) { 2877 hns3_err(hw, "number (%u) of tx descriptors is invalid", 2878 nb_desc); 2879 return -EINVAL; 2880 } 2881 2882 rs_thresh = (conf->tx_rs_thresh > 0) ? 2883 conf->tx_rs_thresh : HNS3_DEFAULT_TX_RS_THRESH; 2884 free_thresh = (conf->tx_free_thresh > 0) ? 
conf->tx_free_thresh : HNS3_DEFAULT_TX_FREE_THRESH;
	if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
	    rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
	    free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
		hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
			 "(%u) of tx descriptors for port=%u queue=%u check "
			 "fail!",
			 rs_thresh, free_thresh, nb_desc, hw->data->port_id,
			 idx);
		return -EINVAL;
	}

	if (conf->tx_free_thresh == 0) {
		/* Fast free Tx memory buffer to improve cache hit rate */
		fast_free_thresh = nb_desc - rs_thresh;
		if (fast_free_thresh >=
		    HNS3_TX_FAST_FREE_AHEAD + HNS3_DEFAULT_TX_FREE_THRESH)
			free_thresh = fast_free_thresh -
				HNS3_TX_FAST_FREE_AHEAD;
	}

	*tx_rs_thresh = rs_thresh;
	*tx_free_thresh = free_thresh;
	return 0;
}

static void *
hns3_tx_push_get_queue_tail_reg(struct rte_eth_dev *dev, uint16_t queue_id)
{
#define HNS3_TX_PUSH_TQP_REGION_SIZE		0x10000
#define HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET	64
#define HNS3_TX_PUSH_PCI_BAR_INDEX		4

	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	uint8_t bar_id = HNS3_TX_PUSH_PCI_BAR_INDEX;

	/*
	 * If the device supports Tx push, its PCIe bar45 must exist, and the
	 * DPDK framework maps bar45 by default in the PCI probe stage.
	 *
	 * In bar45, the first half is for RoCE (RDMA over Converged Ethernet)
	 * and the second half is for the NIC; every TQP occupies 64KB.
	 *
	 * The quick doorbell is located at a 64B offset within the TQP
	 * region.
	 */
	return (char *)pci_dev->mem_resource[bar_id].addr +
			(pci_dev->mem_resource[bar_id].len >> 1) +
			HNS3_TX_PUSH_TQP_REGION_SIZE * queue_id +
			HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET;
}

void
hns3_tx_push_init(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	volatile uint32_t *reg;
	uint32_t val;

	if (!hns3_dev_get_support(hw, TX_PUSH))
		return;

	reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
	/*
	 * Because bar45 is about 8GB in size, the page fault could take a
	 * long time in the Tx path when working with vfio-pci, so perform one
	 * read here to make the kernel set up the page table mapping for
	 * bar45 at init stage.
	 * Note: bar45 is readable, but the value read back is all 1s.
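	 *
	 * Queue 0's quick doorbell address is used only as a convenient,
	 * already-computed address inside the NIC half of bar45; the value
	 * read back is discarded. As an illustration of the address layout
	 * above: with an 8GB bar45, queue 1's quick doorbell would be at
	 * addr + 4GB + 0x10000 + 64.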
2953 */ 2954 val = *reg; 2955 RTE_SET_USED(val); 2956 } 2957 2958 static void 2959 hns3_tx_push_queue_init(struct rte_eth_dev *dev, 2960 uint16_t queue_id, 2961 struct hns3_tx_queue *txq) 2962 { 2963 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2964 if (!hns3_dev_get_support(hw, TX_PUSH)) { 2965 txq->tx_push_enable = false; 2966 return; 2967 } 2968 2969 txq->io_tail_reg = (volatile void *)hns3_tx_push_get_queue_tail_reg(dev, 2970 queue_id); 2971 txq->tx_push_enable = true; 2972 } 2973 2974 int 2975 hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, 2976 unsigned int socket_id, const struct rte_eth_txconf *conf) 2977 { 2978 struct hns3_adapter *hns = dev->data->dev_private; 2979 uint16_t tx_rs_thresh, tx_free_thresh; 2980 struct hns3_hw *hw = &hns->hw; 2981 struct hns3_queue_info q_info; 2982 struct hns3_tx_queue *txq; 2983 int tx_entry_len; 2984 int ret; 2985 2986 ret = hns3_tx_queue_conf_check(hw, conf, nb_desc, 2987 &tx_rs_thresh, &tx_free_thresh, idx); 2988 if (ret) 2989 return ret; 2990 2991 if (dev->data->tx_queues[idx] != NULL) { 2992 hns3_tx_queue_release(dev->data->tx_queues[idx]); 2993 dev->data->tx_queues[idx] = NULL; 2994 } 2995 2996 q_info.idx = idx; 2997 q_info.socket_id = socket_id; 2998 q_info.nb_desc = nb_desc; 2999 q_info.type = "hns3 TX queue"; 3000 q_info.ring_name = "tx_ring"; 3001 txq = hns3_alloc_txq_and_dma_zone(dev, &q_info); 3002 if (txq == NULL) { 3003 hns3_err(hw, 3004 "Failed to alloc mem and reserve DMA mem for tx ring!"); 3005 return -ENOMEM; 3006 } 3007 3008 txq->tx_deferred_start = conf->tx_deferred_start; 3009 if (txq->tx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) { 3010 hns3_warn(hw, "deferred start is not supported."); 3011 txq->tx_deferred_start = false; 3012 } 3013 3014 tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc; 3015 txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len, 3016 RTE_CACHE_LINE_SIZE, socket_id); 3017 if (txq->sw_ring == NULL) { 3018 hns3_err(hw, "Failed to allocate memory for tx sw ring!"); 3019 hns3_tx_queue_release(txq); 3020 return -ENOMEM; 3021 } 3022 3023 txq->hns = hns; 3024 txq->next_to_use = 0; 3025 txq->next_to_clean = 0; 3026 txq->tx_bd_ready = txq->nb_tx_desc - 1; 3027 txq->tx_free_thresh = tx_free_thresh; 3028 txq->tx_rs_thresh = tx_rs_thresh; 3029 txq->free = rte_zmalloc_socket("hns3 TX mbuf free array", 3030 sizeof(struct rte_mbuf *) * txq->tx_rs_thresh, 3031 RTE_CACHE_LINE_SIZE, socket_id); 3032 if (!txq->free) { 3033 hns3_err(hw, "failed to allocate tx mbuf free array!"); 3034 hns3_tx_queue_release(txq); 3035 return -ENOMEM; 3036 } 3037 3038 txq->port_id = dev->data->port_id; 3039 /* 3040 * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE, 3041 * the pvid_sw_shift_en in the queue struct should not be changed, 3042 * because PVID-related operations do not need to be processed by PMD. 3043 * For hns3 VF device, whether it needs to process PVID depends 3044 * on the configuration of PF kernel mode netdev driver. And the 3045 * related PF configuration is delivered through the mailbox and finally 3046 * reflected in port_base_vlan_cfg. 
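	 *
	 * In short, pvid_sw_shift_en below ends up true only when a port
	 * based VLAN is active and the VLAN tag position has to be adjusted
	 * by the driver rather than by hardware.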
3047 */ 3048 if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) 3049 txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state == 3050 HNS3_PORT_BASE_VLAN_ENABLE; 3051 else 3052 txq->pvid_sw_shift_en = false; 3053 3054 if (hns3_dev_get_support(hw, SIMPLE_BD)) 3055 txq->simple_bd_enable = true; 3056 3057 txq->max_non_tso_bd_num = hw->max_non_tso_bd_num; 3058 txq->configured = true; 3059 txq->io_base = (void *)((char *)hw->io_base + 3060 hns3_get_tqp_reg_offset(idx)); 3061 txq->io_tail_reg = (volatile void *)((char *)txq->io_base + 3062 HNS3_RING_TX_TAIL_REG); 3063 txq->min_tx_pkt_len = hw->min_tx_pkt_len; 3064 txq->tso_mode = hw->tso_mode; 3065 txq->udp_cksum_mode = hw->udp_cksum_mode; 3066 txq->mbuf_fast_free_en = !!(dev->data->dev_conf.txmode.offloads & 3067 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE); 3068 memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats)); 3069 memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats)); 3070 3071 /* 3072 * Call hns3_tx_push_queue_init after assigned io_tail_reg field because 3073 * it may overwrite the io_tail_reg field. 3074 */ 3075 hns3_tx_push_queue_init(dev, idx, txq); 3076 3077 rte_spinlock_lock(&hw->lock); 3078 dev->data->tx_queues[idx] = txq; 3079 rte_spinlock_unlock(&hw->lock); 3080 3081 return 0; 3082 } 3083 3084 static void 3085 hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq) 3086 { 3087 uint16_t tx_next_clean = txq->next_to_clean; 3088 uint16_t tx_next_use = txq->next_to_use; 3089 uint16_t tx_bd_ready = txq->tx_bd_ready; 3090 uint16_t tx_bd_max = txq->nb_tx_desc; 3091 struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean]; 3092 struct hns3_desc *desc = &txq->tx_ring[tx_next_clean]; 3093 struct rte_mbuf *mbuf; 3094 3095 while ((!(desc->tx.tp_fe_sc_vld_ra_ri & 3096 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) && 3097 tx_next_use != tx_next_clean) { 3098 mbuf = tx_bak_pkt->mbuf; 3099 if (mbuf) { 3100 rte_pktmbuf_free_seg(mbuf); 3101 tx_bak_pkt->mbuf = NULL; 3102 } 3103 3104 desc++; 3105 tx_bak_pkt++; 3106 tx_next_clean++; 3107 tx_bd_ready++; 3108 3109 if (tx_next_clean >= tx_bd_max) { 3110 tx_next_clean = 0; 3111 desc = txq->tx_ring; 3112 tx_bak_pkt = txq->sw_ring; 3113 } 3114 } 3115 3116 txq->next_to_clean = tx_next_clean; 3117 txq->tx_bd_ready = tx_bd_ready; 3118 } 3119 3120 int 3121 hns3_config_gro(struct hns3_hw *hw, bool en) 3122 { 3123 struct hns3_cfg_gro_status_cmd *req; 3124 struct hns3_cmd_desc desc; 3125 int ret; 3126 3127 if (!hns3_dev_get_support(hw, GRO)) 3128 return 0; 3129 3130 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false); 3131 req = (struct hns3_cfg_gro_status_cmd *)desc.data; 3132 3133 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0); 3134 3135 ret = hns3_cmd_send(hw, &desc, 1); 3136 if (ret) 3137 hns3_err(hw, "%s hardware GRO failed, ret = %d", 3138 en ? "enable" : "disable", ret); 3139 3140 return ret; 3141 } 3142 3143 int 3144 hns3_restore_gro_conf(struct hns3_hw *hw) 3145 { 3146 uint64_t offloads; 3147 bool gro_en; 3148 int ret; 3149 3150 offloads = hw->data->dev_conf.rxmode.offloads; 3151 gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false; 3152 ret = hns3_config_gro(hw, gro_en); 3153 if (ret) 3154 hns3_err(hw, "restore hardware GRO to %s failed, ret = %d", 3155 gro_en ? 
"enabled" : "disabled", ret); 3156 3157 return ret; 3158 } 3159 3160 static inline bool 3161 hns3_pkt_is_tso(struct rte_mbuf *m) 3162 { 3163 return (m->tso_segsz != 0 && m->ol_flags & RTE_MBUF_F_TX_TCP_SEG); 3164 } 3165 3166 static void 3167 hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm) 3168 { 3169 if (!hns3_pkt_is_tso(rxm)) 3170 return; 3171 3172 if (paylen <= rxm->tso_segsz) 3173 return; 3174 3175 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B)); 3176 desc->tx.ckst_mss |= rte_cpu_to_le_16(rxm->tso_segsz); 3177 } 3178 3179 static inline void 3180 hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm) 3181 { 3182 desc->addr = rte_mbuf_data_iova(rxm); 3183 desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm)); 3184 desc->tx.tp_fe_sc_vld_ra_ri |= rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)); 3185 } 3186 3187 static void 3188 hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, 3189 struct rte_mbuf *rxm) 3190 { 3191 uint64_t ol_flags = rxm->ol_flags; 3192 uint32_t hdr_len; 3193 uint32_t paylen; 3194 3195 hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len; 3196 hdr_len += (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? 3197 rxm->outer_l2_len + rxm->outer_l3_len : 0; 3198 paylen = rxm->pkt_len - hdr_len; 3199 desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen); 3200 hns3_set_tso(desc, paylen, rxm); 3201 3202 /* 3203 * Currently, hardware doesn't support more than two layers VLAN offload 3204 * in Tx direction based on hns3 network engine. So when the number of 3205 * VLANs in the packets represented by rxm plus the number of VLAN 3206 * offload by hardware such as PVID etc, exceeds two, the packets will 3207 * be discarded or the original VLAN of the packets will be overwritten 3208 * by hardware. When the PF PVID is enabled by calling the API function 3209 * named rte_eth_dev_set_vlan_pvid or the VF PVID is enabled by the hns3 3210 * PF kernel ether driver, the outer VLAN tag will always be the PVID. 3211 * To avoid the VLAN of Tx descriptor is overwritten by PVID, it should 3212 * be added to the position close to the IP header when PVID is enabled. 
3213 */ 3214 if (!txq->pvid_sw_shift_en && 3215 ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) { 3216 desc->tx.ol_type_vlan_len_msec |= 3217 rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B)); 3218 if (ol_flags & RTE_MBUF_F_TX_QINQ) 3219 desc->tx.outer_vlan_tag = 3220 rte_cpu_to_le_16(rxm->vlan_tci_outer); 3221 else 3222 desc->tx.outer_vlan_tag = 3223 rte_cpu_to_le_16(rxm->vlan_tci); 3224 } 3225 3226 if (ol_flags & RTE_MBUF_F_TX_QINQ || 3227 ((ol_flags & RTE_MBUF_F_TX_VLAN) && txq->pvid_sw_shift_en)) { 3228 desc->tx.type_cs_vlan_tso_len |= 3229 rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B)); 3230 desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci); 3231 } 3232 3233 if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) 3234 desc->tx.tp_fe_sc_vld_ra_ri |= 3235 rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B)); 3236 } 3237 3238 static inline int 3239 hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf, 3240 struct rte_mbuf **alloc_mbuf) 3241 { 3242 #define MAX_NON_TSO_BD_PER_PKT 18 3243 struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT]; 3244 uint16_t i; 3245 3246 /* Allocate enough mbufs */ 3247 if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf)) 3248 return -ENOMEM; 3249 3250 for (i = 0; i < nb_new_buf - 1; i++) 3251 pkt_segs[i]->next = pkt_segs[i + 1]; 3252 3253 pkt_segs[nb_new_buf - 1]->next = NULL; 3254 pkt_segs[0]->nb_segs = nb_new_buf; 3255 *alloc_mbuf = pkt_segs[0]; 3256 3257 return 0; 3258 } 3259 3260 static inline void 3261 hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt) 3262 { 3263 new_pkt->ol_flags = old_pkt->ol_flags; 3264 new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt); 3265 new_pkt->outer_l2_len = old_pkt->outer_l2_len; 3266 new_pkt->outer_l3_len = old_pkt->outer_l3_len; 3267 new_pkt->l2_len = old_pkt->l2_len; 3268 new_pkt->l3_len = old_pkt->l3_len; 3269 new_pkt->l4_len = old_pkt->l4_len; 3270 new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer; 3271 new_pkt->vlan_tci = old_pkt->vlan_tci; 3272 } 3273 3274 static int 3275 hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt, 3276 uint8_t max_non_tso_bd_num) 3277 { 3278 struct rte_mempool *mb_pool; 3279 struct rte_mbuf *new_mbuf; 3280 struct rte_mbuf *temp_new; 3281 struct rte_mbuf *temp; 3282 uint16_t last_buf_len; 3283 uint16_t nb_new_buf; 3284 uint16_t buf_size; 3285 uint16_t buf_len; 3286 uint16_t len_s; 3287 uint16_t len_d; 3288 uint16_t len; 3289 int ret; 3290 char *s; 3291 char *d; 3292 3293 mb_pool = tx_pkt->pool; 3294 buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM; 3295 nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1; 3296 if (nb_new_buf > max_non_tso_bd_num) 3297 return -EINVAL; 3298 3299 last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size; 3300 if (last_buf_len == 0) 3301 last_buf_len = buf_size; 3302 3303 /* Allocate enough mbufs */ 3304 ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf); 3305 if (ret) 3306 return ret; 3307 3308 /* Copy the original packet content to the new mbufs */ 3309 temp = tx_pkt; 3310 s = rte_pktmbuf_mtod(temp, char *); 3311 len_s = rte_pktmbuf_data_len(temp); 3312 temp_new = new_mbuf; 3313 while (temp != NULL && temp_new != NULL) { 3314 d = rte_pktmbuf_mtod(temp_new, char *); 3315 buf_len = temp_new->next == NULL ? 
last_buf_len : buf_size; 3316 len_d = buf_len; 3317 3318 while (len_d) { 3319 len = RTE_MIN(len_s, len_d); 3320 memcpy(d, s, len); 3321 s = s + len; 3322 d = d + len; 3323 len_d = len_d - len; 3324 len_s = len_s - len; 3325 3326 if (len_s == 0) { 3327 temp = temp->next; 3328 if (temp == NULL) 3329 break; 3330 s = rte_pktmbuf_mtod(temp, char *); 3331 len_s = rte_pktmbuf_data_len(temp); 3332 } 3333 } 3334 3335 temp_new->data_len = buf_len; 3336 temp_new = temp_new->next; 3337 } 3338 hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt); 3339 3340 /* free original mbufs */ 3341 rte_pktmbuf_free(tx_pkt); 3342 3343 *new_pkt = new_mbuf; 3344 3345 return 0; 3346 } 3347 3348 static void 3349 hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec) 3350 { 3351 uint32_t tmp = *ol_type_vlan_len_msec; 3352 uint64_t ol_flags = m->ol_flags; 3353 3354 /* (outer) IP header type */ 3355 if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) { 3356 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) 3357 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, 3358 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM); 3359 else 3360 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, 3361 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM); 3362 } else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) { 3363 tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, 3364 HNS3_OL3T_IPV6); 3365 } 3366 /* OL3 header size, defined in 4 bytes */ 3367 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, 3368 m->outer_l3_len >> HNS3_L3_LEN_UNIT); 3369 *ol_type_vlan_len_msec = tmp; 3370 } 3371 3372 static int 3373 hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, 3374 uint32_t *type_cs_vlan_tso_len) 3375 { 3376 #define HNS3_NVGRE_HLEN 8 3377 uint32_t tmp_outer = *ol_type_vlan_len_msec; 3378 uint32_t tmp_inner = *type_cs_vlan_tso_len; 3379 uint64_t ol_flags = m->ol_flags; 3380 uint16_t inner_l2_len; 3381 3382 switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { 3383 case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE: 3384 case RTE_MBUF_F_TX_TUNNEL_GENEVE: 3385 case RTE_MBUF_F_TX_TUNNEL_VXLAN: 3386 /* MAC in UDP tunnelling packet, include VxLAN and GENEVE */ 3387 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M, 3388 HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP); 3389 /* 3390 * The inner l2 length of mbuf is the sum of outer l4 length, 3391 * tunneling header length and inner l2 length for a tunnel 3392 * packet. But in hns3 tx descriptor, the tunneling header 3393 * length is contained in the field of outer L4 length. 3394 * Therefore, driver need to calculate the outer L4 length and 3395 * inner L2 length. 3396 */ 3397 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, 3398 HNS3_TXD_L4LEN_S, 3399 (uint8_t)RTE_ETHER_VXLAN_HLEN >> 3400 HNS3_L4_LEN_UNIT); 3401 3402 inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN; 3403 break; 3404 case RTE_MBUF_F_TX_TUNNEL_GRE: 3405 tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M, 3406 HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE); 3407 /* 3408 * For NVGRE tunnel packet, the outer L4 is empty. So only 3409 * fill the NVGRE header length to the outer L4 field. 
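		 *
		 * For example (assuming an untagged 14-byte inner Ethernet
		 * header): an NVGRE packet has mbuf l2_len = 8 (NVGRE header)
		 * + 14 = 22, so HNS3_NVGRE_HLEN (8) is written to the outer
		 * L4 length field and inner_l2_len becomes 14.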
3410 */ 3411 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, 3412 HNS3_TXD_L4LEN_S, 3413 (uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT); 3414 3415 inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN; 3416 break; 3417 default: 3418 /* For non UDP / GRE tunneling, drop the tunnel packet */ 3419 return -EINVAL; 3420 } 3421 3422 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, 3423 inner_l2_len >> HNS3_L2_LEN_UNIT); 3424 /* OL2 header size, defined in 2 bytes */ 3425 tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, 3426 m->outer_l2_len >> HNS3_L2_LEN_UNIT); 3427 3428 *type_cs_vlan_tso_len = tmp_inner; 3429 *ol_type_vlan_len_msec = tmp_outer; 3430 3431 return 0; 3432 } 3433 3434 static int 3435 hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m, 3436 uint16_t tx_desc_id) 3437 { 3438 struct hns3_desc *tx_ring = txq->tx_ring; 3439 struct hns3_desc *desc = &tx_ring[tx_desc_id]; 3440 uint64_t ol_flags = m->ol_flags; 3441 uint32_t tmp_outer = 0; 3442 uint32_t tmp_inner = 0; 3443 uint32_t tmp_ol4cs; 3444 int ret; 3445 3446 /* 3447 * The tunnel header is contained in the inner L2 header field of the 3448 * mbuf, but for hns3 descriptor, it is contained in the outer L4. So, 3449 * there is a need that switching between them. To avoid multiple 3450 * calculations, the length of the L2 header include the outer and 3451 * inner, will be filled during the parsing of tunnel packets. 3452 */ 3453 if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) { 3454 /* 3455 * For non tunnel type the tunnel type id is 0, so no need to 3456 * assign a value to it. Only the inner(normal) L2 header length 3457 * is assigned. 3458 */ 3459 tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, 3460 HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT); 3461 } else { 3462 /* 3463 * If outer csum is not offload, the outer length may be filled 3464 * with 0. And the length of the outer header is added to the 3465 * inner l2_len. It would lead a cksum error. So driver has to 3466 * calculate the header length. 3467 */ 3468 if (unlikely(!(ol_flags & 3469 (RTE_MBUF_F_TX_OUTER_IP_CKSUM | RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) && 3470 m->outer_l2_len == 0)) { 3471 struct rte_net_hdr_lens hdr_len; 3472 (void)rte_net_get_ptype(m, &hdr_len, 3473 RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK); 3474 m->outer_l3_len = hdr_len.l3_len; 3475 m->outer_l2_len = hdr_len.l2_len; 3476 m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len; 3477 } 3478 hns3_parse_outer_params(m, &tmp_outer); 3479 ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner); 3480 if (ret) 3481 return -EINVAL; 3482 } 3483 3484 desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer); 3485 desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner); 3486 tmp_ol4cs = ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM ? 
3487 BIT(HNS3_TXD_OL4CS_B) : 0; 3488 desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs); 3489 3490 return 0; 3491 } 3492 3493 static void 3494 hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len) 3495 { 3496 uint64_t ol_flags = m->ol_flags; 3497 uint32_t l3_type; 3498 uint32_t tmp; 3499 3500 tmp = *type_cs_vlan_tso_len; 3501 if (ol_flags & RTE_MBUF_F_TX_IPV4) 3502 l3_type = HNS3_L3T_IPV4; 3503 else if (ol_flags & RTE_MBUF_F_TX_IPV6) 3504 l3_type = HNS3_L3T_IPV6; 3505 else 3506 l3_type = HNS3_L3T_NONE; 3507 3508 /* inner(/normal) L3 header size, defined in 4 bytes */ 3509 tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, 3510 m->l3_len >> HNS3_L3_LEN_UNIT); 3511 3512 tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type); 3513 3514 /* Enable L3 checksum offloads */ 3515 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) 3516 tmp |= BIT(HNS3_TXD_L3CS_B); 3517 *type_cs_vlan_tso_len = tmp; 3518 } 3519 3520 static void 3521 hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len) 3522 { 3523 uint64_t ol_flags = m->ol_flags; 3524 uint32_t tmp; 3525 /* Enable L4 checksum offloads */ 3526 switch (ol_flags & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG)) { 3527 case RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_TCP_SEG: 3528 case RTE_MBUF_F_TX_TCP_CKSUM: 3529 case RTE_MBUF_F_TX_TCP_SEG: 3530 tmp = *type_cs_vlan_tso_len; 3531 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, 3532 HNS3_L4T_TCP); 3533 break; 3534 case RTE_MBUF_F_TX_UDP_CKSUM: 3535 tmp = *type_cs_vlan_tso_len; 3536 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, 3537 HNS3_L4T_UDP); 3538 break; 3539 case RTE_MBUF_F_TX_SCTP_CKSUM: 3540 tmp = *type_cs_vlan_tso_len; 3541 tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, 3542 HNS3_L4T_SCTP); 3543 break; 3544 default: 3545 return; 3546 } 3547 tmp |= BIT(HNS3_TXD_L4CS_B); 3548 tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, 3549 m->l4_len >> HNS3_L4_LEN_UNIT); 3550 *type_cs_vlan_tso_len = tmp; 3551 } 3552 3553 static void 3554 hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m, 3555 uint16_t tx_desc_id) 3556 { 3557 struct hns3_desc *tx_ring = txq->tx_ring; 3558 struct hns3_desc *desc = &tx_ring[tx_desc_id]; 3559 uint32_t value = 0; 3560 3561 hns3_parse_l3_cksum_params(m, &value); 3562 hns3_parse_l4_cksum_params(m, &value); 3563 3564 desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value); 3565 } 3566 3567 static bool 3568 hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num, 3569 uint32_t max_non_tso_bd_num) 3570 { 3571 struct rte_mbuf *m_first = tx_pkts; 3572 struct rte_mbuf *m_last = tx_pkts; 3573 uint32_t tot_len = 0; 3574 uint32_t hdr_len; 3575 uint32_t i; 3576 3577 /* 3578 * Hardware requires that the sum of the data length of every 8 3579 * consecutive buffers is greater than MSS in hns3 network engine. 3580 * We simplify it by ensuring pkt_headlen + the first 8 consecutive 3581 * frags greater than gso header len + mss, and the remaining 7 3582 * consecutive frags greater than MSS except the last 7 frags. 3583 */ 3584 if (bd_num <= max_non_tso_bd_num) 3585 return false; 3586 3587 for (i = 0; m_last && i < max_non_tso_bd_num - 1; 3588 i++, m_last = m_last->next) 3589 tot_len += m_last->data_len; 3590 3591 if (!m_last) 3592 return true; 3593 3594 /* ensure the first 8 frags is greater than mss + header */ 3595 hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len; 3596 hdr_len += (tx_pkts->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? 
3597 tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0; 3598 if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len) 3599 return true; 3600 3601 /* 3602 * ensure the sum of the data length of every 7 consecutive buffer 3603 * is greater than mss except the last one. 3604 */ 3605 for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) { 3606 tot_len -= m_first->data_len; 3607 tot_len += m_last->data_len; 3608 3609 if (tot_len < tx_pkts->tso_segsz) 3610 return true; 3611 3612 m_first = m_first->next; 3613 m_last = m_last->next; 3614 } 3615 3616 return false; 3617 } 3618 3619 static bool 3620 hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, 3621 uint32_t *l4_proto) 3622 { 3623 struct rte_ipv4_hdr *ipv4_hdr; 3624 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, 3625 m->outer_l2_len); 3626 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) 3627 ipv4_hdr->hdr_checksum = 0; 3628 if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { 3629 struct rte_udp_hdr *udp_hdr; 3630 /* 3631 * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo 3632 * header for TSO packets 3633 */ 3634 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) 3635 return true; 3636 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, 3637 m->outer_l2_len + m->outer_l3_len); 3638 udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); 3639 3640 return true; 3641 } 3642 *l4_proto = ipv4_hdr->next_proto_id; 3643 return false; 3644 } 3645 3646 static bool 3647 hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, 3648 uint32_t *l4_proto) 3649 { 3650 struct rte_ipv6_hdr *ipv6_hdr; 3651 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, 3652 m->outer_l2_len); 3653 if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { 3654 struct rte_udp_hdr *udp_hdr; 3655 /* 3656 * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo 3657 * header for TSO packets 3658 */ 3659 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) 3660 return true; 3661 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, 3662 m->outer_l2_len + m->outer_l3_len); 3663 udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); 3664 3665 return true; 3666 } 3667 *l4_proto = ipv6_hdr->proto; 3668 return false; 3669 } 3670 3671 static void 3672 hns3_outer_header_cksum_prepare(struct rte_mbuf *m) 3673 { 3674 uint64_t ol_flags = m->ol_flags; 3675 uint32_t paylen, hdr_len, l4_proto; 3676 struct rte_udp_hdr *udp_hdr; 3677 3678 if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6))) 3679 return; 3680 3681 if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) { 3682 if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto)) 3683 return; 3684 } else { 3685 if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto)) 3686 return; 3687 } 3688 3689 /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */ 3690 if (l4_proto == IPPROTO_UDP && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { 3691 hdr_len = m->l2_len + m->l3_len + m->l4_len; 3692 hdr_len += m->outer_l2_len + m->outer_l3_len; 3693 paylen = m->pkt_len - hdr_len; 3694 if (paylen <= m->tso_segsz) 3695 return; 3696 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, 3697 m->outer_l2_len + 3698 m->outer_l3_len); 3699 udp_hdr->dgram_cksum = 0; 3700 } 3701 } 3702 3703 static int 3704 hns3_check_tso_pkt_valid(struct rte_mbuf *m) 3705 { 3706 uint32_t tmp_data_len_sum = 0; 3707 uint16_t nb_buf = m->nb_segs; 3708 uint32_t paylen, hdr_len; 3709 struct rte_mbuf *m_seg; 3710 int i; 3711 3712 if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT) 3713 return -EINVAL; 3714 3715 hdr_len = 
m->l2_len + m->l3_len + m->l4_len; 3716 hdr_len += (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? 3717 m->outer_l2_len + m->outer_l3_len : 0; 3718 if (hdr_len > HNS3_MAX_TSO_HDR_SIZE) 3719 return -EINVAL; 3720 3721 paylen = m->pkt_len - hdr_len; 3722 if (paylen > HNS3_MAX_BD_PAYLEN) 3723 return -EINVAL; 3724 3725 /* 3726 * The TSO header (include outer and inner L2, L3 and L4 header) 3727 * should be provided by three descriptors in maximum in hns3 network 3728 * engine. 3729 */ 3730 m_seg = m; 3731 for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf; 3732 i++, m_seg = m_seg->next) { 3733 tmp_data_len_sum += m_seg->data_len; 3734 } 3735 3736 if (hdr_len > tmp_data_len_sum) 3737 return -EINVAL; 3738 3739 return 0; 3740 } 3741 3742 #ifdef RTE_LIBRTE_ETHDEV_DEBUG 3743 static inline int 3744 hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m) 3745 { 3746 struct rte_ether_hdr *eh; 3747 struct rte_vlan_hdr *vh; 3748 3749 if (!txq->pvid_sw_shift_en) 3750 return 0; 3751 3752 /* 3753 * Due to hardware limitations, we only support two-layer VLAN hardware 3754 * offload in Tx direction based on hns3 network engine, so when PVID is 3755 * enabled, QinQ insert is no longer supported. 3756 * And when PVID is enabled, in the following two cases: 3757 * i) packets with more than two VLAN tags. 3758 * ii) packets with one VLAN tag while the hardware VLAN insert is 3759 * enabled. 3760 * The packets will be regarded as abnormal packets and discarded by 3761 * hardware in Tx direction. For debugging purposes, a validation check 3762 * for these types of packets is added to the '.tx_pkt_prepare' ops 3763 * implementation function named hns3_prep_pkts to inform users that 3764 * these packets will be discarded. 3765 */ 3766 if (m->ol_flags & RTE_MBUF_F_TX_QINQ) 3767 return -EINVAL; 3768 3769 eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 3770 if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) { 3771 if (m->ol_flags & RTE_MBUF_F_TX_VLAN) 3772 return -EINVAL; 3773 3774 /* Ensure the incoming packet is not a QinQ packet */ 3775 vh = (struct rte_vlan_hdr *)(eh + 1); 3776 if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) 3777 return -EINVAL; 3778 } 3779 3780 return 0; 3781 } 3782 #endif 3783 3784 static uint16_t 3785 hns3_udp_cksum_help(struct rte_mbuf *m) 3786 { 3787 uint64_t ol_flags = m->ol_flags; 3788 uint16_t cksum = 0; 3789 uint32_t l4_len; 3790 3791 if (ol_flags & RTE_MBUF_F_TX_IPV4) { 3792 struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m, 3793 struct rte_ipv4_hdr *, m->l2_len); 3794 l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len; 3795 } else { 3796 struct rte_ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m, 3797 struct rte_ipv6_hdr *, m->l2_len); 3798 l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len); 3799 } 3800 3801 rte_raw_cksum_mbuf(m, m->l2_len + m->l3_len, l4_len, &cksum); 3802 3803 cksum = ~cksum; 3804 /* 3805 * RFC 768:If the computed checksum is zero for UDP, it is transmitted 3806 * as all ones 3807 */ 3808 if (cksum == 0) 3809 cksum = 0xffff; 3810 3811 return (uint16_t)cksum; 3812 } 3813 3814 static bool 3815 hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m) 3816 { 3817 uint64_t ol_flags = m->ol_flags; 3818 struct rte_udp_hdr *udp_hdr; 3819 uint16_t dst_port; 3820 3821 if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE || 3822 ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK || 3823 (ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_UDP_CKSUM) 3824 return true; 3825 /* 3826 * A UDP packet with 
the same dst_port as VXLAN/VXLAN_GPE/GENEVE will
3827 * be recognized as a tunnel packet by the HW. In this case, if the UDP
3828 * checksum offload is requested but no tunnel offload flag is set, the
3829 * checksum computed by the hardware is wrong because it assumes the
3830 * wrong header length, so the driver completes the checksum in software.
3831 */
3832 udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
3833 m->l2_len + m->l3_len);
3834 dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
3835 switch (dst_port) {
3836 case RTE_VXLAN_DEFAULT_PORT:
3837 case RTE_VXLAN_GPE_DEFAULT_PORT:
3838 case RTE_GENEVE_DEFAULT_PORT:
3839 udp_hdr->dgram_cksum = hns3_udp_cksum_help(m);
3840 m->ol_flags = ol_flags & ~RTE_MBUF_F_TX_L4_MASK;
3841 return false;
3842 default:
3843 return true;
3844 }
3845 }
3846
3847 static int
3848 hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3849 {
3850 int ret;
3851
3852 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3853 ret = rte_validate_tx_offload(m);
3854 if (ret != 0) {
3855 rte_errno = -ret;
3856 return ret;
3857 }
3858
3859 ret = hns3_vld_vlan_chk(tx_queue, m);
3860 if (ret != 0) {
3861 rte_errno = EINVAL;
3862 return ret;
3863 }
3864 #endif
3865 if (hns3_pkt_is_tso(m)) {
3866 if (hns3_pkt_need_linearized(m, m->nb_segs,
3867 tx_queue->max_non_tso_bd_num) ||
3868 hns3_check_tso_pkt_valid(m)) {
3869 rte_errno = EINVAL;
3870 return -EINVAL;
3871 }
3872
3873 if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
3874 /*
3875 * A tso_mode other than HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
3876 * means the hardware recalculates the TCP pseudo header
3877 * checksum of TSO packets itself, so the driver does
3878 * not need to recalculate it in software.
3879 */
3880 hns3_outer_header_cksum_prepare(m);
3881 return 0;
3882 }
3883 }
3884
3885 ret = rte_net_intel_cksum_prepare(m);
3886 if (ret != 0) {
3887 rte_errno = -ret;
3888 return ret;
3889 }
3890
3891 if (!hns3_validate_tunnel_cksum(tx_queue, m))
3892 return 0;
3893
3894 hns3_outer_header_cksum_prepare(m);
3895
3896 return 0;
3897 }
3898
3899 uint16_t
3900 hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3901 uint16_t nb_pkts)
3902 {
3903 struct rte_mbuf *m;
3904 uint16_t i;
3905
3906 for (i = 0; i < nb_pkts; i++) {
3907 m = tx_pkts[i];
3908 if (hns3_prep_pkt_proc(tx_queue, m))
3909 return i;
3910 }
3911
3912 return i;
3913 }
3914
3915 static inline int
3916 hns3_handle_simple_bd(struct hns3_tx_queue *txq, struct hns3_desc *desc,
3917 struct rte_mbuf *m)
3918 {
3919 #define HNS3_TCP_CSUM_OFFSET 16
3920 #define HNS3_UDP_CSUM_OFFSET 6
3921
3922 /*
3923 * On HIP09, the NIC hardware supports a Tx simple BD mode in which the
3924 * HW calculates the checksum from a given start position and writes the
3925 * result to a given offset, without needing the packet type or the
3926 * L3/L4 header lengths.
3927 * For non-tunneled packets:
3928 * - the simple BD supports TCP and UDP checksum.
3929 * For tunneled packets:
3930 * - the simple BD supports the inner L4 checksum (except SCTP checksum).
3931 * - the simple BD does not support the outer checksums or the inner
3932 * L3 checksum.
3933 * - In addition, the simple BD does not support TSO.
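 * When all of the conditions below are met, this helper fills the simple
 * BD fields and returns 0, and the caller skips the regular tunnel and
 * checksum descriptor setup; otherwise it returns -ENOTSUP and the normal
 * checksum path is used.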
3934 */ 3935 if (txq->simple_bd_enable && !(m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) && 3936 !(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) && 3937 !(m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) && 3938 ((m->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM || 3939 (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)) { 3940 /* set checksum start and offset, defined in 2 Bytes */ 3941 hns3_set_field(desc->tx.type_cs_vlan_tso_len, 3942 HNS3_TXD_L4_START_M, HNS3_TXD_L4_START_S, 3943 (m->l2_len + m->l3_len) >> HNS3_SIMPLE_BD_UNIT); 3944 hns3_set_field(desc->tx.ol_type_vlan_len_msec, 3945 HNS3_TXD_L4_CKS_OFFSET_M, HNS3_TXD_L4_CKS_OFFSET_S, 3946 (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) == 3947 RTE_MBUF_F_TX_TCP_CKSUM ? 3948 HNS3_TCP_CSUM_OFFSET >> HNS3_SIMPLE_BD_UNIT : 3949 HNS3_UDP_CSUM_OFFSET >> HNS3_SIMPLE_BD_UNIT); 3950 3951 hns3_set_bit(desc->tx.ckst_mss, HNS3_TXD_CKST_B, 1); 3952 3953 return 0; 3954 } 3955 3956 return -ENOTSUP; 3957 } 3958 3959 static int 3960 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id, 3961 struct rte_mbuf *m) 3962 { 3963 struct hns3_desc *tx_ring = txq->tx_ring; 3964 struct hns3_desc *desc = &tx_ring[tx_desc_id]; 3965 3966 /* Enable checksum offloading */ 3967 if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) { 3968 if (hns3_handle_simple_bd(txq, desc, m) == 0) 3969 return 0; 3970 /* Fill in tunneling parameters if necessary */ 3971 if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) { 3972 txq->dfx_stats.unsupported_tunnel_pkt_cnt++; 3973 return -EINVAL; 3974 } 3975 3976 hns3_txd_enable_checksum(txq, m, tx_desc_id); 3977 } else { 3978 /* clear the control bit */ 3979 desc->tx.type_cs_vlan_tso_len = 0; 3980 desc->tx.ol_type_vlan_len_msec = 0; 3981 } 3982 3983 return 0; 3984 } 3985 3986 static int 3987 hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg, 3988 struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq) 3989 { 3990 uint8_t max_non_tso_bd_num; 3991 struct rte_mbuf *new_pkt; 3992 int ret; 3993 3994 if (hns3_pkt_is_tso(*m_seg)) 3995 return 0; 3996 3997 /* 3998 * If packet length is greater than HNS3_MAX_FRAME_LEN 3999 * driver support, the packet will be ignored. 4000 */ 4001 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) { 4002 txq->dfx_stats.over_length_pkt_cnt++; 4003 return -EINVAL; 4004 } 4005 4006 max_non_tso_bd_num = txq->max_non_tso_bd_num; 4007 if (unlikely(nb_buf > max_non_tso_bd_num)) { 4008 txq->dfx_stats.exceed_limit_bd_pkt_cnt++; 4009 ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt, 4010 max_non_tso_bd_num); 4011 if (ret) { 4012 txq->dfx_stats.exceed_limit_bd_reassem_fail++; 4013 return ret; 4014 } 4015 *m_seg = new_pkt; 4016 } 4017 4018 return 0; 4019 } 4020 4021 static inline void 4022 hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq) 4023 { 4024 struct hns3_entry *tx_entry; 4025 struct hns3_desc *desc; 4026 uint16_t tx_next_clean; 4027 uint16_t i; 4028 4029 while (1) { 4030 if (HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) < txq->tx_rs_thresh) 4031 break; 4032 4033 /* 4034 * All mbufs can be released only when the VLD bits of all 4035 * descriptors in a batch are cleared. 
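 * The loop below therefore starts at the last descriptor of the batch and
 * walks backwards; if any descriptor still has its VLD bit set, the whole
 * batch is left untouched.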
4036 */ 4037 tx_next_clean = (txq->next_to_clean + txq->tx_rs_thresh - 1) % 4038 txq->nb_tx_desc; 4039 desc = &txq->tx_ring[tx_next_clean]; 4040 for (i = 0; i < txq->tx_rs_thresh; i++) { 4041 if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) & 4042 BIT(HNS3_TXD_VLD_B)) 4043 return; 4044 desc--; 4045 } 4046 4047 tx_entry = &txq->sw_ring[txq->next_to_clean]; 4048 4049 if (txq->mbuf_fast_free_en) { 4050 rte_mempool_put_bulk(tx_entry->mbuf->pool, 4051 (void **)tx_entry, txq->tx_rs_thresh); 4052 for (i = 0; i < txq->tx_rs_thresh; i++) 4053 tx_entry[i].mbuf = NULL; 4054 goto update_field; 4055 } 4056 4057 for (i = 0; i < txq->tx_rs_thresh; i++) 4058 rte_prefetch0((tx_entry + i)->mbuf); 4059 for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) { 4060 rte_mempool_put(tx_entry->mbuf->pool, tx_entry->mbuf); 4061 tx_entry->mbuf = NULL; 4062 } 4063 4064 update_field: 4065 txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc; 4066 txq->tx_bd_ready += txq->tx_rs_thresh; 4067 } 4068 } 4069 4070 static inline void 4071 hns3_tx_backup_1mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts) 4072 { 4073 tx_entry->mbuf = pkts[0]; 4074 } 4075 4076 static inline void 4077 hns3_tx_backup_4mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts) 4078 { 4079 hns3_tx_backup_1mbuf(&tx_entry[0], &pkts[0]); 4080 hns3_tx_backup_1mbuf(&tx_entry[1], &pkts[1]); 4081 hns3_tx_backup_1mbuf(&tx_entry[2], &pkts[2]); 4082 hns3_tx_backup_1mbuf(&tx_entry[3], &pkts[3]); 4083 } 4084 4085 static inline void 4086 hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) 4087 { 4088 #define PER_LOOP_NUM 4 4089 uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); 4090 uint64_t dma_addr; 4091 uint32_t i; 4092 4093 for (i = 0; i < PER_LOOP_NUM; i++, txdp++, pkts++) { 4094 dma_addr = rte_mbuf_data_iova(*pkts); 4095 txdp->addr = rte_cpu_to_le_64(dma_addr); 4096 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len); 4097 txdp->tx.paylen_fd_dop_ol4cs = 0; 4098 txdp->tx.type_cs_vlan_tso_len = 0; 4099 txdp->tx.ol_type_vlan_len_msec = 0; 4100 if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)) 4101 bd_flag |= BIT(HNS3_TXD_TSYN_B); 4102 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); 4103 } 4104 } 4105 4106 static inline void 4107 hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) 4108 { 4109 uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); 4110 uint64_t dma_addr; 4111 4112 dma_addr = rte_mbuf_data_iova(*pkts); 4113 txdp->addr = rte_cpu_to_le_64(dma_addr); 4114 txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len); 4115 txdp->tx.paylen_fd_dop_ol4cs = 0; 4116 txdp->tx.type_cs_vlan_tso_len = 0; 4117 txdp->tx.ol_type_vlan_len_msec = 0; 4118 if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)) 4119 bd_flag |= BIT(HNS3_TXD_TSYN_B); 4120 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); 4121 } 4122 4123 static inline void 4124 hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq, 4125 struct rte_mbuf **pkts, 4126 uint16_t nb_pkts) 4127 { 4128 #define PER_LOOP_NUM 4 4129 #define PER_LOOP_MASK (PER_LOOP_NUM - 1) 4130 struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use]; 4131 struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use]; 4132 const uint32_t mainpart = (nb_pkts & ((uint32_t)~PER_LOOP_MASK)); 4133 const uint32_t leftover = (nb_pkts & ((uint32_t)PER_LOOP_MASK)); 4134 uint32_t i; 4135 4136 for (i = 0; i < mainpart; i += PER_LOOP_NUM) { 4137 hns3_tx_backup_4mbuf(tx_entry + i, pkts + i); 4138 hns3_tx_setup_4bd(txdp + i, pkts + i); 4139 4140 /* 
Increment bytes counter */ 4141 uint32_t j; 4142 for (j = 0; j < PER_LOOP_NUM; j++) 4143 txq->basic_stats.bytes += pkts[i + j]->pkt_len; 4144 } 4145 if (unlikely(leftover > 0)) { 4146 for (i = 0; i < leftover; i++) { 4147 hns3_tx_backup_1mbuf(tx_entry + mainpart + i, 4148 pkts + mainpart + i); 4149 hns3_tx_setup_1bd(txdp + mainpart + i, 4150 pkts + mainpart + i); 4151 4152 /* Increment bytes counter */ 4153 txq->basic_stats.bytes += pkts[mainpart + i]->pkt_len; 4154 } 4155 } 4156 } 4157 4158 uint16_t 4159 hns3_xmit_pkts_simple(void *tx_queue, 4160 struct rte_mbuf **tx_pkts, 4161 uint16_t nb_pkts) 4162 { 4163 struct hns3_tx_queue *txq = tx_queue; 4164 uint16_t nb_tx = 0; 4165 4166 hns3_tx_free_buffer_simple(txq); 4167 4168 nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts); 4169 if (unlikely(nb_pkts == 0)) { 4170 if (txq->tx_bd_ready == 0) 4171 txq->dfx_stats.queue_full_cnt++; 4172 return 0; 4173 } 4174 4175 txq->tx_bd_ready -= nb_pkts; 4176 if (txq->next_to_use + nb_pkts >= txq->nb_tx_desc) { 4177 nb_tx = txq->nb_tx_desc - txq->next_to_use; 4178 hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx); 4179 txq->next_to_use = 0; 4180 } 4181 4182 if (nb_pkts > nb_tx) { 4183 hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx); 4184 txq->next_to_use += nb_pkts - nb_tx; 4185 } 4186 4187 hns3_write_txq_tail_reg(txq, nb_pkts); 4188 4189 return nb_pkts; 4190 } 4191 4192 uint16_t 4193 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 4194 { 4195 struct hns3_tx_queue *txq = tx_queue; 4196 struct hns3_entry *tx_bak_pkt; 4197 struct hns3_desc *tx_ring; 4198 struct rte_mbuf *tx_pkt; 4199 struct rte_mbuf *m_seg; 4200 struct hns3_desc *desc; 4201 uint32_t nb_hold = 0; 4202 uint16_t tx_next_use; 4203 uint16_t tx_pkt_num; 4204 uint16_t tx_bd_max; 4205 uint16_t nb_buf; 4206 uint16_t nb_tx; 4207 uint16_t i; 4208 4209 hns3_tx_free_useless_buffer(txq); 4210 4211 tx_next_use = txq->next_to_use; 4212 tx_bd_max = txq->nb_tx_desc; 4213 tx_pkt_num = nb_pkts; 4214 tx_ring = txq->tx_ring; 4215 4216 /* send packets */ 4217 tx_bak_pkt = &txq->sw_ring[tx_next_use]; 4218 for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) { 4219 tx_pkt = *tx_pkts++; 4220 4221 nb_buf = tx_pkt->nb_segs; 4222 4223 if (nb_buf > txq->tx_bd_ready) { 4224 txq->dfx_stats.queue_full_cnt++; 4225 if (nb_tx == 0) 4226 return 0; 4227 goto end_of_tx; 4228 } 4229 4230 /* 4231 * If packet length is less than minimum packet length supported 4232 * by hardware in Tx direction, driver need to pad it to avoid 4233 * error. 4234 */ 4235 if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < 4236 txq->min_tx_pkt_len)) { 4237 uint16_t add_len; 4238 char *appended; 4239 4240 add_len = txq->min_tx_pkt_len - 4241 rte_pktmbuf_pkt_len(tx_pkt); 4242 appended = rte_pktmbuf_append(tx_pkt, add_len); 4243 if (appended == NULL) { 4244 txq->dfx_stats.pkt_padding_fail_cnt++; 4245 break; 4246 } 4247 4248 memset(appended, 0, add_len); 4249 } 4250 4251 m_seg = tx_pkt; 4252 4253 if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq)) 4254 goto end_of_tx; 4255 4256 if (hns3_parse_cksum(txq, tx_next_use, m_seg)) 4257 goto end_of_tx; 4258 4259 i = 0; 4260 desc = &tx_ring[tx_next_use]; 4261 4262 /* 4263 * If the packet is divided into multiple Tx Buffer Descriptors, 4264 * only need to fill vlan, paylen and tso into the first Tx 4265 * Buffer Descriptor. 4266 */ 4267 hns3_fill_first_desc(txq, desc, m_seg); 4268 4269 do { 4270 desc = &tx_ring[tx_next_use]; 4271 /* 4272 * Fill valid bits, DMA address and data length for each 4273 * Tx Buffer Descriptor. 
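 * The frame end (FE) bit is set on the last descriptor only, after this
 * loop has walked every segment of the packet.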
4274 */ 4275 hns3_fill_per_desc(desc, m_seg); 4276 tx_bak_pkt->mbuf = m_seg; 4277 m_seg = m_seg->next; 4278 tx_next_use++; 4279 tx_bak_pkt++; 4280 if (tx_next_use >= tx_bd_max) { 4281 tx_next_use = 0; 4282 tx_bak_pkt = txq->sw_ring; 4283 } 4284 if (m_seg != NULL) 4285 TX_BD_LOG(&txq->hns->hw, DEBUG, desc); 4286 4287 i++; 4288 } while (m_seg != NULL); 4289 4290 /* Add end flag for the last Tx Buffer Descriptor */ 4291 desc->tx.tp_fe_sc_vld_ra_ri |= 4292 rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B)); 4293 TX_BD_LOG(&txq->hns->hw, DEBUG, desc); 4294 4295 /* Increment bytes counter */ 4296 txq->basic_stats.bytes += tx_pkt->pkt_len; 4297 nb_hold += i; 4298 txq->next_to_use = tx_next_use; 4299 txq->tx_bd_ready -= i; 4300 } 4301 4302 end_of_tx: 4303 4304 if (likely(nb_tx)) 4305 hns3_write_txq_tail_reg(txq, nb_hold); 4306 4307 return nb_tx; 4308 } 4309 4310 int __rte_weak 4311 hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev) 4312 { 4313 return -ENOTSUP; 4314 } 4315 4316 uint16_t __rte_weak 4317 hns3_xmit_pkts_vec(__rte_unused void *tx_queue, 4318 __rte_unused struct rte_mbuf **tx_pkts, 4319 __rte_unused uint16_t nb_pkts) 4320 { 4321 return 0; 4322 } 4323 4324 uint16_t __rte_weak 4325 hns3_xmit_pkts_vec_sve(void __rte_unused * tx_queue, 4326 struct rte_mbuf __rte_unused **tx_pkts, 4327 uint16_t __rte_unused nb_pkts) 4328 { 4329 return 0; 4330 } 4331 4332 int 4333 hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 4334 struct rte_eth_burst_mode *mode) 4335 { 4336 static const struct { 4337 eth_tx_burst_t pkt_burst; 4338 const char *info; 4339 } burst_infos[] = { 4340 { hns3_xmit_pkts_simple, "Scalar Simple" }, 4341 { hns3_xmit_pkts, "Scalar" }, 4342 { hns3_xmit_pkts_vec, "Vector Neon" }, 4343 { hns3_xmit_pkts_vec_sve, "Vector Sve" }, 4344 { rte_eth_pkt_burst_dummy, "Dummy" }, 4345 }; 4346 4347 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 4348 int ret = -EINVAL; 4349 unsigned int i; 4350 4351 for (i = 0; i < RTE_DIM(burst_infos); i++) { 4352 if (pkt_burst == burst_infos[i].pkt_burst) { 4353 snprintf(mode->info, sizeof(mode->info), "%s", 4354 burst_infos[i].info); 4355 ret = 0; 4356 break; 4357 } 4358 } 4359 4360 return ret; 4361 } 4362 4363 static bool 4364 hns3_tx_check_simple_support(struct rte_eth_dev *dev) 4365 { 4366 uint64_t offloads = dev->data->dev_conf.txmode.offloads; 4367 4368 return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)); 4369 } 4370 4371 static bool 4372 hns3_get_tx_prep_needed(struct rte_eth_dev *dev) 4373 { 4374 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\ 4375 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ 4376 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \ 4377 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \ 4378 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \ 4379 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ 4380 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \ 4381 RTE_ETH_TX_OFFLOAD_TCP_TSO | \ 4382 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \ 4383 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \ 4384 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO) 4385 4386 uint64_t tx_offload = dev->data->dev_conf.txmode.offloads; 4387 4388 if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK) 4389 return true; 4390 4391 return false; 4392 } 4393 4394 static eth_tx_prep_t 4395 hns3_get_tx_prepare(struct rte_eth_dev *dev) 4396 { 4397 return hns3_get_tx_prep_needed(dev) ? 
hns3_prep_pkts : NULL; 4398 } 4399 4400 static eth_tx_burst_t 4401 hns3_get_tx_function(struct rte_eth_dev *dev) 4402 { 4403 struct hns3_adapter *hns = dev->data->dev_private; 4404 bool vec_allowed, sve_allowed, simple_allowed; 4405 bool vec_support; 4406 4407 vec_support = hns3_tx_check_vec_support(dev) == 0; 4408 vec_allowed = vec_support && hns3_get_default_vec_support(); 4409 sve_allowed = vec_support && hns3_get_sve_support(); 4410 simple_allowed = hns3_tx_check_simple_support(dev); 4411 4412 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed) 4413 return hns3_xmit_pkts_vec; 4414 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed) 4415 return hns3_xmit_pkts_vec_sve; 4416 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed) 4417 return hns3_xmit_pkts_simple; 4418 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) 4419 return hns3_xmit_pkts; 4420 4421 if (vec_allowed) 4422 return hns3_xmit_pkts_vec; 4423 if (simple_allowed) 4424 return hns3_xmit_pkts_simple; 4425 4426 return hns3_xmit_pkts; 4427 } 4428 4429 static void 4430 hns3_trace_rxtx_function(struct rte_eth_dev *dev) 4431 { 4432 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4433 struct rte_eth_burst_mode rx_mode; 4434 struct rte_eth_burst_mode tx_mode; 4435 4436 memset(&rx_mode, 0, sizeof(rx_mode)); 4437 memset(&tx_mode, 0, sizeof(tx_mode)); 4438 (void)hns3_rx_burst_mode_get(dev, 0, &rx_mode); 4439 (void)hns3_tx_burst_mode_get(dev, 0, &tx_mode); 4440 4441 hns3_dbg(hw, "using rx_pkt_burst: %s, tx_pkt_burst: %s.", 4442 rx_mode.info, tx_mode.info); 4443 } 4444 4445 static void 4446 hns3_eth_dev_fp_ops_config(const struct rte_eth_dev *dev) 4447 { 4448 struct rte_eth_fp_ops *fpo = rte_eth_fp_ops; 4449 uint16_t port_id = dev->data->port_id; 4450 4451 fpo[port_id].rx_pkt_burst = dev->rx_pkt_burst; 4452 fpo[port_id].tx_pkt_burst = dev->tx_pkt_burst; 4453 fpo[port_id].tx_pkt_prepare = dev->tx_pkt_prepare; 4454 fpo[port_id].rx_descriptor_status = dev->rx_descriptor_status; 4455 fpo[port_id].tx_descriptor_status = dev->tx_descriptor_status; 4456 fpo[port_id].rxq.data = dev->data->rx_queues; 4457 fpo[port_id].txq.data = dev->data->tx_queues; 4458 } 4459 4460 void 4461 hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) 4462 { 4463 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 4464 struct hns3_adapter *hns = eth_dev->data->dev_private; 4465 4466 if (hns->hw.adapter_state == HNS3_NIC_STARTED && 4467 __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) { 4468 eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev); 4469 eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status; 4470 eth_dev->tx_pkt_burst = hw->set_link_down ? 
4471 rte_eth_pkt_burst_dummy : 4472 hns3_get_tx_function(eth_dev); 4473 eth_dev->tx_pkt_prepare = hns3_get_tx_prepare(eth_dev); 4474 eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status; 4475 } else { 4476 eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; 4477 eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; 4478 eth_dev->tx_pkt_prepare = NULL; 4479 } 4480 4481 hns3_trace_rxtx_function(eth_dev); 4482 hns3_eth_dev_fp_ops_config(eth_dev); 4483 } 4484 4485 void 4486 hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 4487 struct rte_eth_rxq_info *qinfo) 4488 { 4489 struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id]; 4490 4491 qinfo->mp = rxq->mb_pool; 4492 qinfo->nb_desc = rxq->nb_rx_desc; 4493 qinfo->scattered_rx = dev->data->scattered_rx; 4494 /* Report the HW Rx buffer length to user */ 4495 qinfo->rx_buf_size = rxq->rx_buf_len; 4496 4497 /* 4498 * If there are no available Rx buffer descriptors, incoming packets 4499 * are always dropped by hardware based on hns3 network engine. 4500 */ 4501 qinfo->conf.rx_drop_en = 1; 4502 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 4503 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 4504 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 4505 } 4506 4507 void 4508 hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 4509 struct rte_eth_txq_info *qinfo) 4510 { 4511 struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id]; 4512 4513 qinfo->nb_desc = txq->nb_tx_desc; 4514 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads; 4515 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; 4516 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 4517 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 4518 } 4519 4520 int 4521 hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 4522 { 4523 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4524 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id]; 4525 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); 4526 int ret; 4527 4528 if (!hns3_dev_get_support(hw, INDEP_TXRX)) 4529 return -ENOTSUP; 4530 4531 rte_spinlock_lock(&hw->lock); 4532 4533 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { 4534 hns3_err(hw, "fail to start Rx queue during resetting."); 4535 rte_spinlock_unlock(&hw->lock); 4536 return -EIO; 4537 } 4538 4539 ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX); 4540 if (ret) { 4541 hns3_err(hw, "fail to reset Rx queue %u, ret = %d.", 4542 rx_queue_id, ret); 4543 rte_spinlock_unlock(&hw->lock); 4544 return ret; 4545 } 4546 4547 if (rxq->sw_ring[0].mbuf != NULL) 4548 hns3_rx_queue_release_mbufs(rxq); 4549 4550 ret = hns3_init_rxq(hns, rx_queue_id); 4551 if (ret) { 4552 hns3_err(hw, "fail to init Rx queue %u, ret = %d.", 4553 rx_queue_id, ret); 4554 rte_spinlock_unlock(&hw->lock); 4555 return ret; 4556 } 4557 4558 hns3_enable_rxq(rxq, true); 4559 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 4560 rte_spinlock_unlock(&hw->lock); 4561 4562 return ret; 4563 } 4564 4565 static void 4566 hns3_reset_sw_rxq(struct hns3_rx_queue *rxq) 4567 { 4568 rxq->next_to_use = 0; 4569 rxq->rx_rearm_start = 0; 4570 rxq->rx_free_hold = 0; 4571 rxq->rx_rearm_nb = 0; 4572 rxq->pkt_first_seg = NULL; 4573 rxq->pkt_last_seg = NULL; 4574 memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc)); 4575 hns3_rxq_vec_setup(rxq); 4576 } 4577 4578 int 4579 hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 4580 { 4581 struct hns3_hw *hw = 
HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4582 struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
4583
4584 if (!hns3_dev_get_support(hw, INDEP_TXRX))
4585 return -ENOTSUP;
4586
4587 rte_spinlock_lock(&hw->lock);
4588
4589 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
4590 hns3_err(hw, "fail to stop Rx queue during resetting.");
4591 rte_spinlock_unlock(&hw->lock);
4592 return -EIO;
4593 }
4594
4595 hns3_enable_rxq(rxq, false);
4596
4597 hns3_rx_queue_release_mbufs(rxq);
4598
4599 hns3_reset_sw_rxq(rxq);
4600 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4601 rte_spinlock_unlock(&hw->lock);
4602
4603 return 0;
4604 }
4605
4606 int
4607 hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4608 {
4609 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4610 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
4611 int ret;
4612
4613 if (!hns3_dev_get_support(hw, INDEP_TXRX))
4614 return -ENOTSUP;
4615
4616 rte_spinlock_lock(&hw->lock);
4617
4618 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
4619 hns3_err(hw, "fail to start Tx queue during resetting.");
4620 rte_spinlock_unlock(&hw->lock);
4621 return -EIO;
4622 }
4623
4624 ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
4625 if (ret) {
4626 hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
4627 tx_queue_id, ret);
4628 rte_spinlock_unlock(&hw->lock);
4629 return ret;
4630 }
4631
4632 hns3_init_txq(txq);
4633 hns3_enable_txq(txq, true);
4634 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4635 rte_spinlock_unlock(&hw->lock);
4636
4637 return ret;
4638 }
4639
4640 int
4641 hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4642 {
4643 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4644 struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
4645
4646 if (!hns3_dev_get_support(hw, INDEP_TXRX))
4647 return -ENOTSUP;
4648
4649 rte_spinlock_lock(&hw->lock);
4650
4651 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
4652 hns3_err(hw, "fail to stop Tx queue during resetting.");
4653 rte_spinlock_unlock(&hw->lock);
4654 return -EIO;
4655 }
4656
4657 hns3_enable_txq(txq, false);
4658 hns3_tx_queue_release_mbufs(txq);
4659 /*
4660 * All the mbufs in sw_ring have been released and all the sw_ring
4661 * entries have been set to NULL. If the upper layer keeps using this
4662 * queue, its stale software state could make the datapath try to
4663 * release sw_ring entries that have already been freed. To avoid
4664 * this, re-initialize the txq.
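 * The Rx stop path above does the same by calling hns3_reset_sw_rxq()
 * after releasing the Rx mbufs.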
4665 */ 4666 hns3_init_txq(txq); 4667 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 4668 rte_spinlock_unlock(&hw->lock); 4669 4670 return 0; 4671 } 4672 4673 static int 4674 hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt) 4675 { 4676 uint16_t next_to_clean = txq->next_to_clean; 4677 uint16_t next_to_use = txq->next_to_use; 4678 uint16_t tx_bd_ready = txq->tx_bd_ready; 4679 struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean]; 4680 struct hns3_desc *desc = &txq->tx_ring[next_to_clean]; 4681 uint32_t idx; 4682 4683 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) 4684 free_cnt = txq->nb_tx_desc; 4685 4686 for (idx = 0; idx < free_cnt; idx++) { 4687 if (next_to_clean == next_to_use) 4688 break; 4689 if (desc->tx.tp_fe_sc_vld_ra_ri & 4690 rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B))) 4691 break; 4692 if (tx_pkt->mbuf != NULL) { 4693 rte_pktmbuf_free_seg(tx_pkt->mbuf); 4694 tx_pkt->mbuf = NULL; 4695 } 4696 next_to_clean++; 4697 tx_bd_ready++; 4698 tx_pkt++; 4699 desc++; 4700 if (next_to_clean == txq->nb_tx_desc) { 4701 tx_pkt = txq->sw_ring; 4702 desc = txq->tx_ring; 4703 next_to_clean = 0; 4704 } 4705 } 4706 4707 if (idx > 0) { 4708 txq->next_to_clean = next_to_clean; 4709 txq->tx_bd_ready = tx_bd_ready; 4710 } 4711 4712 return (int)idx; 4713 } 4714 4715 int 4716 hns3_tx_done_cleanup(void *txq, uint32_t free_cnt) 4717 { 4718 struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq; 4719 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id]; 4720 4721 if (dev->tx_pkt_burst == hns3_xmit_pkts) 4722 return hns3_tx_done_cleanup_full(q, free_cnt); 4723 else if (dev->tx_pkt_burst == rte_eth_pkt_burst_dummy) 4724 return 0; 4725 else 4726 return -ENOTSUP; 4727 } 4728 4729 int 4730 hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) 4731 { 4732 volatile struct hns3_desc *rxdp; 4733 struct hns3_rx_queue *rxq; 4734 struct rte_eth_dev *dev; 4735 uint32_t bd_base_info; 4736 uint16_t desc_id; 4737 4738 rxq = (struct hns3_rx_queue *)rx_queue; 4739 if (offset >= rxq->nb_rx_desc) 4740 return -EINVAL; 4741 4742 desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc; 4743 rxdp = &rxq->rx_ring[desc_id]; 4744 bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info); 4745 dev = &rte_eth_devices[rxq->port_id]; 4746 if (dev->rx_pkt_burst == hns3_recv_pkts_simple || 4747 dev->rx_pkt_burst == hns3_recv_scattered_pkts) { 4748 if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold) 4749 return RTE_ETH_RX_DESC_UNAVAIL; 4750 } else if (dev->rx_pkt_burst == hns3_recv_pkts_vec || 4751 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) { 4752 if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb) 4753 return RTE_ETH_RX_DESC_UNAVAIL; 4754 } else { 4755 return RTE_ETH_RX_DESC_UNAVAIL; 4756 } 4757 4758 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) 4759 return RTE_ETH_RX_DESC_AVAIL; 4760 else 4761 return RTE_ETH_RX_DESC_DONE; 4762 } 4763 4764 int 4765 hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) 4766 { 4767 volatile struct hns3_desc *txdp; 4768 struct hns3_tx_queue *txq; 4769 struct rte_eth_dev *dev; 4770 uint16_t desc_id; 4771 4772 txq = (struct hns3_tx_queue *)tx_queue; 4773 if (offset >= txq->nb_tx_desc) 4774 return -EINVAL; 4775 4776 dev = &rte_eth_devices[txq->port_id]; 4777 if (dev->tx_pkt_burst != hns3_xmit_pkts_simple && 4778 dev->tx_pkt_burst != hns3_xmit_pkts && 4779 dev->tx_pkt_burst != hns3_xmit_pkts_vec_sve && 4780 dev->tx_pkt_burst != hns3_xmit_pkts_vec) 4781 return RTE_ETH_TX_DESC_UNAVAIL; 4782 4783 desc_id = (txq->next_to_use + offset) % txq->nb_tx_desc; 4784 txdp 
= &txq->tx_ring[desc_id];
4785 if (txdp->tx.tp_fe_sc_vld_ra_ri & rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
4786 return RTE_ETH_TX_DESC_FULL;
4787 else
4788 return RTE_ETH_TX_DESC_DONE;
4789 }
4790
4791 uint32_t
4792 hns3_rx_queue_count(void *rx_queue)
4793 {
4794 /*
4795 * Number of BDs that have been processed by the driver
4796 * but have not been notified to the hardware.
4797 */
4798 uint32_t driver_hold_bd_num;
4799 struct hns3_rx_queue *rxq;
4800 const struct rte_eth_dev *dev;
4801 uint32_t fbd_num;
4802
4803 rxq = rx_queue;
4804 dev = &rte_eth_devices[rxq->port_id];
4805
4806 fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
4807 if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
4808 dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
4809 driver_hold_bd_num = rxq->rx_rearm_nb;
4810 else
4811 driver_hold_bd_num = rxq->rx_free_hold;
4812
4813 if (fbd_num <= driver_hold_bd_num)
4814 return 0;
4815 else
4816 return fbd_num - driver_hold_bd_num;
4817 }
4818
4819 void
4820 hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
4821 {
4822 /*
4823 * If the hardware supports the RXD advanced layout, the driver enables
4824 * it by default.
4825 */
4826 if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
4827 hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
4828 }
4829
4830 void
4831 hns3_stop_tx_datapath(struct rte_eth_dev *dev)
4832 {
4833 dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
4834 dev->tx_pkt_prepare = NULL;
4835 hns3_eth_dev_fp_ops_config(dev);
4836
4837 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
4838 return;
4839
4840 rte_wmb();
4841 /* Disable tx datapath on secondary process. */
4842 hns3_mp_req_stop_tx(dev);
4843 /* Prevent crashes when queues are still in use. */
4844 rte_delay_ms(dev->data->nb_tx_queues);
4845 }
4846
4847 void
4848 hns3_start_tx_datapath(struct rte_eth_dev *dev)
4849 {
4850 dev->tx_pkt_burst = hns3_get_tx_function(dev);
4851 dev->tx_pkt_prepare = hns3_get_tx_prepare(dev);
4852 hns3_eth_dev_fp_ops_config(dev);
4853
4854 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
4855 return;
4856
4857 hns3_mp_req_start_tx(dev);
4858 }
4859
4860 void
4861 hns3_stop_rxtx_datapath(struct rte_eth_dev *dev)
4862 {
4863 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4864
4865 hns3_set_rxtx_function(dev);
4866
4867 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
4868 return;
4869
4870 rte_wmb();
4871 /* Disable datapath on secondary process. */
4872 hns3_mp_req_stop_rxtx(dev);
4873 /* Prevent crashes when queues are still in use. */
4874 rte_delay_ms(hw->cfg_max_queues);
4875 }
4876
4877 void
4878 hns3_start_rxtx_datapath(struct rte_eth_dev *dev)
4879 {
4880 hns3_set_rxtx_function(dev);
4881
4882 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
4883 return;
4884
4885 hns3_mp_req_start_rxtx(dev);
4886 }
4887
4888 static int
4889 hns3_monitor_callback(const uint64_t value,
4890 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
4891 {
4892 const uint64_t vld = rte_le_to_cpu_32(BIT(HNS3_RXD_VLD_B));
4893 return (value & vld) == vld ? -1 : 0;
4894 }
4895
4896 int
4897 hns3_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
4898 {
4899 struct hns3_rx_queue *rxq = rx_queue;
4900 struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
4901
4902 pmc->addr = &rxdp->rx.bd_base_info;
4903 pmc->fn = hns3_monitor_callback;
4904 pmc->size = sizeof(uint32_t);
4905
4906 return 0;
4907 }
4908
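/*
 * A minimal usage sketch (illustrative only, not code from this driver): the
 * monitor condition filled in by hns3_get_monitor_addr() is normally consumed
 * through the generic ethdev and power-intrinsics API, roughly as below;
 * port_id, queue_id and timeout_cycles are placeholder variables.
 *
 *	struct rte_power_monitor_cond pmc;
 *
 *	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) == 0)
 *		rte_power_monitor(&pmc, rte_get_tsc_cycles() + timeout_cycles);
 *
 * hns3_monitor_callback() lets the core enter the power-optimized wait only
 * while the next Rx descriptor's VLD bit is still clear.
 */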