/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Marvell International Ltd.
 * Copyright(c) 2018 Semihalf.
 * All rights reserved.
 */

#include "mvneta_rxtx.h"

#define MVNETA_PKT_EFFEC_OFFS (MRVL_NETA_PKT_OFFS + MV_MH_SIZE)

#define MRVL_NETA_DEFAULT_TC 0

/** Maximum number of descriptors in shadow queue. Must be power of 2 */
#define MRVL_NETA_TX_SHADOWQ_SIZE MRVL_NETA_TXD_MAX

/** Shadow queue size mask (since shadow queue size is power of 2) */
#define MRVL_NETA_TX_SHADOWQ_MASK (MRVL_NETA_TX_SHADOWQ_SIZE - 1)

/** Minimum number of sent buffers to release from shadow queue to BM */
#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN	16

/** Maximum number of sent buffers to release from shadow queue to BM */
#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX	64

#define MVNETA_COOKIE_ADDR_INVALID ~0ULL
#define MVNETA_COOKIE_HIGH_ADDR_SHIFT	(sizeof(neta_cookie_t) * 8)
#define MVNETA_COOKIE_HIGH_ADDR_MASK	(~0ULL << MVNETA_COOKIE_HIGH_ADDR_SHIFT)

/* Cache the high part of the mbuf virtual address (above the cookie width)
 * once; the full pointer is later rebuilt as (cookie_addr_high | cookie).
 */
#define MVNETA_SET_COOKIE_HIGH_ADDR(addr) {				\
	if (unlikely(cookie_addr_high == MVNETA_COOKIE_ADDR_INVALID))	\
		cookie_addr_high =					\
			(uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK;\
}

#define MVNETA_CHECK_COOKIE_HIGH_ADDR(addr)			\
	((likely(cookie_addr_high ==				\
	((uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK))) ? 1 : 0)

struct mvneta_rxq {
	struct mvneta_priv *priv;
	struct rte_mempool *mp;
	int queue_id;
	int port_id;
	int size;
	int cksum_enabled;
	uint64_t bytes_recv;
	uint64_t drop_mac;
	uint64_t pkts_processed;
};

/*
 * To use buffer harvesting based on loopback port, a shadow queue structure
 * was introduced for buffer information bookkeeping.
 */
struct mvneta_shadow_txq {
	int head;		/* write index - used when sending buffers */
	int tail;		/* read index - used when releasing buffers */
	u16 size;		/* queue occupied size */
	struct neta_buff_inf ent[MRVL_NETA_TX_SHADOWQ_SIZE]; /* q entries */
};

struct mvneta_txq {
	struct mvneta_priv *priv;
	int queue_id;
	int port_id;
	uint64_t bytes_sent;
	struct mvneta_shadow_txq shadow_txq;
	int tx_deferred_start;
};

static uint64_t cookie_addr_high = MVNETA_COOKIE_ADDR_INVALID;
static uint16_t rx_desc_free_thresh = MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN;

static inline int
mvneta_buffs_refill(struct mvneta_priv *priv, struct mvneta_rxq *rxq, u16 *num)
{
	struct rte_mbuf *mbufs[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];
	struct neta_buff_inf entries[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];
	int i, ret;
	uint16_t nb_desc = *num;

	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, nb_desc);
	if (ret) {
		MVNETA_LOG(ERR, "Failed to allocate %u mbufs.", nb_desc);
		*num = 0;
		return -1;
	}

	MVNETA_SET_COOKIE_HIGH_ADDR(mbufs[0]);

	for (i = 0; i < nb_desc; i++) {
		if (unlikely(!MVNETA_CHECK_COOKIE_HIGH_ADDR(mbufs[i]))) {
			MVNETA_LOG(ERR,
				"mbuf virt high addr 0x%lx out of range 0x%lx",
				(uint64_t)mbufs[i] >> 32,
				cookie_addr_high >> 32);
			*num = 0;
			goto out;
		}
		entries[i].addr = rte_mbuf_data_iova_default(mbufs[i]);
		entries[i].cookie = (neta_cookie_t)(uint64_t)mbufs[i];
	}
	neta_ppio_inq_put_buffs(priv->ppio, rxq->queue_id, entries, num);

out:
	for (i = *num; i < nb_desc; i++)
		rte_pktmbuf_free(mbufs[i]);

	return 0;
}

/**
 * Allocate buffers from mempool
 * and store addresses in rx descriptors.
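 * Refilling is done in bursts of at most
 * MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX descriptors per call to
 * mvneta_buffs_refill().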
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static inline int
mvneta_buffs_alloc(struct mvneta_priv *priv, struct mvneta_rxq *rxq, int *num)
{
	uint16_t nb_desc, nb_desc_burst, sent = 0;
	int ret = 0;

	nb_desc = *num;

	do {
		nb_desc_burst =
			(nb_desc < MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX) ?
			nb_desc : MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX;

		ret = mvneta_buffs_refill(priv, rxq, &nb_desc_burst);
		if (unlikely(ret || !nb_desc_burst))
			break;

		sent += nb_desc_burst;
		nb_desc -= nb_desc_burst;

	} while (nb_desc);

	*num = sent;

	return ret;
}

static inline void
mvneta_fill_shadowq(struct mvneta_shadow_txq *sq, struct rte_mbuf *buf)
{
	sq->ent[sq->head].cookie = (uint64_t)buf;
	sq->ent[sq->head].addr = buf ?
		rte_mbuf_data_iova_default(buf) : 0;

	sq->head = (sq->head + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
	sq->size++;
}

static inline void
mvneta_fill_desc(struct neta_ppio_desc *desc, struct rte_mbuf *buf)
{
	neta_ppio_outq_desc_reset(desc);
	neta_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));
	neta_ppio_outq_desc_set_pkt_offset(desc, 0);
	neta_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf));
}

/**
 * Release already sent buffers to mempool.
 *
 * @param ppio
 *   Pointer to the port structure.
 * @param sq
 *   Pointer to the shadow queue.
 * @param qid
 *   Queue id number.
 */
static inline void
mvneta_sent_buffers_free(struct neta_ppio *ppio,
			 struct mvneta_shadow_txq *sq, int qid)
{
	struct neta_buff_inf *entry;
	uint16_t nb_done = 0;
	int i;
	int tail = sq->tail;

	neta_ppio_get_num_outq_done(ppio, qid, &nb_done);

	if (nb_done > sq->size) {
		MVNETA_LOG(ERR, "nb_done: %d, sq->size %d",
			   nb_done, sq->size);
		return;
	}

	for (i = 0; i < nb_done; i++) {
		entry = &sq->ent[tail];

		if (unlikely(!entry->addr)) {
			MVNETA_LOG(DEBUG,
				"Shadow memory @%d: cookie(%lx), pa(%lx)!",
				tail, (u64)entry->cookie,
				(u64)entry->addr);
			tail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
			continue;
		}

		struct rte_mbuf *mbuf;

		mbuf = (struct rte_mbuf *)
			(cookie_addr_high | entry->cookie);
		rte_pktmbuf_free(mbuf);
		tail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
	}

	sq->tail = tail;
	sq->size -= nb_done;
}

/**
 * Return packet type information and l3/l4 offsets.
 *
 * @param desc
 *   Pointer to the received packet descriptor.
 * @param l3_offset
 *   l3 packet offset.
 * @param l4_offset
 *   l4 packet offset.
 *
 * @return
 *   Packet type information.
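 *   (a combination of RTE_PTYPE_* flags).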
 */
static inline uint64_t
mvneta_desc_to_packet_type_and_offset(struct neta_ppio_desc *desc,
				      uint8_t *l3_offset, uint8_t *l4_offset)
{
	enum neta_inq_l3_type l3_type;
	enum neta_inq_l4_type l4_type;
	uint64_t packet_type;

	neta_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
	neta_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);

	packet_type = RTE_PTYPE_L2_ETHER;

	if (NETA_RXD_GET_VLAN_INFO(desc))
		packet_type |= RTE_PTYPE_L2_ETHER_VLAN;

	switch (l3_type) {
	case NETA_INQ_L3_TYPE_IPV4_BAD:
	case NETA_INQ_L3_TYPE_IPV4_OK:
		packet_type |= RTE_PTYPE_L3_IPV4;
		break;
	case NETA_INQ_L3_TYPE_IPV6:
		packet_type |= RTE_PTYPE_L3_IPV6;
		break;
	default:
		packet_type |= RTE_PTYPE_UNKNOWN;
		MVNETA_LOG(DEBUG, "Failed to recognize l3 packet type");
		break;
	}

	switch (l4_type) {
	case NETA_INQ_L4_TYPE_TCP:
		packet_type |= RTE_PTYPE_L4_TCP;
		break;
	case NETA_INQ_L4_TYPE_UDP:
		packet_type |= RTE_PTYPE_L4_UDP;
		break;
	default:
		packet_type |= RTE_PTYPE_UNKNOWN;
		MVNETA_LOG(DEBUG, "Failed to recognize l4 packet type");
		break;
	}

	return packet_type;
}

/**
 * Prepare offload information.
 *
 * @param ol_flags
 *   Offload flags.
 * @param l3_type
 *   Pointer to the neta_outq_l3_type enum.
 * @param l4_type
 *   Pointer to the neta_outq_l4_type enum.
 * @param gen_l3_cksum
 *   Will be set to 1 in case l3 checksum is computed.
 * @param gen_l4_cksum
 *   Will be set to 1 in case l4 checksum is computed.
 */
static inline void
mvneta_prepare_proto_info(uint64_t ol_flags,
			  enum neta_outq_l3_type *l3_type,
			  enum neta_outq_l4_type *l4_type,
			  int *gen_l3_cksum,
			  int *gen_l4_cksum)
{
	/*
	 * Based on ol_flags prepare information
	 * for neta_ppio_outq_desc_set_proto_info() which setups descriptor
	 * for offloading.
	 * In most of the checksum cases ipv4 must be set, so this is the
	 * default value.
	 */
	*l3_type = NETA_OUTQ_L3_TYPE_IPV4;
	*gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;

	if (ol_flags & PKT_TX_IPV6) {
		*l3_type = NETA_OUTQ_L3_TYPE_IPV6;
		/* no checksum for ipv6 header */
		*gen_l3_cksum = 0;
	}

	/* L4 checksum flags share bits, so compare the whole
	 * PKT_TX_L4_MASK field instead of testing single flags.
	 */
	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) {
		*l4_type = NETA_OUTQ_L4_TYPE_TCP;
		*gen_l4_cksum = 1;
	} else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) {
		*l4_type = NETA_OUTQ_L4_TYPE_UDP;
		*gen_l4_cksum = 1;
	} else {
		*l4_type = NETA_OUTQ_L4_TYPE_OTHER;
		/* no checksum for other type */
		*gen_l4_cksum = 0;
	}
}

/**
 * Get offload information from the received packet descriptor.
 *
 * @param desc
 *   Pointer to the received packet descriptor.
 *
 * @return
 *   Mbuf offload flags.
 */
static inline uint64_t
mvneta_desc_to_ol_flags(struct neta_ppio_desc *desc)
{
	uint64_t flags;
	enum neta_inq_desc_status status;

	status = neta_ppio_inq_desc_get_l3_pkt_error(desc);
	if (unlikely(status != NETA_DESC_ERR_OK))
		flags = PKT_RX_IP_CKSUM_BAD;
	else
		flags = PKT_RX_IP_CKSUM_GOOD;

	status = neta_ppio_inq_desc_get_l4_pkt_error(desc);
	if (unlikely(status != NETA_DESC_ERR_OK))
		flags |= PKT_RX_L4_CKSUM_BAD;
	else
		flags |= PKT_RX_L4_CKSUM_GOOD;

	return flags;
}

/**
 * DPDK callback for transmit.
 *
 * @param txq
 *   Generic pointer to the transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
 */
static uint16_t
mvneta_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct mvneta_txq *q = txq;
	struct mvneta_shadow_txq *sq;
	struct neta_ppio_desc descs[nb_pkts];
	int i, bytes_sent = 0;
	uint16_t num, sq_free_size;
	uint64_t addr;

	sq = &q->shadow_txq;
	if (unlikely(!nb_pkts || !q->priv->ppio))
		return 0;

	if (sq->size)
		mvneta_sent_buffers_free(q->priv->ppio,
					 sq, q->queue_id);

	sq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;
	if (unlikely(nb_pkts > sq_free_size)) {
		MVNETA_LOG(DEBUG,
			"No room in shadow queue for %d packets! %d packets will be sent.",
			nb_pkts, sq_free_size);
		nb_pkts = sq_free_size;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = tx_pkts[i];
		int gen_l3_cksum, gen_l4_cksum;
		enum neta_outq_l3_type l3_type;
		enum neta_outq_l4_type l4_type;

		/* Fill first mbuf info in shadow queue */
		mvneta_fill_shadowq(sq, mbuf);
		mvneta_fill_desc(&descs[i], mbuf);

		bytes_sent += rte_pktmbuf_pkt_len(mbuf);

		if (!(mbuf->ol_flags & MVNETA_TX_PKT_OFFLOADS))
			continue;
		mvneta_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type,
					  &gen_l3_cksum, &gen_l4_cksum);

		neta_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
						   mbuf->l2_len,
						   mbuf->l2_len + mbuf->l3_len,
						   gen_l3_cksum, gen_l4_cksum);
	}
	num = nb_pkts;
	neta_ppio_send(q->priv->ppio, q->queue_id, descs, &nb_pkts);

	/* number of packets that were not sent */
	if (unlikely(num > nb_pkts)) {
		/* Roll back shadow queue entries of the unsent packets */
		for (i = nb_pkts; i < num; i++) {
			sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE + sq->head - 1) &
				MRVL_NETA_TX_SHADOWQ_MASK;
			addr = cookie_addr_high | sq->ent[sq->head].cookie;
			bytes_sent -=
				rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
		}
		sq->size -= num - nb_pkts;
	}

	q->bytes_sent += bytes_sent;

	return nb_pkts;
}

/** DPDK callback for S/G transmit.
 *
 * @param txq
 *   Generic pointer to the transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
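 *   Each segment occupies one hardware descriptor; a packet with more than
 *   NETA_PPIO_DESC_NUM_FRAGS segments terminates the burst and is not sent.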
 */
static uint16_t
mvneta_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct mvneta_txq *q = txq;
	struct mvneta_shadow_txq *sq;
	struct neta_ppio_desc descs[nb_pkts * NETA_PPIO_DESC_NUM_FRAGS];
	struct neta_ppio_sg_pkts pkts;
	uint8_t frags[nb_pkts];
	int i, j, bytes_sent = 0;
	int tail, tail_first;
	uint16_t num, sq_free_size;
	uint16_t nb_segs, total_descs = 0;
	uint64_t addr;

	sq = &q->shadow_txq;
	pkts.frags = frags;
	pkts.num = 0;

	if (unlikely(!q->priv->ppio))
		return 0;

	if (sq->size)
		mvneta_sent_buffers_free(q->priv->ppio,
					 sq, q->queue_id);
	/* Save shadow queue free size */
	sq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;

	tail = 0;
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = tx_pkts[i];
		struct rte_mbuf *seg = NULL;
		int gen_l3_cksum, gen_l4_cksum;
		enum neta_outq_l3_type l3_type;
		enum neta_outq_l4_type l4_type;

		nb_segs = mbuf->nb_segs;
		total_descs += nb_segs;

		/*
		 * Check if total_descs does not exceed
		 * shadow queue free size
		 */
		if (unlikely(total_descs > sq_free_size)) {
			total_descs -= nb_segs;
			MVNETA_LOG(DEBUG,
				"No room in shadow queue for %d packets! "
				"%d packets will be sent.",
				nb_pkts, i);
			break;
		}

		/* Check if nb_segs does not exceed the max nb of desc per
		 * fragmented packet
		 */
		if (unlikely(nb_segs > NETA_PPIO_DESC_NUM_FRAGS)) {
			total_descs -= nb_segs;
			MVNETA_LOG(ERR,
				"Too many segments. Packet won't be sent.");
			break;
		}

		pkts.frags[pkts.num] = nb_segs;
		pkts.num++;
		tail_first = tail;

		seg = mbuf;
		for (j = 0; j < nb_segs - 1; j++) {
			/* For the subsequent segments, set shadow queue
			 * buffer to NULL
			 */
			mvneta_fill_shadowq(sq, NULL);
			mvneta_fill_desc(&descs[tail], seg);

			tail++;
			seg = seg->next;
		}
		/* Put first mbuf info in last shadow queue entry */
		mvneta_fill_shadowq(sq, mbuf);
		/* Update descriptor with last segment */
		mvneta_fill_desc(&descs[tail++], seg);

		bytes_sent += rte_pktmbuf_pkt_len(mbuf);

		if (!(mbuf->ol_flags & MVNETA_TX_PKT_OFFLOADS))
			continue;
		mvneta_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type,
					  &gen_l3_cksum, &gen_l4_cksum);

		neta_ppio_outq_desc_set_proto_info(&descs[tail_first],
						   l3_type, l4_type,
						   mbuf->l2_len,
						   mbuf->l2_len + mbuf->l3_len,
						   gen_l3_cksum, gen_l4_cksum);
	}
	num = total_descs;
	neta_ppio_send_sg(q->priv->ppio, q->queue_id, descs, &total_descs,
			  &pkts);

	/* number of packets that were not sent */
	if (unlikely(num > total_descs)) {
		for (i = total_descs; i < num; i++) {
			sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE +
					sq->head - 1) &
					MRVL_NETA_TX_SHADOWQ_MASK;
			addr = sq->ent[sq->head].cookie;
			if (addr) {
				struct rte_mbuf *mbuf;

				mbuf = (struct rte_mbuf *)
					(cookie_addr_high | addr);
				bytes_sent -= rte_pktmbuf_pkt_len(mbuf);
			}
		}
		sq->size -= num - total_descs;
		nb_pkts = pkts.num;
	}

	q->bytes_sent += bytes_sent;

	return nb_pkts;
}

/**
 * Set tx burst function according to offload flag.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
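 *
 * The multi-segment callback is selected when priv->multiseg is set;
 * otherwise the single-segment callback is used.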
 */
void
mvneta_set_tx_function(struct rte_eth_dev *dev)
{
	struct mvneta_priv *priv = dev->data->dev_private;

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (priv->multiseg) {
		MVNETA_LOG(INFO, "Using multi-segment tx callback");
		dev->tx_pkt_burst = mvneta_tx_sg_pkt_burst;
	} else {
		MVNETA_LOG(INFO, "Using single-segment tx callback");
		dev->tx_pkt_burst = mvneta_tx_pkt_burst;
	}
}

/**
 * DPDK callback for receive.
 *
 * @param rxq
 *   Generic pointer to the receive queue.
 * @param rx_pkts
 *   Array to store received packets.
 * @param nb_pkts
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received.
 */
uint16_t
mvneta_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct mvneta_rxq *q = rxq;
	struct neta_ppio_desc descs[nb_pkts];
	int i, ret, rx_done = 0, rx_dropped = 0;

	if (unlikely(!q || !q->priv->ppio))
		return 0;

	ret = neta_ppio_recv(q->priv->ppio, q->queue_id,
			     descs, &nb_pkts);

	if (unlikely(ret < 0)) {
		MVNETA_LOG(ERR, "Failed to receive packets");
		return 0;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf;
		uint8_t l3_offset, l4_offset;
		enum neta_inq_desc_status status;
		uint64_t addr;

		addr = cookie_addr_high |
			neta_ppio_inq_desc_get_cookie(&descs[i]);
		mbuf = (struct rte_mbuf *)addr;

		rte_pktmbuf_reset(mbuf);

		/* drop packet in case of mac, overrun or resource error */
		status = neta_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
		if (unlikely(status != NETA_DESC_ERR_OK)) {
			/* Release the mbuf to the mempool since
			 * it won't be transferred to tx path
			 */
			rte_pktmbuf_free(mbuf);
			q->drop_mac++;
			rx_dropped++;
			continue;
		}

		mbuf->data_off += MVNETA_PKT_EFFEC_OFFS;
		mbuf->pkt_len = neta_ppio_inq_desc_get_pkt_len(&descs[i]);
		mbuf->data_len = mbuf->pkt_len;
		mbuf->port = q->port_id;
		mbuf->packet_type =
			mvneta_desc_to_packet_type_and_offset(&descs[i],
							      &l3_offset,
							      &l4_offset);
		mbuf->l2_len = l3_offset;
		mbuf->l3_len = l4_offset - l3_offset;

		if (likely(q->cksum_enabled))
			mbuf->ol_flags = mvneta_desc_to_ol_flags(&descs[i]);

		rx_pkts[rx_done++] = mbuf;
		q->bytes_recv += mbuf->pkt_len;
	}
	q->pkts_processed += rx_done + rx_dropped;

	if (q->pkts_processed > rx_desc_free_thresh) {
		int buf_to_refill = rx_desc_free_thresh;

		ret = mvneta_buffs_alloc(q->priv, q, &buf_to_refill);
		if (ret)
			MVNETA_LOG(ERR, "Refill failed");
		q->pkts_processed -= buf_to_refill;
	}

	return rx_done;
}

/**
 * DPDK callback to configure the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative error value otherwise.
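 *
 * If the mempool buffers are too small to hold max_rx_pkt_len bytes of data,
 * max_rx_pkt_len is reduced to match the available mbuf data room.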
 */
int
mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		      unsigned int socket,
		      const struct rte_eth_rxconf *conf __rte_unused,
		      struct rte_mempool *mp)
{
	struct mvneta_priv *priv = dev->data->dev_private;
	struct mvneta_rxq *rxq;
	uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
	uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

	frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MVNETA_PKT_EFFEC_OFFS;

	if (frame_size < max_rx_pkt_len) {
		MVNETA_LOG(ERR,
			"Mbuf size must be increased to %u bytes to hold up "
			"to %u bytes of data.",
			buf_size + max_rx_pkt_len - frame_size,
			max_rx_pkt_len);
		dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
		MVNETA_LOG(INFO, "Setting max rx pkt len to %u",
			dev->data->dev_conf.rxmode.max_rx_pkt_len);
	}

	if (dev->data->rx_queues[idx]) {
		rte_free(dev->data->rx_queues[idx]);
		dev->data->rx_queues[idx] = NULL;
	}

	rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
	if (!rxq)
		return -ENOMEM;

	rxq->priv = priv;
	rxq->mp = mp;
	rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &
			     DEV_RX_OFFLOAD_IPV4_CKSUM;
	rxq->queue_id = idx;
	rxq->port_id = dev->data->port_id;
	rxq->size = desc;
	rx_desc_free_thresh = RTE_MIN(rx_desc_free_thresh, (desc / 2));
	priv->ppio_params.inqs_params.tcs_params[MRVL_NETA_DEFAULT_TC].size =
		desc;

	dev->data->rx_queues[idx] = rxq;

	return 0;
}

/**
 * DPDK callback to configure the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Transmit queue index.
 * @param desc
 *   Number of descriptors to configure in the queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Tx queue configuration parameters.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
int
mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		      unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct mvneta_priv *priv = dev->data->dev_private;
	struct mvneta_txq *txq;

	if (dev->data->tx_queues[idx]) {
		rte_free(dev->data->tx_queues[idx]);
		dev->data->tx_queues[idx] = NULL;
	}

	txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
	if (!txq)
		return -ENOMEM;

	txq->priv = priv;
	txq->queue_id = idx;
	txq->port_id = dev->data->port_id;
	txq->tx_deferred_start = conf->tx_deferred_start;
	dev->data->tx_queues[idx] = txq;

	priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
	priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;

	return 0;
}

/**
 * DPDK callback to release the transmit queue.
 *
 * @param txq
 *   Generic transmit queue pointer.
 */
void
mvneta_tx_queue_release(void *txq)
{
	struct mvneta_txq *q = txq;

	if (!q)
		return;

	rte_free(q);
}

/**
 * Return mbufs to mempool.
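 * Mbuf pointers are rebuilt from the descriptor cookies
 * (cookie_addr_high | cookie) before the mbufs are freed.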
 *
 * @param desc
 *   Array of rx descriptors.
 * @param num
 *   Number of descriptors in the array.
 */
static void
mvneta_recv_buffs_free(struct neta_ppio_desc *desc, uint16_t num)
{
	uint64_t addr;
	uint16_t i;

	for (i = 0; i < num; i++) {
		if (desc) {
			addr = cookie_addr_high |
			       neta_ppio_inq_desc_get_cookie(desc);
			if (addr)
				rte_pktmbuf_free((struct rte_mbuf *)addr);
			desc++;
		}
	}
}

int
mvneta_alloc_rx_bufs(struct rte_eth_dev *dev)
{
	struct mvneta_priv *priv = dev->data->dev_private;
	int ret = 0, i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct mvneta_rxq *rxq = dev->data->rx_queues[i];
		int num = rxq->size;

		ret = mvneta_buffs_alloc(priv, rxq, &num);
		if (ret || num != rxq->size) {
			rte_free(rxq);
			return ret;
		}
	}

	return 0;
}

/**
 * Flush single receive queue.
 *
 * @param rxq
 *   Pointer to rx queue structure.
 */
static void
mvneta_rx_queue_flush(struct mvneta_rxq *rxq)
{
	struct neta_ppio_desc *descs;
	struct neta_buff_inf *bufs;
	uint16_t num;
	int ret, i;

	descs = rte_malloc("rxdesc", MRVL_NETA_RXD_MAX * sizeof(*descs), 0);
	bufs = rte_malloc("buffs", MRVL_NETA_RXD_MAX * sizeof(*bufs), 0);
	if (unlikely(!descs || !bufs)) {
		MVNETA_LOG(ERR, "Failed to allocate rx queue flush buffers");
		goto out;
	}

	do {
		num = MRVL_NETA_RXD_MAX;
		ret = neta_ppio_recv(rxq->priv->ppio,
				     rxq->queue_id,
				     descs, &num);
		mvneta_recv_buffs_free(descs, num);
	} while (ret == 0 && num);

	rxq->pkts_processed = 0;

	num = MRVL_NETA_RXD_MAX;

	neta_ppio_inq_get_all_buffs(rxq->priv->ppio, rxq->queue_id, bufs, &num);
	MVNETA_LOG(INFO, "freeing %u unused bufs.", num);

	for (i = 0; i < num; i++) {
		uint64_t addr;
		if (bufs[i].cookie) {
			addr = cookie_addr_high | bufs[i].cookie;
			rte_pktmbuf_free((struct rte_mbuf *)addr);
		}
	}

out:
	rte_free(descs);
	rte_free(bufs);
}

/**
 * Flush single transmit queue.
 *
 * @param txq
 *   Pointer to tx queue structure.
 */
static void
mvneta_tx_queue_flush(struct mvneta_txq *txq)
{
	struct mvneta_shadow_txq *sq = &txq->shadow_txq;

	if (sq->size)
		mvneta_sent_buffers_free(txq->priv->ppio, sq,
					 txq->queue_id);

	/* free the rest of them */
	while (sq->tail != sq->head) {
		uint64_t addr = cookie_addr_high |
			sq->ent[sq->tail].cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
		sq->tail = (sq->tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
	}
	memset(sq, 0, sizeof(*sq));
}

void
mvneta_flush_queues(struct rte_eth_dev *dev)
{
	int i;

	MVNETA_LOG(INFO, "Flushing rx queues");
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct mvneta_rxq *rxq = dev->data->rx_queues[i];

		mvneta_rx_queue_flush(rxq);
	}

	MVNETA_LOG(INFO, "Flushing tx queues");
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mvneta_txq *txq = dev->data->tx_queues[i];

		mvneta_tx_queue_flush(txq);
	}
}

/**
 * DPDK callback to release the receive queue.
 *
 * @param rxq
 *   Generic receive queue pointer.
 */
void
mvneta_rx_queue_release(void *rxq)
{
	struct mvneta_rxq *q = rxq;

	if (!q)
		return;

	/* If dev_stop was called already, mbufs are already
	 * returned to mempool and ppio is deinitialized.
	 * Skip this step.
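	 * (in that case q->priv->ppio is NULL and the flush below is skipped).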
	 */

	if (q->priv->ppio)
		mvneta_rx_queue_flush(q);

	rte_free(rxq);
}

/**
 * DPDK callback to get information about specific receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Receive queue index.
 * @param qinfo
 *   Receive queue information structure.
 */
void
mvneta_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		    struct rte_eth_rxq_info *qinfo)
{
	struct mvneta_rxq *q = dev->data->rx_queues[rx_queue_id];

	qinfo->mp = q->mp;
	qinfo->nb_desc = q->size;
}

/**
 * DPDK callback to get information about specific transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param tx_queue_id
 *   Transmit queue index.
 * @param qinfo
 *   Transmit queue information structure.
 */
void
mvneta_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		    struct rte_eth_txq_info *qinfo)
{
	struct mvneta_priv *priv = dev->data->dev_private;

	qinfo->nb_desc =
		priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
}