/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_H_

#include <assert.h> /* static_assert */

#include <rte_common.h>
#include <rte_mbuf.h>

#include <mlx5_prm.h>

#include "mlx5_autoconf.h"

/* HW checksum offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
	 DEV_TX_OFFLOAD_UDP_CKSUM | \
	 DEV_TX_OFFLOAD_TCP_CKSUM | \
	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)

/*
 * Compile time sanity check for vectorized functions.
 */

#define S_ASSERT_RTE_MBUF(s) \
	static_assert(s, "A field of struct rte_mbuf is changed")
#define S_ASSERT_MLX5_CQE(s) \
	static_assert(s, "A field of struct mlx5_cqe is changed")

/* rxq_cq_decompress_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

/* rxq_cq_to_ptype_oflags_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
		  offsetof(struct rte_mbuf, rearm_data) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
		  RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));

/* rxq_burst_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
#if (RTE_CACHE_LINE_SIZE == 128)
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
#else
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
#endif
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
		  offsetof(struct mlx5_cqe, pkt_info) + 12);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) + 11 ==
		  offsetof(struct mlx5_cqe, hdr_type_etc));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, lro_num_seg) + 12 ==
		  offsetof(struct mlx5_cqe, byte_cnt));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
		  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
		  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
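
/*
 * Illustration only -- not compiled into the driver. The asserts above
 * exist because the vectorized Rx paths (e.g. rxq_cq_to_ptype_oflags_v())
 * write whole 8- and 16-byte lanes rather than individual mbuf fields.
 * A minimal sketch of the idea, assuming x86 SSE intrinsics and
 * hypothetical `mb` (mbuf pointer), `rearm` and `desc` vectors:
 *
 *	// One store covers rearm_data (8B) and ol_flags (8B),
 *	// hence the "ol_flags == rearm_data + 8" assert.
 *	_mm_store_si128((__m128i *)&mb->rearm_data, rearm);
 *
 *	// One store covers packet_type, pkt_len, data_len/vlan_tci and
 *	// hash, hence the "+4/+8/+12" offset asserts.
 *	_mm_storeu_si128((__m128i *)&mb->rx_descriptor_fields1, desc);
 *
 * Any relayout of struct rte_mbuf or struct mlx5_cqe would silently
 * corrupt neighboring fields, so it is caught at compile time instead.
 */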

/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param n
 *   Number of buffers to be replenished.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	const uint16_t q_mask = q_n - 1;
	uint16_t elts_idx = rxq->rq_ci & q_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	volatile struct mlx5_wqe_data_seg *wq =
		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
	unsigned int i;

	MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
	MLX5_ASSERT(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
	MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) >
		    MLX5_VPMD_DESCS_PER_LOOP);
	/* Do not cross the queue end. */
	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
		rxq->stats.rx_nombuf += n;
		return;
	}
	for (i = 0; i < n; ++i) {
		void *buf_addr;

		/*
		 * To support mbufs with externally attached data buffers,
		 * read the buf_addr pointer instead of computing it with
		 * rte_mbuf_buf_addr(). This touches the mbuf itself and
		 * may impact performance.
		 */
		buf_addr = elts[i]->buf_addr;
		wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
					      RTE_PKTMBUF_HEADROOM);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
	}
	rxq->rq_ci += n;
	/* Prevent overflowing into consumed mbufs. */
	elts_idx = rxq->rq_ci & q_mask;
	for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
		(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
	rte_cio_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}

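/*
 * Typical call site -- a simplified sketch only, not the exact driver
 * code. The vectorized Rx burst computes the number of free descriptor
 * slots (`repl_n` is hypothetical shorthand here) and replenishes only
 * once the threshold is reached, amortizing the mempool bulk get and
 * the doorbell write across many descriptors:
 *
 *	uint16_t repl_n = q_n - (uint16_t)(rxq->rq_ci - rxq->rq_pi);
 *
 *	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
 *		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 */

#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */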