/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_H_

#include <rte_common.h>
#include <rte_mbuf.h>

#include "mlx5_autoconf.h"
#include "mlx5_prm.h"

/* HW checksum offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
	 DEV_TX_OFFLOAD_UDP_CKSUM | \
	 DEV_TX_OFFLOAD_TCP_CKSUM | \
	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)

/* HW offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_OFFLOAD_CAP \
	(MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
	 DEV_TX_OFFLOAD_MULTI_SEGS)
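
/*
 * Illustrative snippet added for documentation only; it is not part of the
 * original header and the exact check in the driver may differ. Device
 * setup code would typically allow the vectorized Tx burst only when every
 * requested Tx offload is covered by MLX5_VEC_TX_OFFLOAD_CAP, e.g.:
 *
 *	if (requested_tx_offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
 *		fall back to the scalar Tx burst;
 *
 * The snippet is kept inside a comment because the DEV_TX_OFFLOAD_* flags
 * are only available where rte_ethdev.h is included.
 */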

/*
 * Compile-time sanity checks for vectorized functions.
 */

#define S_ASSERT_RTE_MBUF(s) \
	static_assert(s, "A field of struct rte_mbuf is changed")
#define S_ASSERT_MLX5_CQE(s) \
	static_assert(s, "A field of struct mlx5_cqe is changed")

/* rxq_cq_decompress_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

/* rxq_cq_to_ptype_oflags_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
		  offsetof(struct rte_mbuf, rearm_data) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
		  RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));

/* rxq_burst_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
#if (RTE_CACHE_LINE_SIZE == 128)
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
#else
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
#endif
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
		  offsetof(struct mlx5_cqe, pkt_info) + 12);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
		  sizeof(((struct mlx5_cqe *)0)->rsvd1) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd2) +
		  sizeof(((struct mlx5_cqe *)0)->rsvd2) ==
		  offsetof(struct mlx5_cqe, byte_cnt));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
		  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
		  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);

/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param n
 *   Number of buffers to be replenished.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	const uint16_t q_mask = q_n - 1;
	uint16_t elts_idx = rxq->rq_ci & q_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
	unsigned int i;

	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
	assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
	/* Do not cross the queue end. */
	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
		rxq->stats.rx_nombuf += n;
		return;
	}
	for (i = 0; i < n; ++i)
		wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
					      RTE_PKTMBUF_HEADROOM);
	rxq->rq_ci += n;
	/* Prevent overflowing into consumed mbufs. */
	elts_idx = rxq->rq_ci & q_mask;
	for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
		(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
	/* Ensure WQE updates are written before updating the doorbell record. */
	rte_io_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}
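
/*
 * Illustrative usage sketch added for documentation; it is not part of the
 * original driver and the helper name is hypothetical. It shows how a
 * vectorized Rx burst routine would typically batch its calls to
 * mlx5_rx_replenish_bulk_mbuf(): only once enough descriptors have been
 * consumed, so that the doorbell write is amortized over many packets.
 */
static inline void
mlx5_rx_maybe_replenish(struct mlx5_rxq_data *rxq)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	/* Slots that can be refilled: ring size minus outstanding descriptors. */
	uint16_t repl_n = (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi));

	/* Replenish only in large batches; see the asserts above. */
	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
}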

#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */