/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_H_

#include <assert.h>	/* assert(), static_assert (C11). */
#include <stddef.h>	/* offsetof(). */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#include "mlx5_autoconf.h"
#include "mlx5_prm.h"

/*
 * Compile time sanity check for vectorized functions.
 */

#define S_ASSERT_RTE_MBUF(s) \
	static_assert(s, "A field of struct rte_mbuf is changed")
#define S_ASSERT_MLX5_CQE(s) \
	static_assert(s, "A field of struct mlx5_cqe is changed")

/* rxq_cq_decompress_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

/* rxq_cq_to_ptype_oflags_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
		  offsetof(struct rte_mbuf, rearm_data) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
		  RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));

/* rxq_burst_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
#if (RTE_CACHE_LINE_SIZE == 128)
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
#else
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
#endif
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
		  offsetof(struct mlx5_cqe, pkt_info) + 12);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
		  sizeof(((struct mlx5_cqe *)0)->rsvd1) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd2) +
		  sizeof(((struct mlx5_cqe *)0)->rsvd2) ==
		  offsetof(struct mlx5_cqe, byte_cnt));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
		  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
		  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
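
/*
 * Illustrative note (not from the original file): the assertions above
 * exist because the vectorized Rx paths fill groups of mbuf fields with
 * single 16-byte vector stores rather than field-by-field assignments.
 * A rough sketch of the pattern on x86, assuming hypothetical vectors
 * "rearm" and "rxdf" already assembled from a CQE, would be:
 *
 *	_mm_storeu_si128((__m128i *)&pkt->rearm_data, rearm);
 *	_mm_storeu_si128((__m128i *)&pkt->rx_descriptor_fields1, rxdf);
 *
 * Any drift in the offsets of pkt_len, data_len, hash, or ol_flags would
 * silently corrupt such stores, so the checks fail the build instead.
 */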

/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param n
 *   Number of buffers to be replenished.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	const uint16_t q_mask = q_n - 1;
	uint16_t elts_idx = rxq->rq_ci & q_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
	unsigned int i;

	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
	assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
		rxq->stats.rx_nombuf += n;
		return;
	}
	/* Point each WQE at the packet data area of its new mbuf. */
	for (i = 0; i < n; ++i)
		wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
					      RTE_PKTMBUF_HEADROOM);
	rxq->rq_ci += n;
	/* Prevent overflowing into consumed mbufs. */
	elts_idx = rxq->rq_ci & q_mask;
	for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
		(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
	/* Flush WQE writes before publishing the new producer index. */
	rte_io_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}

#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */
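
/*
 * Usage sketch (illustrative, not part of the original header): a
 * vectorized Rx burst routine is expected to top up the RQ before
 * polling CQEs, in a way that satisfies the assertions inside
 * mlx5_rx_replenish_bulk_mbuf(), e.g.:
 *
 *	uint16_t repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
 *
 *	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
 *		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 *
 * Here "repl_n" is a hypothetical local; the actual callers are the
 * per-architecture rxq_burst_v() implementations.
 */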