/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef MLX4_PRM_H_
#define MLX4_PRM_H_

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/mlx4dv.h>
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include "mlx4_autoconf.h"

/* ConnectX-3 Tx queue basic block. */
#define MLX4_TXBB_SHIFT 6
#define MLX4_TXBB_SIZE (1 << MLX4_TXBB_SHIFT)

/* A typical TSO descriptor with 16 gather entries is 352 bytes. */
#define MLX4_MAX_SGE 32
#define MLX4_MAX_WQE_SIZE \
	(MLX4_MAX_SGE * sizeof(struct mlx4_wqe_data_seg) + \
	 sizeof(struct mlx4_wqe_ctrl_seg))
#define MLX4_SEG_SHIFT 4
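
/*
 * Worked size check (illustration only, not part of the original
 * definitions): assuming sizeof(struct mlx4_wqe_data_seg) == 16 and
 * sizeof(struct mlx4_wqe_ctrl_seg) == 16 as laid out in mlx4dv.h,
 * MLX4_MAX_WQE_SIZE evaluates to 32 * 16 + 16 = 528 bytes, so a maximal
 * WQE spans nine 64-byte TXBBs (528 / MLX4_TXBB_SIZE rounded up).
 */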

/* Send queue stamping/invalidating information. */
#define MLX4_SQ_STAMP_STRIDE 64
#define MLX4_SQ_STAMP_DWORDS (MLX4_SQ_STAMP_STRIDE / 4)
#define MLX4_SQ_OWNER_BIT 31
#define MLX4_SQ_STAMP_VAL 0x7fffffff
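
/*
 * Minimal sketch (not part of the original header) of how the stamping
 * constants above combine to invalidate a freed WQE; the helper name
 * mlx4_stamp_wqe_sketch is hypothetical and the real Tx path keeps a
 * pre-computed stamp value (with an invalid owner bit) in struct mlx4_sq.
 */
static inline void
mlx4_stamp_wqe_sketch(volatile uint32_t *wqe, uint32_t wqe_size,
		      uint32_t stamp)
{
	volatile uint32_t *end = wqe + wqe_size / sizeof(uint32_t);

	/*
	 * Write one stamp dword per MLX4_SQ_STAMP_STRIDE bytes so that
	 * every 64-byte TXBB of the WQE starts with a value whose owner
	 * bit (bit MLX4_SQ_OWNER_BIT) is stale.
	 */
	while (wqe < end) {
		*wqe = stamp;
		wqe += MLX4_SQ_STAMP_DWORDS;
	}
}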

/* Work queue element (WQE) flags. */
#define MLX4_WQE_CTRL_IIP_HDR_CSUM (1 << 28)
#define MLX4_WQE_CTRL_IL4_HDR_CSUM (1 << 27)
#define MLX4_WQE_CTRL_RR (1 << 6)

/* CQE checksum flags. */
enum {
	MLX4_CQE_L2_TUNNEL_IPV4 = (int)(1u << 25),
	MLX4_CQE_L2_TUNNEL_L4_CSUM = (int)(1u << 26),
	MLX4_CQE_L2_TUNNEL = (int)(1u << 27),
	MLX4_CQE_L2_VLAN_MASK = (int)(3u << 29),
	MLX4_CQE_L2_TUNNEL_IPOK = (int)(1u << 31),
};

/* CQE status flags. */
#define MLX4_CQE_STATUS_IPV6F (1 << 12)
#define MLX4_CQE_STATUS_IPV4 (1 << 22)
#define MLX4_CQE_STATUS_IPV4F (1 << 23)
#define MLX4_CQE_STATUS_IPV6 (1 << 24)
#define MLX4_CQE_STATUS_IPV4OPT (1 << 25)
#define MLX4_CQE_STATUS_TCP (1 << 26)
#define MLX4_CQE_STATUS_UDP (1 << 27)
#define MLX4_CQE_STATUS_PTYPE_MASK \
	(MLX4_CQE_STATUS_IPV4 | \
	 MLX4_CQE_STATUS_IPV4F | \
	 MLX4_CQE_STATUS_IPV6 | \
	 MLX4_CQE_STATUS_IPV4OPT | \
	 MLX4_CQE_STATUS_TCP | \
	 MLX4_CQE_STATUS_UDP)
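
/*
 * Minimal sketch (not part of the original header) showing how the CQE
 * status flags above are meant to be tested after byte-swapping the
 * big-endian status word; the helper name mlx4_cqe_is_tcp_ipv4_sketch is
 * hypothetical.
 */
static inline int
mlx4_cqe_is_tcp_ipv4_sketch(volatile struct mlx4_cqe *cqe)
{
	uint32_t status = rte_be_to_cpu_32(cqe->status) &
			  MLX4_CQE_STATUS_PTYPE_MASK;

	/* Non-fragmented TCP over IPv4 completion. */
	return (status & (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_TCP)) ==
	       (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_TCP) &&
	       !(status & MLX4_CQE_STATUS_IPV4F);
}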

/* Send queue information. */
struct mlx4_sq {
	volatile uint8_t *buf; /**< SQ buffer. */
	volatile uint8_t *eob; /**< End of SQ buffer. */
	uint32_t size; /**< SQ size including headroom. */
	uint32_t remain_size; /**< Remaining WQE room in SQ (bytes). */
	uint32_t owner_opcode;
	/**< Default owner opcode with HW valid owner bit. */
	uint32_t stamp; /**< Stamp value with an invalid HW owner bit. */
	uint32_t *db; /**< Pointer to the doorbell. */
	off_t uar_mmap_offset; /**< UAR mmap offset for non-primary process. */
	uint32_t doorbell_qpn; /**< QP number to write to the doorbell. */
};

/* Completion queue events, numbers and masks. */
#define MLX4_CQ_DB_GEQ_N_MASK 0x3
#define MLX4_CQ_DOORBELL 0x20
#define MLX4_CQ_DB_CI_MASK 0xffffff

/* Completion queue information. */
struct mlx4_cq {
	volatile void *cq_uar; /**< CQ user access region. */
	volatile void *cq_db_reg; /**< CQ doorbell register. */
	volatile uint32_t *set_ci_db; /**< Pointer to the CQ doorbell. */
	volatile uint32_t *arm_db; /**< Arming Rx events doorbell. */
	volatile uint8_t *buf; /**< Pointer to the completion queue buffer. */
	uint32_t cqe_cnt; /**< Number of entries in the queue. */
	uint32_t cqe_64:1; /**< CQ entry size is 64 bytes. */
	uint32_t cons_index; /**< Last queue entry that was handled. */
	uint32_t cqn; /**< CQ number. */
	int arm_sn; /**< Rx event counter. */
};

#ifndef HAVE_IBV_MLX4_WQE_LSO_SEG
/*
 * WQE LSO segment structure.
 * Defined here for backward compatibility with rdma-core v17 and below.
 * A similar definition is found in infiniband/mlx4dv.h in rdma-core v18
 * and above.
 */
struct mlx4_wqe_lso_seg {
	rte_be32_t mss_hdr_size;
	rte_be32_t header[];
};
#endif

/**
 * Retrieve a CQE entry from a CQ.
 *
 * cqe = cq->buf + cons_index * cqe_size + cqe_offset
 *
 * Where cqe_size is 32 or 64 bytes and cqe_offset is 0 or 32 (depending on
 * cqe_size).
 *
 * @param cq
 *   CQ to retrieve entry from.
 * @param index
 *   Entry index.
 *
 * @return
 *   Pointer to CQE entry.
 */
static inline volatile struct mlx4_cqe *
mlx4_get_cqe(struct mlx4_cq *cq, uint32_t index)
{
	/*
	 * The shift by (5 + cqe_64) multiplies the masked index by the CQE
	 * size (32 or 64 bytes); the (cqe_64 << 5) term adds the 32-byte
	 * cqe_offset used with 64-byte CQEs.
	 */
	return (volatile struct mlx4_cqe *)(cq->buf +
					    ((index & (cq->cqe_cnt - 1)) <<
					     (5 + cq->cqe_64)) +
					    (cq->cqe_64 << 5));
}
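
/*
 * Address arithmetic example (illustration only): with 64-byte CQEs
 * (cqe_64 == 1) and cqe_cnt == 256, index 5 resolves to
 * buf + (5 << 6) + 32 = buf + 352, i.e. 32 bytes into the sixth 64-byte
 * entry.
 */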

/**
 * Transpose a flag in a value.
 *
 * @param val
 *   Input value.
 * @param from
 *   Flag to retrieve from input value.
 * @param to
 *   Flag to set in output value.
 *
 * @return
 *   Output value with the transposed flag set if it was present in the
 *   input.
 */
static inline uint64_t
mlx4_transpose(uint64_t val, uint64_t from, uint64_t to)
{
	return (from >= to ?
		(val & from) / (from / to) :
		(val & from) * (to / from));
}
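
/*
 * Worked example (illustration only): with from == MLX4_CQE_STATUS_IPV4
 * (bit 22) and to == (1u << 2), from >= to, so the result is
 * (val & (1u << 22)) / ((1u << 22) / (1u << 2)), i.e. (1u << 2) when the
 * flag is set in val and 0 otherwise. Both flags are assumed to be single
 * bits so that the division reduces to a plain shift.
 */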

#endif /* MLX4_PRM_H_ */