/* xref: /dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h (revision f8dbaebbf1c9efcbb2e2354b341ed62175466a57) */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (C) Mellanox Technologies, Ltd. 2001-2020.
3  */
4 
5 #ifndef __MLX5_WIN_DEFS_H__
6 #define __MLX5_WIN_DEFS_H__
7 
8 #ifdef __cplusplus
9 extern "C" {
10 #endif
11 
/*
 * CQE opcodes, as reported in the op_own byte of a completion queue
 * entry (upper nibble); MLX5_CQE_OWNER_MASK selects the ownership bit.
 */
enum {
	MLX5_CQE_OWNER_MASK	= 1,
	MLX5_CQE_REQ		= 0,
	MLX5_CQE_RESP_WR_IMM	= 1,
	MLX5_CQE_RESP_SEND	= 2,
	MLX5_CQE_RESP_SEND_IMM	= 3,
	MLX5_CQE_RESP_SEND_INV	= 4,
	MLX5_CQE_RESIZE_CQ	= 5,
	MLX5_CQE_NO_PACKET	= 6,
	MLX5_CQE_REQ_ERR	= 13,
	MLX5_CQE_RESP_ERR	= 14,
	MLX5_CQE_INVALID	= 15,
};
25 
/* Send WQE opcodes (opmod/opcode field of the WQE control segment). */
enum {
	MLX5_OPCODE_NOP			= 0x00,
	MLX5_OPCODE_SEND_INVAL		= 0x01,
	MLX5_OPCODE_RDMA_WRITE		= 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX5_OPCODE_SEND		= 0x0a,
	MLX5_OPCODE_SEND_IMM		= 0x0b,
	MLX5_OPCODE_TSO			= 0x0e,
	MLX5_OPCODE_RDMA_READ		= 0x10,
	MLX5_OPCODE_ATOMIC_CS		= 0x11,
	MLX5_OPCODE_ATOMIC_FA		= 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
	MLX5_OPCODE_FMR			= 0x19,
	MLX5_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
	MLX5_OPCODE_UMR			= 0x25,
	MLX5_OPCODE_TAG_MATCHING	= 0x28
};
45 
/* Valid-field bits for CQ creation attributes (comp_mask selector). */
enum mlx5dv_cq_init_attr_mask {
	MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE	= 1 << 0,
	MLX5DV_CQ_INIT_ATTR_MASK_FLAGS		= 1 << 1,
	MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE = 1 << 2,
};
51 
/* Supported mini-CQE (compressed CQE) residual formats. */
enum mlx5dv_cqe_comp_res_format {
	MLX5DV_CQE_RES_FORMAT_HASH		= 1 << 0,
	MLX5DV_CQE_RES_FORMAT_CSUM		= 1 << 1,
	MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX	= 1 << 2,
};
57 
/* Memory-region access flags (mirrors rdma-core's verbs.h values). */
enum ibv_access_flags {
	IBV_ACCESS_LOCAL_WRITE		= 1,	/* i.e. 1 << 0 */
	IBV_ACCESS_REMOTE_WRITE		= 1 << 1,
	IBV_ACCESS_REMOTE_READ		= 1 << 2,
	IBV_ACCESS_REMOTE_ATOMIC	= 1 << 3,
	IBV_ACCESS_MW_BIND		= 1 << 4,
	IBV_ACCESS_ZERO_BASED		= 1 << 5,
	IBV_ACCESS_ON_DEMAND		= 1 << 6,
};
67 
/* DevX event channel creation flags. */
enum mlx5_ib_uapi_devx_create_event_channel_flags {
	MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA = 1 << 0,
};

/* Alias matching the rdma-core (mlx5dv) spelling of the same flag. */
#define MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA \
	MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA
74 
/* Error syndromes carried in the syndrome byte of an error CQE. */
enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};
90 
/* Checksum-offload request bits in the Ethernet segment of a send WQE. */
enum {
	MLX5_ETH_WQE_L3_CSUM = (1 << 6),
	MLX5_ETH_WQE_L4_CSUM = (1 << 7),
};
95 
/* Flag bits of the fm_ce_se byte in the WQE control segment. */
enum {
	MLX5_WQE_CTRL_CQ_UPDATE	= 2 << 2,
	MLX5_WQE_CTRL_SOLICITED	= 1 << 1,
	MLX5_WQE_CTRL_FENCE	= 4 << 5,
	MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5,
};
102 
/* Send WQE basic block: 64 bytes (MLX5_SEND_WQE_BB == 1 << MLX5_SEND_WQE_SHIFT). */
enum {
	MLX5_SEND_WQE_BB	= 64,
	MLX5_SEND_WQE_SHIFT	= 6,
};
107 
/*
 * RX Hash fields enable to set which incoming packet's field should
 * participate in RX Hash. Each flag represents a certain packet field;
 * when the flag is set, the field that is represented by the flag will
 * participate in RX Hash calculation.
 * Note: IPV4 and IPV6 flags can't be enabled together on the same QP,
 * TCP and UDP flags can't be enabled together on the same QP.
 */
enum ibv_rx_hash_fields {
	IBV_RX_HASH_SRC_IPV4	= 1 << 0,
	IBV_RX_HASH_DST_IPV4	= 1 << 1,
	IBV_RX_HASH_SRC_IPV6	= 1 << 2,
	IBV_RX_HASH_DST_IPV6	= 1 << 3,
	IBV_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	IBV_RX_HASH_DST_PORT_TCP	= 1 << 5,
	IBV_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	IBV_RX_HASH_DST_PORT_UDP	= 1 << 7,
	IBV_RX_HASH_IPSEC_SPI		= 1 << 8,
	/*
	 * Use an unsigned shift: (1 << 31) shifts into the sign bit of int,
	 * which is undefined behavior. rdma-core defines this as (1UL << 31).
	 */
	IBV_RX_HASH_INNER		= (1UL << 31),
};
128 
/* Doorbell record indices: receive doorbell first, send doorbell second. */
enum {
	MLX5_RCV_DBR	= 0,
	MLX5_SND_DBR	= 1,
};
133 
/*
 * Packet-reformat (encap/decap) type values; defined here only if the
 * SDK headers do not already provide them.
 */
#ifndef MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2
#define MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2	0x0
#endif
#ifndef MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL
#define MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL	0x1
#endif
#ifndef MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2
#define MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2	0x2
#endif
#ifndef MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL
#define MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL	0x3
#endif
146 
/* Flow attribute flags (mirrors rdma-core's verbs.h values). */
enum ibv_flow_flags {
	IBV_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = 1 << 0,
	IBV_FLOW_ATTR_FLAGS_DONT_TRAP = 1 << 1,
	IBV_FLOW_ATTR_FLAGS_EGRESS = 1 << 2,
};
152 
/* Flow rule kinds (mirrors rdma-core's verbs.h values). */
enum ibv_flow_attr_type {
	/* Steering according to rule specifications. */
	IBV_FLOW_ATTR_NORMAL		= 0x0,
	/*
	 * Default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP.
	 */
	IBV_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/*
	 * Default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP.
	 */
	IBV_FLOW_ATTR_MC_DEFAULT	= 0x2,
	/* Sniffer rule - receive all port traffic. */
	IBV_FLOW_ATTR_SNIFFER		= 0x3,
};
169 
/* Flow table domains a matcher can be attached to. */
enum mlx5dv_flow_table_type {
	MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX     = 0x0,
	MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX	= 0x1,
	MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB	= 0x2,
	MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX	= 0x3,
};

/* Aliases matching the rdma-core (mlx5dv) spelling of the same values. */
#define MLX5DV_FLOW_TABLE_TYPE_NIC_RX	MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX
#define MLX5DV_FLOW_TABLE_TYPE_NIC_TX	MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX
#define MLX5DV_FLOW_TABLE_TYPE_FDB	MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_RDMA_RX	MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX
181 
/*
 * Match value/mask buffer in device spec format; match_sz is the size in
 * bytes of the flexible match_buf array that follows.
 */
struct mlx5dv_flow_match_parameters {
	size_t match_sz;
	uint64_t match_buf[]; /* Device spec format */
};
186 
/* Attributes used to create a flow matcher. */
struct mlx5dv_flow_matcher_attr {
	enum ibv_flow_attr_type type;
	uint32_t flags; /* From enum ibv_flow_flags. */
	uint16_t priority;
	uint8_t match_criteria_enable; /* Device spec format. */
	struct mlx5dv_flow_match_parameters *match_mask;
	uint64_t comp_mask; /* Use mlx5dv_flow_matcher_attr_mask. */
	enum mlx5dv_flow_table_type ft_type;
};
196 
/* Windows specific mlx5_matcher. */
struct mlx5_matcher {
	void *ctx;	/* Device context the matcher belongs to. */
	struct mlx5dv_flow_matcher_attr attr;
	uint64_t match_buf[];	/* Trailing match mask, device spec format. */
};
203 
/*
 * Windows mlx5_action. This struct is the
 * equivalent of rdma-core struct mlx5dv_dr_action.
 */
struct mlx5_action {
	int type;	/* Action kind discriminator. */
	struct {
		uint32_t id;	/* Destination TIR object number. */
	} dest_tir;
};
214 
/*
 * Error CQE layout; fields sum to 64 bytes, matching the hardware CQE
 * size. Multi-byte fields are in device (big-endian) byte order.
 */
struct mlx5_err_cqe {
	uint8_t		rsvd0[32];
	uint32_t	srqn;
	uint8_t		rsvd1[18];
	uint8_t		vendor_err_synd;
	uint8_t		syndrome;	/* One of MLX5_CQE_SYNDROME_*. */
	uint32_t	s_wqe_opcode_qpn;
	uint16_t	wqe_counter;
	uint8_t		signature;
	uint8_t		op_own;		/* Opcode (high nibble) and owner bit. */
};
226 
/* SRQ WQE "next" segment: links receive WQEs into the SRQ free list. */
struct mlx5_wqe_srq_next_seg {
	uint8_t			rsvd0[2];
	rte_be16_t		next_wqe_index;
	uint8_t			signature;
	uint8_t			rsvd1[11];
};
233 
/* Work queue states (mirrors rdma-core's verbs.h). */
enum ibv_wq_state {
	IBV_WQS_RESET,
	IBV_WQS_RDY,
	IBV_WQS_ERR,
	IBV_WQS_UNKNOWN
};
240 
/* WQE scatter/gather data segment; all fields are big-endian. */
struct mlx5_wqe_data_seg {
	rte_be32_t		byte_count;
	rte_be32_t		lkey;
	rte_be64_t		addr;
};
246 
/* Capability bits and QP type value mirrored from rdma-core headers. */
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP	(1 << 4)
#define IBV_DEVICE_RAW_IP_CSUM			(1 << 26)
#define IBV_RAW_PACKET_CAP_CVLAN_STRIPPING	(1 << 0)
#define IBV_RAW_PACKET_CAP_SCATTER_FCS		(1 << 1)
#define IBV_QPT_RAW_PACKET			8
252 
/* Destination types of a flow context (device spec destination_type). */
enum {
	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT                    = 0x0,
	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE               = 0x1,
	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR                      = 0x2,
	MLX5_FLOW_CONTEXT_DEST_TYPE_QP                       = 0x3,
};
259 
260 enum {
261 	MLX5_MATCH_OUTER_HEADERS        = 1 << 0,
262 	MLX5_MATCH_MISC_PARAMETERS      = 1 << 1,
263 	MLX5_MATCH_INNER_HEADERS        = 1 << 2,
264 };
265 #endif /* __MLX5_WIN_DEFS_H__ */
266