/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015-2019 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>
#include <rte_flow.h>

#include <mlx5_prm.h>
#include <mlx5_common.h>

#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"

/* static asserts */
static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
		(sizeof(uint16_t) +
		 sizeof(rte_v128u32_t)),
		"invalid Ethernet Segment data size");
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
		(sizeof(uint16_t) +
		 sizeof(struct rte_vlan_hdr) +
		 2 * RTE_ETHER_ADDR_LEN),
		"invalid Ethernet Segment data size");
static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
		(2 * RTE_ETHER_ADDR_LEN),
		"invalid Data Segment data size");
static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
static_assert((sizeof(struct rte_vlan_hdr) +
			sizeof(struct rte_ether_hdr)) ==
		MLX5_ESEG_MIN_INLINE_SIZE,
		"invalid min inline data size");
static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
		MLX5_DSEG_MAX, "invalid WQE max size");
static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
		"invalid WQE Control Segment size");
static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
		"invalid WQE Ethernet Segment size");
static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
		"invalid WQE Data Segment size");
static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
		"invalid WQE size");

alignas(RTE_CACHE_LINE_SIZE) uint32_t mlx5_ptype_table[] = {
	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};

alignas(RTE_CACHE_LINE_SIZE) uint8_t mlx5_cksum_table[1 << 10];
alignas(RTE_CACHE_LINE_SIZE) uint8_t mlx5_swp_types_table[1 << 10];

uint64_t rte_net_mlx5_dynf_inline_mask;
/**
 * Build a table to translate Rx completion flags to packet type.
 *
 * @note: update mlx5_dev_supported_ptypes_get() if anything changes here.
 */
void
mlx5_set_ptype_table(void)
{
	unsigned int i;
	uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;

	/* Last entry must not be overwritten, reserved for errored packet. */
	for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
		(*p)[i] = RTE_PTYPE_UNKNOWN;
	/*
	 * The index to the array should have:
	 * bit[1:0] = l3_hdr_type
	 * bit[4:2] = l4_hdr_type
	 * bit[5] = ip_frag
	 * bit[6] = tunneled
	 * bit[7] = outer_l3_type
	 */
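	/*
	 * For example, index 0x46 = 0b01000110 decodes as tunneled (bit[6])
	 * with l4_hdr_type = 1 and l3_hdr_type = 2, which the entries below
	 * translate to an inner IPv4/TCP packet carried over an IPv4 tunnel.
	 */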
	/* L2 */
	(*p)[0x00] = RTE_PTYPE_L2_ETHER;
	/* L3 */
	(*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	(*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	/* Fragmented */
	(*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	(*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	/* TCP */
	(*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	/* UDP */
	(*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	(*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	/* Repeat with outer_l3_type being set. Just in case. */
	(*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	(*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_NONFRAG;
	(*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	(*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_FRAG;
	(*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_TCP;
	(*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_L4_UDP;
	/* Tunneled - L3 */
	(*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	(*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	(*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	(*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	(*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	(*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_NONFRAG;
	/* Tunneled - Fragmented */
	(*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	(*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	(*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	(*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_FRAG;
	/* Tunneled - TCP */
	(*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	(*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_TCP;
	/* Tunneled - UDP */
	(*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
	(*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
	(*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
	(*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		     RTE_PTYPE_INNER_L4_UDP;
}
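
/*
 * Usage sketch (illustrative only, not part of the original file): once an
 * index "idx" has been assembled from the CQE fields per the layout above,
 * the Rx path resolves the packet type with a single table load:
 *
 *	mbuf->packet_type = mlx5_ptype_table[idx];
 *
 * The 0xff entry is never rewritten, so errored completions keep a
 * distinguishable RTE_PTYPE_ALL_MASK marking.
 */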

/**
 * Build a table to translate packet checksum offload requests to the
 * checksum type of Verbs.
 */
void
mlx5_set_cksum_table(void)
{
	unsigned int i;
	uint8_t v;

	/*
	 * The index should have:
	 * bit[0] = RTE_MBUF_F_TX_TCP_SEG
	 * bit[2:3] = RTE_MBUF_F_TX_UDP_CKSUM, RTE_MBUF_F_TX_TCP_CKSUM
	 * bit[4] = RTE_MBUF_F_TX_IP_CKSUM
	 * bit[8] = RTE_MBUF_F_TX_OUTER_IP_CKSUM
	 * bit[9] = tunnel
	 */
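	/*
	 * For example, an index with bit[4] set (RTE_MBUF_F_TX_IP_CKSUM), a
	 * non-zero bit[2:3] (an L4 checksum request) and bit[9] clear takes
	 * the non-tunnel branch below and yields
	 * MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM.
	 */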
	for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
		v = 0;
		if (i & (1 << 9)) {
			/* Tunneled packet. */
			if (i & (1 << 8)) /* Outer IP. */
				v |= MLX5_ETH_WQE_L3_CSUM;
			if (i & (1 << 4)) /* Inner IP. */
				v |= MLX5_ETH_WQE_L3_INNER_CSUM;
			if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
				v |= MLX5_ETH_WQE_L4_INNER_CSUM;
		} else {
			/* No tunnel. */
			if (i & (1 << 4)) /* IP. */
				v |= MLX5_ETH_WQE_L3_CSUM;
			if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
				v |= MLX5_ETH_WQE_L4_CSUM;
		}
		mlx5_cksum_table[i] = v;
	}
}

/**
 * Build a table to translate packet type of mbuf to SWP type of Verbs.
 */
void
mlx5_set_swp_types_table(void)
{
	unsigned int i;
	uint8_t v;

	/*
	 * The index should have:
	 * bit[0:1] = RTE_MBUF_F_TX_L4_MASK
	 * bit[4] = RTE_MBUF_F_TX_IPV6
	 * bit[8] = RTE_MBUF_F_TX_OUTER_IPV6
	 * bit[9] = RTE_MBUF_F_TX_OUTER_UDP
	 */
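	/*
	 * For example, an index with bit[8] and bit[9] set and bit[0:1] == 3
	 * (i.e. RTE_MBUF_F_TX_UDP_CKSUM >> 52) yields
	 * MLX5_ETH_WQE_L3_OUTER_IPV6 | MLX5_ETH_WQE_L4_OUTER_UDP |
	 * MLX5_ETH_WQE_L4_INNER_UDP below.
	 */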
	for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
		v = 0;
		if (i & (1 << 8))
			v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
		if (i & (1 << 9))
			v |= MLX5_ETH_WQE_L4_OUTER_UDP;
		if (i & (1 << 4))
			v |= MLX5_ETH_WQE_L3_INNER_IPV6;
		if ((i & 3) == (RTE_MBUF_F_TX_UDP_CKSUM >> 52))
			v |= MLX5_ETH_WQE_L4_INNER_UDP;
		mlx5_swp_types_table[i] = v;
	}
}

#define MLX5_SYSTEM_LOG_DIR "/var/log"
/**
 * Dump debug information to a log file.
 *
 * @param fname
 *   The file name.
 * @param hex_title
 *   If not NULL, this string is printed as a header to the output
 *   and the output will be in hexadecimal view.
 * @param buf
 *   This is the buffer address to print out.
 * @param hex_len
 *   The number of bytes to dump out.
 */
void
mlx5_dump_debug_information(const char *fname, const char *hex_title,
			    const void *buf, unsigned int hex_len)
{
	FILE *fd;

	MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
	fd = fopen(path, "a+");
	if (!fd) {
		DRV_LOG(WARNING, "cannot open %s for debug dump", path);
		MKSTR(path2, "./%s", fname);
		fd = fopen(path2, "a+");
		if (!fd) {
			DRV_LOG(ERR, "cannot open %s for debug dump", path2);
			return;
		}
		DRV_LOG(INFO, "New debug dump in file %s", path2);
	} else {
		DRV_LOG(INFO, "New debug dump in file %s", path);
	}
	if (hex_title)
		rte_hexdump(fd, hex_title, buf, hex_len);
	else
		fprintf(fd, "%s", (const char *)buf);
	fprintf(fd, "\n\n\n");
	fclose(fd);
}
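
/*
 * Hypothetical call (illustrative only; the file name and "cqe" variable
 * are made up): append a hex dump of a completion queue entry to
 * /var/log/mlx5_err.log, falling back to ./mlx5_err.log:
 *
 *	mlx5_dump_debug_information("mlx5_err.log", "Bad CQE:",
 *				    cqe, sizeof(*cqe));
 */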

/**
 * Dump information to a log file.
 *
 * @param fd
 *   Pointer to the output file stream; open/close is managed by the caller.
 * @param title
 *   If not NULL, this string is printed as a header to the output
 *   and the output will be in hexadecimal view.
 * @param buf
 *   This is the buffer address to print out.
 * @param len
 *   The number of bytes to dump out.
 */
void
mlx5_dump_to_file(FILE *fd, const char *title,
		  const void *buf, unsigned int len)
{
	if (title)
		rte_hexdump(fd, title, buf, len);
	else
		fprintf(fd, "%s", (const char *)buf);
	fprintf(fd, "\n\n\n");
}

/**
 * Modify a Verbs/DevX queue state.
 * This must be called from the primary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sm
 *   State modify request parameters.
 *
 * @return
 *   0 on success, a non-zero value otherwise with rte_errno set.
 */
int
mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
			const struct mlx5_mp_arg_queue_state_modify *sm)
{
	int ret;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (sm->is_wq) {
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, sm->queue_id);

		ret = priv->obj_ops.rxq_obj_modify(rxq, sm->state);
		if (ret) {
			DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
					sm->state, strerror(errno));
			rte_errno = errno;
			return ret;
		}
	} else {
		struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
		struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);

		ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
						   MLX5_TXQ_MOD_ERR2RDY,
						   (uint8_t)priv->dev_port);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * Modify a Verbs/DevX queue state.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sm
 *   State modify request parameters.
 *
 * @return
 *   0 on success, a non-zero value otherwise.
 */
int
mlx5_queue_state_modify(struct rte_eth_dev *dev,
			struct mlx5_mp_arg_queue_state_modify *sm)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_queue_state_modify_primary(dev, sm);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
		break;
	default:
		break;
	}
	return ret;
}
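
/*
 * Usage sketch (illustrative; "idx" and "state" are hypothetical values):
 * a caller targets an Rx WQ by setting is_wq, and this helper routes the
 * request to the primary process when invoked from a secondary one:
 *
 *	struct mlx5_mp_arg_queue_state_modify sm = {
 *		.is_wq = 1,
 *		.queue_id = idx,
 *		.state = state,
 *	};
 *	ret = mlx5_queue_state_modify(dev, &sm);
 */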
453