/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>

#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_eal_paging.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "rte_pmd_mlx5.h"

#define MLX5_ETH_DRIVER_NAME mlx5_eth

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to configure log 2 of the stride size for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx. Deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable Tx scheduling on timestamps
 * and specify the packet pacing granularity in nanoseconds.
 */
#define MLX5_TX_PP "tx_pp"

/*
 * Device parameter to specify skew in nanoseconds on the Tx datapath;
 * it represents the time between SQ start WQE processing and
 * the actual packet data appearing on the wire.
 */
#define MLX5_TX_SKEW "tx_skew"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"

/* Device parameter to let the user manage the LACP traffic of a bonded device. */
#define MLX5_LACP_BY_USER "lacp_by_user"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

/*
 * Device parameter to configure the total data buffer size for a single
 * hairpin queue (logarithm value).
 */
#define MLX5_HP_BUF_SIZE "hp_buf_log_sz"

/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"

/* Decap will be used or not. */
#define MLX5_DECAP_EN "decap_en"

/* Device parameter to allow or prevent duplicate rule patterns. */
#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"

/* Device parameter to configure the delay drop when creating Rxqs. */
#define MLX5_DELAY_DROP "delay_drop"

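/*
 * Illustrative example (not part of the driver): the parameters above are
 * regular DPDK devargs, supplied per device on the EAL command line, e.g.:
 *
 *   dpdk-testpmd -a 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,tx_pp=500 -- -i
 *
 * The PCI address and the chosen values are placeholders; each key is routed
 * to the appropriate kvargs handler (the shared-device keys are parsed by
 * mlx5_dev_args_check_handler() below).
 */
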
/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
						LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_dev_ctx_list_mutex;
static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	[MLX5_IPOOL_DECAP_ENCAP] = {
		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_encap_decap_ipool",
	},
	[MLX5_IPOOL_PUSH_VLAN] = {
		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_push_vlan_ipool",
	},
	[MLX5_IPOOL_TAG] = {
		.size = sizeof(struct mlx5_flow_dv_tag_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = (1 << 16),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_tag_ipool",
	},
	[MLX5_IPOOL_PORT_ID] = {
		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_port_id_ipool",
	},
	[MLX5_IPOOL_JUMP] = {
		.size = sizeof(struct mlx5_flow_tbl_data_entry),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_jump_ipool",
	},
	[MLX5_IPOOL_SAMPLE] = {
		.size = sizeof(struct mlx5_flow_dv_sample_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_sample_ipool",
	},
	[MLX5_IPOOL_DEST_ARRAY] = {
		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_dest_array_ipool",
	},
	[MLX5_IPOOL_TUNNEL_ID] = {
		.size = sizeof(struct mlx5_flow_tunnel),
		.trunk_size = MLX5_MAX_TUNNELS,
		.need_lock = 1,
		.release_mem_en = 1,
		.type = "mlx5_tunnel_offload",
	},
	[MLX5_IPOOL_TNL_TBL_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_tnl_tbl_ipool",
	},
#endif
	[MLX5_IPOOL_MTR] = {
		/**
		 * The ipool index should grow continuously from small to
		 * large for the meter index, so grow_trunk is not set in
		 * order to keep meter indexes contiguous.
		 */
		.size = sizeof(struct mlx5_legacy_flow_meter),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_ipool",
	},
	[MLX5_IPOOL_MCP] = {
		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_mcp_ipool",
	},
	[MLX5_IPOOL_HRXQ] = {
		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hrxq_ipool",
	},
	[MLX5_IPOOL_MLX5_FLOW] = {
		/*
		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
		 * It is set at runtime according to the PCI function
		 * configuration.
		 */
		.size = 0,
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = 1 << 19,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_flow_handle_ipool",
	},
	[MLX5_IPOOL_RTE_FLOW] = {
		.size = sizeof(struct rte_flow),
		.trunk_size = 4096,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "rte_flow_ipool",
	},
	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_rss_id_ipool",
	},
	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
		.size = sizeof(struct mlx5_shared_action_rss),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_shared_action_rss",
	},
	[MLX5_IPOOL_MTR_POLICY] = {
		/**
		 * The ipool index should grow continuously from small to
		 * large for the policy index, so grow_trunk is not set in
		 * order to keep policy indexes contiguous.
		 */
		.size = sizeof(struct mlx5_flow_meter_sub_policy),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_policy_ipool",
	},
};

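/*
 * Illustrative note on the growth knobs above (my reading of the indexed
 * pool implementation in mlx5_utils.c; treat the exact numbers as an
 * assumption): with trunk_size = 64, grow_trunk = 3 and grow_shift = 2, the
 * per-trunk entry count is expected to grow by a factor of 1 << grow_shift
 * for each of the first grow_trunk trunks (64, 256, 1024, ...) and stop
 * growing afterwards. Pools whose indexes must stay contiguous (meter,
 * meter policy) deliberately leave grow_trunk at 0.
 */
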
#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16

#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 1024

/**
 * Decide whether the representor ID is an HPF (host PF) port on BlueField-2.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if HPF, otherwise 0.
 */
bool
mlx5_is_hpf(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t repr = MLX5_REPRESENTOR_REPR(priv->representor_id);
	int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);

	return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_VF &&
	       MLX5_REPRESENTOR_REPR(-1) == repr;
}

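/*
 * Illustrative note (an assumption based on the macros used above): the
 * representor_id value packs a type field, extracted with
 * MLX5_REPRESENTOR_TYPE(), and a representor index, extracted with
 * MLX5_REPRESENTOR_REPR(). The host PF is thus recognized as a VF-type
 * representor whose index field is all ones, i.e. equal to
 * MLX5_REPRESENTOR_REPR(-1).
 */
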
/**
 * Decide whether the representor ID is an SF port representor.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if SF port representor, otherwise 0.
 */
bool
mlx5_is_sf_repr(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);

	return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_SF;
}

/**
 * Initialize the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int err;

	if (sh->aso_age_mng)
		return 0;
	sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng),
				      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh->aso_age_mng) {
		DRV_LOG(ERR, "aso_age_mng allocation failed.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT);
	if (err) {
		mlx5_free(sh->aso_age_mng);
		return -1;
	}
	rte_rwlock_init(&sh->aso_age_mng->resize_rwl);
	rte_spinlock_init(&sh->aso_age_mng->free_sl);
	LIST_INIT(&sh->aso_age_mng->free);
	return 0;
}

/**
 * Close and release all the resources of the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	int i, j;

	mlx5_aso_flow_hit_queue_poll_stop(sh);
	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT);
	if (sh->aso_age_mng->pools) {
		struct mlx5_aso_age_pool *pool;

		for (i = 0; i < sh->aso_age_mng->next; ++i) {
			pool = sh->aso_age_mng->pools[i];
			claim_zero(mlx5_devx_cmd_destroy
						(pool->flow_hit_aso_obj));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j)
				if (pool->actions[j].dr_action)
					claim_zero
					    (mlx5_flow_os_destroy_flow_action
					      (pool->actions[j].dr_action));
			mlx5_free(pool);
		}
		mlx5_free(sh->aso_age_mng->pools);
	}
	mlx5_free(sh->aso_age_mng);
}

/**
 * Initialize the shared aging list information per port.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t i;
	struct mlx5_age_info *age_info;

	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		age_info->flags = 0;
		TAILQ_INIT(&age_info->aged_counters);
		LIST_INIT(&age_info->aged_aso);
		rte_spinlock_init(&age_info->aged_sl);
		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	}
}

/**
 * Detect and configure the DV flow counter mode.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 */
void
mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
	bool fallback;

#ifndef HAVE_IBV_DEVX_ASYNC
	fallback = true;
#else
	fallback = false;
	if (!sh->cdev->config.devx || !sh->config.dv_flow_en ||
	    !hca_attr->flow_counters_dump ||
	    !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
		fallback = true;
#endif
	if (fallback)
		DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
			"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
			hca_attr->flow_counters_dump,
			hca_attr->flow_counter_bulk_alloc_bitmap);
	/* Initialize fallback mode only on the port that initializes sh. */
	if (sh->refcnt == 1)
		sh->cmng.counter_fallback = fallback;
	else if (fallback != sh->cmng.counter_fallback)
		DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
			"than others: %d.", PORT_ID(priv), fallback);
#endif
}

/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int i;

	memset(&sh->cmng, 0, sizeof(sh->cmng));
	TAILQ_INIT(&sh->cmng.flow_counters);
	sh->cmng.min_id = MLX5_CNT_BATCH_OFFSET;
	sh->cmng.max_id = -1;
	sh->cmng.last_pool_idx = POOL_IDX_INVALID;
	rte_spinlock_init(&sh->cmng.pool_update_sl);
	for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
		TAILQ_INIT(&sh->cmng.counters[i]);
		rte_spinlock_init(&sh->cmng.csl[i]);
	}
}

/**
 * Destroy all the resources allocated for counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

	LIST_REMOVE(mng, next);
	mlx5_os_wrapped_mkey_destroy(&mng->wm);
	mlx5_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_counter_stats_mem_mng *mng;
	int i, j;
	int retries = 1024;

	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}

	if (sh->cmng.pools) {
		struct mlx5_flow_counter_pool *pool;
		uint16_t n_valid = sh->cmng.n_valid;
		bool fallback = sh->cmng.counter_fallback;

		for (i = 0; i < n_valid; ++i) {
			pool = sh->cmng.pools[i];
			if (!fallback && pool->min_dcs)
				claim_zero(mlx5_devx_cmd_destroy
							       (pool->min_dcs));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
				struct mlx5_flow_counter *cnt =
						MLX5_POOL_GET_CNT(pool, j);

				if (cnt->action)
					claim_zero
					 (mlx5_flow_os_destroy_flow_action
					  (cnt->action));
				if (fallback && MLX5_POOL_GET_CNT
				    (pool, j)->dcs_when_free)
					claim_zero(mlx5_devx_cmd_destroy
						   (cnt->dcs_when_free));
			}
			mlx5_free(pool);
		}
		mlx5_free(sh->cmng.pools);
	}
	mng = LIST_FIRST(&sh->cmng.mem_mngs);
	while (mng) {
		mlx5_flow_destroy_counter_stat_mem_mng(mng);
		mng = LIST_FIRST(&sh->cmng.mem_mngs);
	}
	memset(&sh->cmng, 0, sizeof(sh->cmng));
}

/**
 * Initialize the ASO flow meters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	if (!sh->mtrmng) {
		sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO,
			sizeof(*sh->mtrmng),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!sh->mtrmng) {
			DRV_LOG(ERR,
			"meter management allocation failed.");
			rte_errno = ENOMEM;
			return -ENOMEM;
		}
		if (sh->meter_aso_en) {
			rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
			rte_rwlock_init(&sh->mtrmng->pools_mng.resize_mtrwl);
			LIST_INIT(&sh->mtrmng->pools_mng.meters);
		}
		sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
	}
	return 0;
}

/**
 * Close and release all the resources of
 * the ASO flow meter management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_mtr_pool *mtr_pool;
	struct mlx5_flow_mtr_mng *mtrmng = sh->mtrmng;
	uint32_t idx;
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
	struct mlx5_aso_mtr *aso_mtr;
	int i;
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */

	if (sh->meter_aso_en) {
		mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
		idx = mtrmng->pools_mng.n_valid;
		while (idx--) {
			mtr_pool = mtrmng->pools_mng.pools[idx];
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
			for (i = 0; i < MLX5_ASO_MTRS_PER_POOL; i++) {
				aso_mtr = &mtr_pool->mtrs[i];
				if (aso_mtr->fm.meter_action)
					claim_zero
					(mlx5_glue->destroy_flow_action
					(aso_mtr->fm.meter_action));
			}
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
			claim_zero(mlx5_devx_cmd_destroy
						(mtr_pool->devx_obj));
			mtrmng->pools_mng.n_valid--;
			mlx5_free(mtr_pool);
		}
		mlx5_free(sh->mtrmng->pools_mng.pools);
	}
	mlx5_free(sh->mtrmng);
	sh->mtrmng = NULL;
}

/* Send FLOW_AGED event if needed. */
void
mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_age_info *age_info;
	uint32_t i;

	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
			continue;
		MLX5_AGE_UNSET(age_info, MLX5_AGE_EVENT_NEW);
		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) {
			MLX5_AGE_UNSET(age_info, MLX5_AGE_TRIGGER);
			rte_eth_dev_callback_process
				(&rte_eth_devices[sh->port[i].devx_ih_port_id],
				RTE_ETH_EVENT_FLOW_AGED, NULL);
		}
	}
}

/*
 * Initialize the ASO connection tracking structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int err;

	if (sh->ct_mng)
		return 0;
	sh->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->ct_mng),
				 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh->ct_mng) {
		DRV_LOG(ERR, "ASO CT management allocation failed.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_CONNECTION_TRACKING);
	if (err) {
		mlx5_free(sh->ct_mng);
		/* Ideally rte_errno should be extracted from the failure;
		 * use EINVAL for now.
		 */
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rte_spinlock_init(&sh->ct_mng->ct_sl);
	rte_rwlock_init(&sh->ct_mng->resize_rwl);
	LIST_INIT(&sh->ct_mng->free_cts);
	return 0;
}

/*
 * Close and release all the resources of the
 * ASO connection tracking management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_aso_ct_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
	struct mlx5_aso_ct_pool *ct_pool;
	struct mlx5_aso_ct_action *ct;
	uint32_t idx;
	uint32_t val;
	uint32_t cnt;
	int i;

	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_CONNECTION_TRACKING);
	idx = mng->next;
	while (idx--) {
		cnt = 0;
		ct_pool = mng->pools[idx];
		for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
			ct = &ct_pool->actions[i];
			val = __atomic_fetch_sub(&ct->refcnt, 1,
						 __ATOMIC_RELAXED);
			MLX5_ASSERT(val == 1);
			if (val > 1)
				cnt++;
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			if (ct->dr_action_orig)
				claim_zero(mlx5_glue->destroy_flow_action
							(ct->dr_action_orig));
			if (ct->dr_action_rply)
				claim_zero(mlx5_glue->destroy_flow_action
							(ct->dr_action_rply));
#endif
		}
		claim_zero(mlx5_devx_cmd_destroy(ct_pool->devx_obj));
		if (cnt) {
			DRV_LOG(DEBUG, "%u ASO CT objects are being used in the pool %u",
				cnt, idx);
		}
		mlx5_free(ct_pool);
		/* In case of failure. */
		mng->next--;
	}
	mlx5_free(mng->pools);
	mlx5_free(mng);
	/* Management structure must be cleared to 0s during allocation. */
	sh->ct_mng = NULL;
}

/**
 * Initialize the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh)
{
	uint8_t i;
	struct mlx5_indexed_pool_config cfg;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i) {
		cfg = mlx5_ipool_cfg[i];
		switch (i) {
		default:
			break;
		/*
		 * Set MLX5_IPOOL_MLX5_FLOW ipool size
		 * according to PCI function flow configuration.
		 */
		case MLX5_IPOOL_MLX5_FLOW:
			cfg.size = sh->config.dv_flow_en ?
				sizeof(struct mlx5_flow_handle) :
				MLX5_FLOW_HANDLE_VERBS_SIZE;
			break;
		}
		if (sh->config.reclaim_mode) {
			cfg.release_mem_en = 1;
			cfg.per_core_cache = 0;
		} else {
			cfg.release_mem_en = 0;
		}
		sh->ipool[i] = mlx5_ipool_create(&cfg);
	}
}

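/*
 * Minimal usage sketch for the pools created above (illustrative only;
 * assumes the mlx5_ipool_* helpers declared in mlx5_utils.h):
 *
 *   uint32_t idx;
 *   struct rte_flow *flow;
 *
 *   flow = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
 *   ...
 *   flow = mlx5_ipool_get(sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
 *   ...
 *   mlx5_ipool_free(sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
 *
 * Entries are addressed by a compact 32-bit index instead of a pointer,
 * which is what enables the per-core caches and trunk reclaim configured
 * above.
 */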

/**
 * Release the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh)
{
	uint8_t i;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i)
		mlx5_ipool_destroy(sh->ipool[i]);
	for (i = 0; i < MLX5_MAX_MODIFY_NUM; ++i)
		if (sh->mdh_ipools[i])
			mlx5_ipool_destroy(sh->mdh_ipools[i]);
}

/*
 * Check if the dynamic flex parser for eCPRI already exists.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   true if it exists, false otherwise.
 */
bool
mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	return !!prf->obj;
}

/*
 * Allocate a flex parser for eCPRI. Once created, the resources related to
 * this parser will be held until the device is closed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
	struct mlx5_devx_graph_node_attr node = {
		.modify_field_select = 0,
	};
	uint32_t ids[8];
	int ret;

	if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
		DRV_LOG(ERR, "Dynamic flex parser is not supported "
			"for device %s.", priv->dev_data->name);
		return -ENOTSUP;
	}
	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
	/* 8 bytes now: 4B common header + 4B message body header. */
	node.header_length_base_value = 0x8;
	/* After MAC layer: Ether / VLAN. */
	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC;
	/* Type of compared condition should be 0xAEFE in the L2 layer. */
	node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI;
	/* Sample #0: type in common header. */
	node.sample[0].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[0].flow_match_sample_offset_mode = 0x0;
	/* Only the 2nd byte will be used. */
	node.sample[0].flow_match_sample_field_base_offset = 0x0;
	/* Sample #1: message payload. */
	node.sample[1].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[1].flow_match_sample_offset_mode = 0x0;
	/*
	 * Only the first two bytes will be used right now; the offset starts
	 * after the common header, which is one DW (u32) long.
	 */
	node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
	prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->cdev->ctx, &node);
	if (!prf->obj) {
		DRV_LOG(ERR, "Failed to create flex parser node object.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->num = 2;
	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num);
	if (ret) {
		DRV_LOG(ERR, "Failed to query sample IDs.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->offset[0] = 0x0;
	prf->offset[1] = sizeof(uint32_t);
	prf->ids[0] = ids[0];
	prf->ids[1] = ids[1];
	return 0;
}

/*
 * Destroy the flex parser node, including the parser itself, input / output
 * arcs and DW samples. Resources could be reused then.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	if (prf->obj)
		mlx5_devx_cmd_destroy(prf->obj);
	prf->obj = NULL;
}

uint32_t
mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
{
	uint32_t sw_parsing_offloads = 0;

	if (attr->swp) {
		sw_parsing_offloads |= MLX5_SW_PARSING_CAP;
		if (attr->swp_csum)
			sw_parsing_offloads |= MLX5_SW_PARSING_CSUM_CAP;
		if (attr->swp_lso)
			sw_parsing_offloads |= MLX5_SW_PARSING_TSO_CAP;
	}
	return sw_parsing_offloads;
}

uint32_t
mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
{
	uint32_t tn_offloads = 0;

	if (attr->tunnel_stateless_vxlan)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_VXLAN_CAP;
	if (attr->tunnel_stateless_gre)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_GRE_CAP;
	if (attr->tunnel_stateless_geneve_rx)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_GENEVE_CAP;
	return tn_offloads;
}

/* Fill all fields of UAR structure. */
static int
mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh)
{
	int ret;

	ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare Tx DevX UAR.");
		return -rte_errno;
	}
	MLX5_ASSERT(sh->tx_uar.obj);
	MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj));
	ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare Rx DevX UAR.");
		mlx5_devx_uar_release(&sh->tx_uar);
		return -rte_errno;
	}
	MLX5_ASSERT(sh->rx_uar.obj);
	MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj));
	return 0;
}

static void
mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_devx_uar_release(&sh->rx_uar);
	mlx5_devx_uar_release(&sh->tx_uar);
}

/**
 * rte_mempool_walk() callback to unregister Rx mempools.
 * It is used when implicit mempool registration is disabled.
 *
 * @param mp
 *   The mempool being walked.
 * @param arg
 *   Pointer to the device shared context.
 */
static void
mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;

	mlx5_dev_mempool_unregister(sh->cdev, mp);
}

/**
 * Callback used when implicit mempool registration is disabled
 * in order to track Rx mempool destruction.
 *
 * @param event
 *   Mempool life cycle event.
 * @param mp
 *   An Rx mempool registered explicitly when the port is started.
 * @param arg
 *   Pointer to a device shared context.
 */
static void
mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
					struct rte_mempool *mp, void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;

	if (event == RTE_MEMPOOL_EVENT_DESTROY)
		mlx5_dev_mempool_unregister(sh->cdev, mp);
}

int
mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int ret;

	/* Check if we only need to track Rx mempool destruction. */
	if (!sh->cdev->config.mr_mempool_reg_en) {
		ret = rte_mempool_event_callback_register
				(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
		return ret == 0 || rte_errno == EEXIST ? 0 : ret;
	}
	return mlx5_dev_mempool_subscribe(sh->cdev);
}

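/*
 * Summary sketch of the two registration modes handled above (illustrative,
 * not executed code):
 *
 *   if (sh->cdev->config.mr_mempool_reg_en)
 *       // implicit mode: subscribe to all mempool events so every mempool
 *       // is registered for MR lookup as soon as it is created;
 *   else
 *       // explicit mode: only track RTE_MEMPOOL_EVENT_DESTROY so mempools
 *       // registered at port start get unregistered on destruction.
 */
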
/**
 * Set up multiple TISs with different affinities according to
 * the number of bonded ports.
 *
 * @param sh
 *   Pointer to the shared device context.
 *
 * @return
 *   Zero on success, -1 otherwise.
 */
static int
mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
{
	int i;
	struct mlx5_devx_lag_context lag_ctx = { 0 };
	struct mlx5_devx_tis_attr tis_attr = { 0 };

	tis_attr.transport_domain = sh->td->id;
	if (sh->bond.n_port) {
		if (!mlx5_devx_cmd_query_lag(sh->cdev->ctx, &lag_ctx)) {
			sh->lag.tx_remap_affinity[0] =
				lag_ctx.tx_remap_affinity_1;
			sh->lag.tx_remap_affinity[1] =
				lag_ctx.tx_remap_affinity_2;
			sh->lag.affinity_mode = lag_ctx.port_select_mode;
		} else {
			DRV_LOG(ERR, "Failed to query lag affinity.");
			return -1;
		}
		if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {
			for (i = 0; i < sh->bond.n_port; i++) {
				tis_attr.lag_tx_port_affinity =
					MLX5_IFC_LAG_MAP_TIS_AFFINITY(i,
							sh->bond.n_port);
				sh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx,
						&tis_attr);
				if (!sh->tis[i]) {
					DRV_LOG(ERR, "Failed to create TIS %d/%d for bonding device"
						" %s.", i, sh->bond.n_port,
						sh->ibdev_name);
					return -1;
				}
			}
			DRV_LOG(DEBUG, "LAG number of ports: %d, affinity_1 & 2: pf%d & %d.",
				sh->bond.n_port, lag_ctx.tx_remap_affinity_1,
				lag_ctx.tx_remap_affinity_2);
			return 0;
		}
		if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)
			DRV_LOG(INFO, "Device %s enabled HW hash based LAG.",
					sh->ibdev_name);
	}
	tis_attr.lag_tx_port_affinity = 0;
	sh->tis[0] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
	if (!sh->tis[0]) {
		DRV_LOG(ERR, "Failed to create TIS 0 for bonding device"
			" %s.", sh->ibdev_name);
		return -1;
	}
	return 0;
}

/**
 * Verify and store value for shared device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_dev_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_sh_config *config = opaque;
	signed long tmp;

	errno = 0;
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
		/* Negative values are acceptable for some keys only. */
		rte_errno = EINVAL;
		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_TX_PP, key) == 0) {
		unsigned long mod = tmp >= 0 ? tmp : -tmp;

		if (!mod) {
			DRV_LOG(ERR, "Zero Tx packet pacing parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->tx_pp = tmp;
	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
		config->tx_skew = tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
		config->dv_esw_en = !!tmp;
	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
		if (tmp > 2) {
			DRV_LOG(ERR, "Invalid %s parameter.", key);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dv_flow_en = tmp;
	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
		if (tmp != MLX5_XMETA_MODE_LEGACY &&
		    tmp != MLX5_XMETA_MODE_META16 &&
		    tmp != MLX5_XMETA_MODE_META32 &&
		    tmp != MLX5_XMETA_MODE_MISS_INFO) {
			DRV_LOG(ERR, "Invalid extensive metadata parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		if (tmp != MLX5_XMETA_MODE_MISS_INFO)
			config->dv_xmeta_en = tmp;
		else
			config->dv_miss_info = 1;
	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
		config->lacp_by_user = !!tmp;
	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
		if (tmp != MLX5_RCM_NONE &&
		    tmp != MLX5_RCM_LIGHT &&
		    tmp != MLX5_RCM_AGGR) {
			DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->reclaim_mode = tmp;
	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
		config->decap_en = !!tmp;
	} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
		config->allow_duplicate_pattern = !!tmp;
	}
	return 0;
}

/**
 * Parse user device parameters and adjust them according to device
 * capabilities.
 *
 * @param sh
 *   Pointer to shared device context.
 * @param mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 * @param config
 *   Pointer to shared device configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
				struct mlx5_kvargs_ctrl *mkvlist,
				struct mlx5_sh_config *config)
{
	const char **params = (const char *[]){
		MLX5_TX_PP,
		MLX5_TX_SKEW,
		MLX5_L3_VXLAN_EN,
		MLX5_VF_NL_EN,
		MLX5_DV_ESW_EN,
		MLX5_DV_FLOW_EN,
		MLX5_DV_XMETA_EN,
		MLX5_LACP_BY_USER,
		MLX5_RECLAIM_MEM,
		MLX5_DECAP_EN,
		MLX5_ALLOW_DUPLICATE_PATTERN,
		NULL,
	};
	int ret = 0;

	/* Default configuration. */
	memset(config, 0, sizeof(*config));
	config->vf_nl_en = 1;
	config->dv_esw_en = 1;
	config->dv_flow_en = 1;
	config->decap_en = 1;
	config->allow_duplicate_pattern = 1;
	if (mkvlist != NULL) {
		/* Process parameters. */
		ret = mlx5_kvargs_process(mkvlist, params,
					  mlx5_dev_args_check_handler, config);
		if (ret) {
			DRV_LOG(ERR, "Failed to process device arguments: %s",
				strerror(rte_errno));
			return -rte_errno;
		}
	}
	/* Adjust parameters according to device capabilities. */
	if (config->dv_flow_en && !sh->dev_cap.dv_flow_en) {
		DRV_LOG(WARNING, "DV flow is not supported.");
		config->dv_flow_en = 0;
	}
	if (config->dv_esw_en && !sh->dev_cap.dv_esw_en) {
		DRV_LOG(DEBUG, "E-Switch DV flow is not supported.");
		config->dv_esw_en = 0;
	}
	if (config->dv_miss_info && config->dv_esw_en)
		config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
	if (!config->dv_esw_en &&
	    config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
		DRV_LOG(WARNING,
			"Metadata mode %u is not supported (no E-Switch).",
			config->dv_xmeta_en);
		config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
	}
	if (config->tx_pp && !sh->dev_cap.txpp_en) {
		DRV_LOG(ERR, "Packet pacing is not supported.");
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (!config->tx_pp && config->tx_skew) {
		DRV_LOG(WARNING,
			"\"tx_skew\" has no effect without \"tx_pp\".");
	}
	/*
	 * If the HW has a bug combining tunnel packet decapsulation with
	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
	 * bit. Then the RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set
	 * anymore.
	 */
	if (sh->dev_cap.scatter_fcs_w_decap_disable && sh->config.decap_en)
		config->hw_fcs_strip = 0;
	else
		config->hw_fcs_strip = sh->dev_cap.hw_fcs_strip;
	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
		(config->hw_fcs_strip ? "" : "not "));
	DRV_LOG(DEBUG, "\"tx_pp\" is %d.", config->tx_pp);
	DRV_LOG(DEBUG, "\"tx_skew\" is %d.", config->tx_skew);
	DRV_LOG(DEBUG, "\"reclaim_mode\" is %u.", config->reclaim_mode);
	DRV_LOG(DEBUG, "\"dv_esw_en\" is %u.", config->dv_esw_en);
	DRV_LOG(DEBUG, "\"dv_flow_en\" is %u.", config->dv_flow_en);
	DRV_LOG(DEBUG, "\"dv_xmeta_en\" is %u.", config->dv_xmeta_en);
	DRV_LOG(DEBUG, "\"dv_miss_info\" is %u.", config->dv_miss_info);
	DRV_LOG(DEBUG, "\"l3_vxlan_en\" is %u.", config->l3_vxlan_en);
	DRV_LOG(DEBUG, "\"vf_nl_en\" is %u.", config->vf_nl_en);
	DRV_LOG(DEBUG, "\"lacp_by_user\" is %u.", config->lacp_by_user);
	DRV_LOG(DEBUG, "\"decap_en\" is %u.", config->decap_en);
	DRV_LOG(DEBUG, "\"allow_duplicate_pattern\" is %u.",
		config->allow_duplicate_pattern);
	return 0;
}

/**
 * Configure realtime timestamp format.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 * @param hca_attr
 *   Pointer to DevX HCA capabilities structure.
 */
void
mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
			 struct mlx5_hca_attr *hca_attr)
{
	uint32_t dw_cnt = MLX5_ST_SZ_DW(register_mtutc);
	uint32_t reg[dw_cnt];
	int ret = ENOTSUP;

	if (hca_attr->access_register_user)
		ret = mlx5_devx_cmd_register_read(sh->cdev->ctx,
						  MLX5_REGISTER_ID_MTUTC, 0,
						  reg, dw_cnt);
	if (!ret) {
		uint32_t ts_mode;

		/* MTUTC register is read successfully. */
		ts_mode = MLX5_GET(register_mtutc, reg, time_stamp_mode);
		if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
			sh->dev_cap.rt_timestamp = 1;
	} else {
		/* Kernel does not support register reading. */
		if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
			sh->dev_cap.rt_timestamp = 1;
	}
}

/**
 * Allocate the shared device context. If there is a multiport device, the
 * master and representors will share this context; if there is a single-port
 * dedicated device, the context will be used by only the given port.
 *
 * The routine first searches for an existing context matching the specified
 * device name; if one is found, the shared context is reused and its
 * reference counter is incremented. If no context is found, a new one is
 * created and initialized with the specified device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the device attributes (name, port, etc).
 * @param mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 *
 * @return
 *   Pointer to mlx5_dev_ctx_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
			  struct mlx5_kvargs_ctrl *mkvlist)
{
	struct mlx5_dev_ctx_shared *sh;
	int err = 0;
	uint32_t i;

	MLX5_ASSERT(spawn);
	/* Secondary process should not create the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
		if (!strcmp(sh->ibdev_name, spawn->phys_dev_name)) {
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create new shared context. */
	MLX5_ASSERT(spawn->max_port);
	sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			 sizeof(struct mlx5_dev_ctx_shared) +
			 spawn->max_port * sizeof(struct mlx5_dev_shared_port),
			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh) {
		DRV_LOG(ERR, "Shared context allocation failure.");
		rte_errno = ENOMEM;
		goto exit;
	}
	pthread_mutex_init(&sh->txpp.mutex, NULL);
	sh->numa_node = spawn->cdev->dev->numa_node;
	sh->cdev = spawn->cdev;
	sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
	if (spawn->bond_info)
		sh->bond = *spawn->bond_info;
	err = mlx5_os_capabilities_prepare(sh);
	if (err) {
		DRV_LOG(ERR, "Failed to configure device capabilities.");
		goto error;
	}
	err = mlx5_shared_dev_ctx_args_config(sh, mkvlist, &sh->config);
	if (err) {
		DRV_LOG(ERR, "Failed to process device configuration: %s",
			strerror(rte_errno));
		goto error;
	}
	sh->refcnt = 1;
	sh->max_port = spawn->max_port;
	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->cdev->ctx),
		sizeof(sh->ibdev_name) - 1);
	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
		sizeof(sh->ibdev_path) - 1);
	/*
	 * Setting port_id to the maximum disallowed value means there is no
	 * interrupt subhandler installed for the given port index i.
	 */
	for (i = 0; i < sh->max_port; i++) {
		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
	}
	if (sh->cdev->config.devx) {
		sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
		if (!sh->td) {
			DRV_LOG(ERR, "TD allocation failure");
			rte_errno = ENOMEM;
			goto error;
		}
		if (mlx5_setup_tis(sh)) {
			DRV_LOG(ERR, "TIS allocation failure");
			rte_errno = ENOMEM;
			goto error;
		}
		err = mlx5_rxtx_uars_prepare(sh);
		if (err)
			goto error;
#ifndef RTE_ARCH_64
	} else {
		/* Initialize UAR access locks for 32bit implementations. */
		rte_spinlock_init(&sh->uar_lock_cq);
		for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
			rte_spinlock_init(&sh->uar_lock[i]);
#endif
	}
	mlx5_os_dev_shared_handler_install(sh);
	if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
		err = mlx5_flow_os_init_workspace_once();
		if (err)
			goto error;
	}
	mlx5_flow_aging_init(sh);
	mlx5_flow_counters_mng_init(sh);
	mlx5_flow_ipool_create(sh);
	/* Add context to the global device list. */
	LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
	rte_spinlock_init(&sh->geneve_tlv_opt_sl);
exit:
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	return sh;
error:
	err = rte_errno;
	pthread_mutex_destroy(&sh->txpp.mutex);
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	MLX5_ASSERT(sh);
	mlx5_rxtx_uars_release(sh);
	i = 0;
	do {
		if (sh->tis[i])
			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
	} while (++i < (uint32_t)sh->bond.n_port);
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	mlx5_free(sh);
	rte_errno = err;
	return NULL;
}

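/*
 * Lifetime note (illustrative): every successful call to
 * mlx5_alloc_shared_dev_ctx() must be balanced by exactly one call to
 * mlx5_free_shared_dev_ctx() below; the context itself is destroyed only
 * when the reference counter of the last sharing port drops to zero.
 */
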
/**
 * Free the shared IB device context. Decrement the reference counter and,
 * if it reaches zero, free all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
	int ret;
	int i = 0;

	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
	/* Check the object presence in the list. */
	struct mlx5_dev_ctx_shared *lctx;

	LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)
		if (lctx == sh)
			break;
	MLX5_ASSERT(lctx);
	if (lctx != sh) {
		DRV_LOG(ERR, "Freeing non-existing shared IB context");
		goto exit;
	}
#endif
	MLX5_ASSERT(sh);
	MLX5_ASSERT(sh->refcnt);
	/* Secondary process should not free the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	if (--sh->refcnt)
		goto exit;
	/* Stop watching for mempool events and unregister all mempools. */
	if (!sh->cdev->config.mr_mempool_reg_en) {
		ret = rte_mempool_event_callback_unregister
				(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
		if (ret == 0)
			rte_mempool_walk
			     (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh);
	}
	/* Remove context from the global device list. */
	LIST_REMOVE(sh, next);
	/* Release resources on the last device removal. */
	if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
		mlx5_os_net_cleanup();
		mlx5_flow_os_release_workspace();
	}
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	if (sh->flex_parsers_dv) {
		mlx5_list_destroy(sh->flex_parsers_dv);
		sh->flex_parsers_dv = NULL;
	}
	/*
	 * Ensure there is no async event handler installed.
	 * Only primary process handles async device events.
	 */
	mlx5_flow_counters_mng_close(sh);
	if (sh->ct_mng)
		mlx5_flow_aso_ct_mng_close(sh);
	if (sh->aso_age_mng) {
		mlx5_flow_aso_age_mng_close(sh);
		sh->aso_age_mng = NULL;
	}
	if (sh->mtrmng)
		mlx5_aso_flow_mtrs_mng_close(sh);
	mlx5_flow_ipool_destroy(sh);
	mlx5_os_dev_shared_handler_uninstall(sh);
	mlx5_rxtx_uars_release(sh);
	do {
		if (sh->tis[i])
			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
	} while (++i < sh->bond.n_port);
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
	pthread_mutex_destroy(&sh->txpp.mutex);
	mlx5_free(sh);
	return;
exit:
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}

/**
 * Destroy table hash list.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_hlist **tbls = (priv->sh->config.dv_flow_en == 2) ?
				   &sh->groups : &sh->flow_tbls;

	if (*tbls == NULL)
		return;
	mlx5_hlist_destroy(*tbls);
	*tbls = NULL;
}

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
/**
 * Allocate HW steering group hash list.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static int
mlx5_alloc_hw_group_hash_list(struct mlx5_priv *priv)
{
	int err = 0;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	char s[MLX5_NAME_SIZE];

	MLX5_ASSERT(sh);
	snprintf(s, sizeof(s), "%s_flow_groups", priv->sh->ibdev_name);
	sh->groups = mlx5_hlist_create
			(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
			 false, true, sh,
			 flow_hw_grp_create_cb,
			 flow_hw_grp_match_cb,
			 flow_hw_grp_remove_cb,
			 flow_hw_grp_clone_cb,
			 flow_hw_grp_clone_free_cb);
	if (!sh->groups) {
		DRV_LOG(ERR, "flow groups hash list creation failed.");
		err = ENOMEM;
	}
	return err;
}
#endif


/**
 * Initialize flow table hash list and create the root tables entry
 * for each domain.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
int
mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
{
	int err = 0;

	/* Tables are only used in DV and DR modes. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	char s[MLX5_NAME_SIZE];

	if (priv->sh->config.dv_flow_en == 2)
		return mlx5_alloc_hw_group_hash_list(priv);
	MLX5_ASSERT(sh);
	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
					  false, true, sh,
					  flow_dv_tbl_create_cb,
					  flow_dv_tbl_match_cb,
					  flow_dv_tbl_remove_cb,
					  flow_dv_tbl_clone_cb,
					  flow_dv_tbl_clone_free_cb);
	if (!sh->flow_tbls) {
		DRV_LOG(ERR, "flow tables hash list creation failed.");
		err = ENOMEM;
		return err;
	}
#ifndef HAVE_MLX5DV_DR
	struct rte_flow_error error;
	struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];

	/*
	 * When there is no DR support, the zero tables must still be created
	 * because DV expects to see them even though they cannot be created
	 * by RDMA-CORE.
	 */
	if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0,
		NULL, 0, 1, 0, &error) ||
	    !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0,
		NULL, 0, 1, 0, &error) ||
	    !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0,
		NULL, 0, 1, 0, &error)) {
		err = ENOMEM;
		goto error;
	}
	return err;
error:
	mlx5_free_table_hash_list(priv);
#endif /* HAVE_MLX5DV_DR */
#endif
	return err;
}

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}

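/*
 * Usage sketch (illustrative): this helper is meant for debug/tuning knobs
 * taken from the environment; the exact variable and its effect below are
 * assumptions for the example only:
 *
 *   if (mlx5_getenv_int("MLX5_SHUT_UP_BF"))
 *       ... disable BlueFlame usage ...
 */
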
/**
 * DPDK callback to add a UDP tunnel port.
 *
 * @param[in] dev
 *   A pointer to the eth_dev.
 * @param[in] udp_tunnel
 *   A pointer to the UDP tunnel.
 *
 * @return
 *   0 on valid udp ports and tunnels, -ENOTSUP otherwise.
 */
int
mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_udp_tunnel *udp_tunnel)
{
	MLX5_ASSERT(udp_tunnel != NULL);
	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
	    udp_tunnel->udp_port == 4789)
		return 0;
	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
	    udp_tunnel->udp_port == 4790)
		return 0;
	return -ENOTSUP;
}

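/*
 * Illustrative usage from the application side (standard ethdev API); only
 * the default IANA-assigned ports are accepted, anything else fails with
 * -ENOTSUP:
 *
 *   struct rte_eth_udp_tunnel tunnel = {
 *       .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *       .udp_port = 4789,
 *   };
 *   ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */
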
/**
 * Initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_proc_priv_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_proc_priv *ppriv;
	size_t ppriv_size;

	mlx5_proc_priv_uninit(dev);
	/*
	 * UAR register table follows the process private structure. BlueFlame
	 * registers for Tx queues are stored in the table.
	 */
	ppriv_size = sizeof(struct mlx5_proc_priv) +
		     priv->txqs_n * sizeof(struct mlx5_uar_data);
	ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size,
			    RTE_CACHE_LINE_SIZE, dev->device->numa_node);
	if (!ppriv) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ppriv->uar_table_sz = priv->txqs_n;
	dev->process_private = ppriv;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		priv->sh->pppriv = ppriv;
	return 0;
}

/**
 * Un-initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
	if (!dev->process_private)
		return;
	mlx5_free(dev->process_private);
	dev->process_private = NULL;
}

1808 /**
1809  * DPDK callback to close the device.
1810  *
1811  * Destroy all queues and objects, free memory.
1812  *
1813  * @param dev
1814  *   Pointer to Ethernet device structure.
1815  */
1816 int
1817 mlx5_dev_close(struct rte_eth_dev *dev)
1818 {
1819 	struct mlx5_priv *priv = dev->data->dev_private;
1820 	unsigned int i;
1821 	int ret;
1822 
1823 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		/* Check if process_private was already released. */
1825 		if (!dev->process_private)
1826 			return 0;
1827 		mlx5_tx_uar_uninit_secondary(dev);
1828 		mlx5_proc_priv_uninit(dev);
1829 		rte_eth_dev_release_port(dev);
1830 		return 0;
1831 	}
1832 	if (!priv->sh)
1833 		return 0;
1834 	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
1835 		dev->data->port_id,
1836 		((priv->sh->cdev->ctx != NULL) ?
1837 		mlx5_os_get_ctx_device_name(priv->sh->cdev->ctx) : ""));
	/*
	 * If the default mreg copy action was already removed at the stop
	 * stage, the lookup will find nothing and no further action is taken.
	 */
1842 	mlx5_flow_stop_default(dev);
1843 	mlx5_traffic_disable(dev);
	/*
	 * If all the flows were already flushed at the device stop stage,
	 * this returns immediately without any action.
	 */
1848 	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
1849 	mlx5_action_handle_flush(dev);
1850 	mlx5_flow_meter_flush(dev, NULL);
1851 	/* Prevent crashes when queues are still in use. */
1852 	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
1853 	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
1854 	rte_wmb();
1855 	/* Disable datapath on secondary process. */
1856 	mlx5_mp_os_req_stop_rxtx(dev);
1857 	/* Free the eCPRI flex parser resource. */
1858 	mlx5_flex_parser_ecpri_release(dev);
1859 	mlx5_flex_item_port_cleanup(dev);
1860 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
1861 	flow_hw_resource_release(dev);
1862 #endif
1863 	if (priv->rxq_privs != NULL) {
1864 		/* XXX race condition if mlx5_rx_burst() is still running. */
1865 		rte_delay_us_sleep(1000);
1866 		for (i = 0; (i != priv->rxqs_n); ++i)
1867 			mlx5_rxq_release(dev, i);
1868 		priv->rxqs_n = 0;
1869 		mlx5_free(priv->rxq_privs);
1870 		priv->rxq_privs = NULL;
1871 	}
1872 	if (priv->txqs != NULL) {
1873 		/* XXX race condition if mlx5_tx_burst() is still running. */
1874 		rte_delay_us_sleep(1000);
1875 		for (i = 0; (i != priv->txqs_n); ++i)
1876 			mlx5_txq_release(dev, i);
1877 		priv->txqs_n = 0;
1878 		priv->txqs = NULL;
1879 	}
1880 	mlx5_proc_priv_uninit(dev);
1881 	if (priv->q_counters) {
1882 		mlx5_devx_cmd_destroy(priv->q_counters);
1883 		priv->q_counters = NULL;
1884 	}
1885 	if (priv->drop_queue.hrxq)
1886 		mlx5_drop_action_destroy(dev);
1887 	if (priv->mreg_cp_tbl)
1888 		mlx5_hlist_destroy(priv->mreg_cp_tbl);
1889 	mlx5_mprq_free_mp(dev);
1890 	mlx5_os_free_shared_dr(priv);
1891 	if (priv->rss_conf.rss_key != NULL)
1892 		mlx5_free(priv->rss_conf.rss_key);
1893 	if (priv->reta_idx != NULL)
1894 		mlx5_free(priv->reta_idx);
1895 	if (priv->sh->dev_cap.vf)
1896 		mlx5_os_mac_addr_flush(dev);
1897 	if (priv->nl_socket_route >= 0)
1898 		close(priv->nl_socket_route);
1899 	if (priv->nl_socket_rdma >= 0)
1900 		close(priv->nl_socket_rdma);
1901 	if (priv->vmwa_context)
1902 		mlx5_vlan_vmwa_exit(priv->vmwa_context);
1903 	ret = mlx5_hrxq_verify(dev);
1904 	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
1906 			dev->data->port_id);
1907 	ret = mlx5_ind_table_obj_verify(dev);
1908 	if (ret)
		DRV_LOG(WARNING, "port %u some indirection tables still remain",
1910 			dev->data->port_id);
1911 	ret = mlx5_rxq_obj_verify(dev);
1912 	if (ret)
1913 		DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
1914 			dev->data->port_id);
1915 	ret = mlx5_ext_rxq_verify(dev);
1916 	if (ret)
		DRV_LOG(WARNING, "port %u some external Rx queues still remain",
1918 			dev->data->port_id);
1919 	ret = mlx5_rxq_verify(dev);
1920 	if (ret)
1921 		DRV_LOG(WARNING, "port %u some Rx queues still remain",
1922 			dev->data->port_id);
1923 	ret = mlx5_txq_obj_verify(dev);
1924 	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queues still remain",
1926 			dev->data->port_id);
1927 	ret = mlx5_txq_verify(dev);
1928 	if (ret)
1929 		DRV_LOG(WARNING, "port %u some Tx queues still remain",
1930 			dev->data->port_id);
1931 	ret = mlx5_flow_verify(dev);
1932 	if (ret)
1933 		DRV_LOG(WARNING, "port %u some flows still remain",
1934 			dev->data->port_id);
1935 	if (priv->hrxqs)
1936 		mlx5_list_destroy(priv->hrxqs);
1937 	mlx5_free(priv->ext_rxqs);
	/*
	 * Free the shared context last, because the cleanup routines
	 * above may use some shared fields; for example,
	 * mlx5_os_mac_addr_flush() uses ibdev_path to retrieve the
	 * ifindex if Netlink fails.
	 */
1944 	mlx5_free_shared_dev_ctx(priv->sh);
1945 	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
1946 		unsigned int c = 0;
1947 		uint16_t port_id;
1948 
1949 		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
1950 			struct mlx5_priv *opriv =
1951 				rte_eth_devices[port_id].data->dev_private;
1952 
1953 			if (!opriv ||
1954 			    opriv->domain_id != priv->domain_id ||
1955 			    &rte_eth_devices[port_id] == dev)
1956 				continue;
1957 			++c;
1958 			break;
1959 		}
1960 		if (!c)
1961 			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
1962 	}
1963 	memset(priv, 0, sizeof(*priv));
1964 	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
1965 	/*
1966 	 * Reset mac_addrs to NULL such that it is not freed as part of
1967 	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
1968 	 * it is freed when dev_private is freed.
1969 	 */
1970 	dev->data->mac_addrs = NULL;
1971 	return 0;
1972 }
1973 
1974 const struct eth_dev_ops mlx5_dev_ops = {
1975 	.dev_configure = mlx5_dev_configure,
1976 	.dev_start = mlx5_dev_start,
1977 	.dev_stop = mlx5_dev_stop,
1978 	.dev_set_link_down = mlx5_set_link_down,
1979 	.dev_set_link_up = mlx5_set_link_up,
1980 	.dev_close = mlx5_dev_close,
1981 	.promiscuous_enable = mlx5_promiscuous_enable,
1982 	.promiscuous_disable = mlx5_promiscuous_disable,
1983 	.allmulticast_enable = mlx5_allmulticast_enable,
1984 	.allmulticast_disable = mlx5_allmulticast_disable,
1985 	.link_update = mlx5_link_update,
1986 	.stats_get = mlx5_stats_get,
1987 	.stats_reset = mlx5_stats_reset,
1988 	.xstats_get = mlx5_xstats_get,
1989 	.xstats_reset = mlx5_xstats_reset,
1990 	.xstats_get_names = mlx5_xstats_get_names,
1991 	.fw_version_get = mlx5_fw_version_get,
1992 	.dev_infos_get = mlx5_dev_infos_get,
1993 	.representor_info_get = mlx5_representor_info_get,
1994 	.read_clock = mlx5_txpp_read_clock,
1995 	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
1996 	.vlan_filter_set = mlx5_vlan_filter_set,
1997 	.rx_queue_setup = mlx5_rx_queue_setup,
1998 	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
1999 	.tx_queue_setup = mlx5_tx_queue_setup,
2000 	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
2001 	.rx_queue_release = mlx5_rx_queue_release,
2002 	.tx_queue_release = mlx5_tx_queue_release,
2003 	.rx_queue_start = mlx5_rx_queue_start,
2004 	.rx_queue_stop = mlx5_rx_queue_stop,
2005 	.tx_queue_start = mlx5_tx_queue_start,
2006 	.tx_queue_stop = mlx5_tx_queue_stop,
2007 	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
2008 	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
2009 	.mac_addr_remove = mlx5_mac_addr_remove,
2010 	.mac_addr_add = mlx5_mac_addr_add,
2011 	.mac_addr_set = mlx5_mac_addr_set,
2012 	.set_mc_addr_list = mlx5_set_mc_addr_list,
2013 	.mtu_set = mlx5_dev_set_mtu,
2014 	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
2015 	.vlan_offload_set = mlx5_vlan_offload_set,
2016 	.reta_update = mlx5_dev_rss_reta_update,
2017 	.reta_query = mlx5_dev_rss_reta_query,
2018 	.rss_hash_update = mlx5_rss_hash_update,
2019 	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
2020 	.flow_ops_get = mlx5_flow_ops_get,
2021 	.rxq_info_get = mlx5_rxq_info_get,
2022 	.txq_info_get = mlx5_txq_info_get,
2023 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2024 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2025 	.rx_queue_intr_enable = mlx5_rx_intr_enable,
2026 	.rx_queue_intr_disable = mlx5_rx_intr_disable,
2027 	.is_removed = mlx5_is_removed,
2028 	.udp_tunnel_port_add  = mlx5_udp_tunnel_port_add,
2029 	.get_module_info = mlx5_get_module_info,
2030 	.get_module_eeprom = mlx5_get_module_eeprom,
2031 	.hairpin_cap_get = mlx5_hairpin_cap_get,
2032 	.mtr_ops_get = mlx5_flow_meter_ops_get,
2033 	.hairpin_bind = mlx5_hairpin_bind,
2034 	.hairpin_unbind = mlx5_hairpin_unbind,
2035 	.hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
2036 	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
2037 	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
2038 	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
2039 	.get_monitor_addr = mlx5_get_monitor_addr,
2040 };
2041 
/* Available operations for the secondary process. */
2043 const struct eth_dev_ops mlx5_dev_sec_ops = {
2044 	.stats_get = mlx5_stats_get,
2045 	.stats_reset = mlx5_stats_reset,
2046 	.xstats_get = mlx5_xstats_get,
2047 	.xstats_reset = mlx5_xstats_reset,
2048 	.xstats_get_names = mlx5_xstats_get_names,
2049 	.fw_version_get = mlx5_fw_version_get,
2050 	.dev_infos_get = mlx5_dev_infos_get,
2051 	.representor_info_get = mlx5_representor_info_get,
2052 	.read_clock = mlx5_txpp_read_clock,
2053 	.rx_queue_start = mlx5_rx_queue_start,
2054 	.rx_queue_stop = mlx5_rx_queue_stop,
2055 	.tx_queue_start = mlx5_tx_queue_start,
2056 	.tx_queue_stop = mlx5_tx_queue_stop,
2057 	.rxq_info_get = mlx5_rxq_info_get,
2058 	.txq_info_get = mlx5_txq_info_get,
2059 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2060 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2061 	.get_module_info = mlx5_get_module_info,
2062 	.get_module_eeprom = mlx5_get_module_eeprom,
2063 };
2064 
2065 /* Available operations in flow isolated mode. */
2066 const struct eth_dev_ops mlx5_dev_ops_isolate = {
2067 	.dev_configure = mlx5_dev_configure,
2068 	.dev_start = mlx5_dev_start,
2069 	.dev_stop = mlx5_dev_stop,
2070 	.dev_set_link_down = mlx5_set_link_down,
2071 	.dev_set_link_up = mlx5_set_link_up,
2072 	.dev_close = mlx5_dev_close,
2073 	.promiscuous_enable = mlx5_promiscuous_enable,
2074 	.promiscuous_disable = mlx5_promiscuous_disable,
2075 	.allmulticast_enable = mlx5_allmulticast_enable,
2076 	.allmulticast_disable = mlx5_allmulticast_disable,
2077 	.link_update = mlx5_link_update,
2078 	.stats_get = mlx5_stats_get,
2079 	.stats_reset = mlx5_stats_reset,
2080 	.xstats_get = mlx5_xstats_get,
2081 	.xstats_reset = mlx5_xstats_reset,
2082 	.xstats_get_names = mlx5_xstats_get_names,
2083 	.fw_version_get = mlx5_fw_version_get,
2084 	.dev_infos_get = mlx5_dev_infos_get,
2085 	.representor_info_get = mlx5_representor_info_get,
2086 	.read_clock = mlx5_txpp_read_clock,
2087 	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
2088 	.vlan_filter_set = mlx5_vlan_filter_set,
2089 	.rx_queue_setup = mlx5_rx_queue_setup,
2090 	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
2091 	.tx_queue_setup = mlx5_tx_queue_setup,
2092 	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
2093 	.rx_queue_release = mlx5_rx_queue_release,
2094 	.tx_queue_release = mlx5_tx_queue_release,
2095 	.rx_queue_start = mlx5_rx_queue_start,
2096 	.rx_queue_stop = mlx5_rx_queue_stop,
2097 	.tx_queue_start = mlx5_tx_queue_start,
2098 	.tx_queue_stop = mlx5_tx_queue_stop,
2099 	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
2100 	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
2101 	.mac_addr_remove = mlx5_mac_addr_remove,
2102 	.mac_addr_add = mlx5_mac_addr_add,
2103 	.mac_addr_set = mlx5_mac_addr_set,
2104 	.set_mc_addr_list = mlx5_set_mc_addr_list,
2105 	.mtu_set = mlx5_dev_set_mtu,
2106 	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
2107 	.vlan_offload_set = mlx5_vlan_offload_set,
2108 	.flow_ops_get = mlx5_flow_ops_get,
2109 	.rxq_info_get = mlx5_rxq_info_get,
2110 	.txq_info_get = mlx5_txq_info_get,
2111 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2112 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2113 	.rx_queue_intr_enable = mlx5_rx_intr_enable,
2114 	.rx_queue_intr_disable = mlx5_rx_intr_disable,
2115 	.is_removed = mlx5_is_removed,
2116 	.get_module_info = mlx5_get_module_info,
2117 	.get_module_eeprom = mlx5_get_module_eeprom,
2118 	.hairpin_cap_get = mlx5_hairpin_cap_get,
2119 	.mtr_ops_get = mlx5_flow_meter_ops_get,
2120 	.hairpin_bind = mlx5_hairpin_bind,
2121 	.hairpin_unbind = mlx5_hairpin_unbind,
2122 	.hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
2123 	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
2124 	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
2125 	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
2126 	.get_monitor_addr = mlx5_get_monitor_addr,
2127 };
2128 
2129 /**
2130  * Verify and store value for device argument.
2131  *
2132  * @param[in] key
2133  *   Key argument to verify.
2134  * @param[in] val
2135  *   Value associated with key.
2136  * @param opaque
2137  *   User data.
2138  *
2139  * @return
2140  *   0 on success, a negative errno value otherwise and rte_errno is set.
2141  */
2142 static int
2143 mlx5_port_args_check_handler(const char *key, const char *val, void *opaque)
2144 {
2145 	struct mlx5_port_config *config = opaque;
2146 	signed long tmp;
2147 
2148 	/* No-op, port representors are processed in mlx5_dev_spawn(). */
2149 	if (!strcmp(MLX5_REPRESENTOR, key))
2150 		return 0;
2151 	errno = 0;
2152 	tmp = strtol(val, NULL, 0);
2153 	if (errno) {
2154 		rte_errno = errno;
2155 		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
2156 		return -rte_errno;
2157 	}
2158 	if (tmp < 0) {
2159 		/* Negative values are acceptable for some keys only. */
2160 		rte_errno = EINVAL;
2161 		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
2162 		return -rte_errno;
2163 	}
2164 	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
2165 		if (tmp > MLX5_CQE_RESP_FORMAT_L34H_STRIDX) {
2166 			DRV_LOG(ERR, "invalid CQE compression "
2167 				     "format parameter");
2168 			rte_errno = EINVAL;
2169 			return -rte_errno;
2170 		}
2171 		config->cqe_comp = !!tmp;
2172 		config->cqe_comp_fmt = tmp;
2173 	} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
2174 		config->hw_padding = !!tmp;
2175 	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
2176 		config->mprq.enabled = !!tmp;
2177 	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
2178 		config->mprq.log_stride_num = tmp;
2179 	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
2180 		config->mprq.log_stride_size = tmp;
2181 	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
2182 		config->mprq.max_memcpy_len = tmp;
2183 	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
2184 		config->mprq.min_rxqs_num = tmp;
2185 	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
2186 		DRV_LOG(WARNING, "%s: deprecated parameter,"
2187 				 " converted to txq_inline_max", key);
2188 		config->txq_inline_max = tmp;
2189 	} else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
2190 		config->txq_inline_max = tmp;
2191 	} else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
2192 		config->txq_inline_min = tmp;
2193 	} else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
2194 		config->txq_inline_mpw = tmp;
2195 	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
2196 		config->txqs_inline = tmp;
2197 	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
2198 		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
2199 	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
2200 		config->mps = !!tmp;
2201 	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
2202 		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
2203 	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
2204 		DRV_LOG(WARNING, "%s: deprecated parameter,"
2205 				 " converted to txq_inline_mpw", key);
2206 		config->txq_inline_mpw = tmp;
2207 	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
2208 		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
2209 	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
2210 		config->rx_vec_en = !!tmp;
2211 	} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
2212 		config->max_dump_files_num = tmp;
2213 	} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
2214 		config->lro_timeout = tmp;
2215 	} else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
2216 		config->log_hp_size = tmp;
2217 	} else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
2218 		config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
2219 		config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
2220 	}
2221 	return 0;
2222 }
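
/*
 * Illustrative sketch: the handler above is invoked once per key/value
 * pair parsed from the devargs string, e.g. (hypothetical PCI address and
 * values):
 *
 *	-a 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txq_inline_max=204
 *
 * Each recognized key is validated and stored into struct
 * mlx5_port_config; deprecated keys are converted or ignored with a
 * warning.
 */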
2223 
2224 /**
2225  * Parse user port parameters and adjust them according to device capabilities.
2226  *
2227  * @param priv
 *   Pointer to the port's private data structure.
2229  * @param mkvlist
2230  *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
2231  * @param config
2232  *   Pointer to port configuration structure.
2233  *
2234  * @return
2235  *   0 on success, a negative errno value otherwise and rte_errno is set.
2236  */
2237 int
2238 mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist,
2239 		      struct mlx5_port_config *config)
2240 {
2241 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
2242 	struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
2243 	bool devx = priv->sh->cdev->config.devx;
2244 	const char **params = (const char *[]){
2245 		MLX5_RXQ_CQE_COMP_EN,
2246 		MLX5_RXQ_PKT_PAD_EN,
2247 		MLX5_RX_MPRQ_EN,
2248 		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
2249 		MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
2250 		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
2251 		MLX5_RXQS_MIN_MPRQ,
2252 		MLX5_TXQ_INLINE,
2253 		MLX5_TXQ_INLINE_MIN,
2254 		MLX5_TXQ_INLINE_MAX,
2255 		MLX5_TXQ_INLINE_MPW,
2256 		MLX5_TXQS_MIN_INLINE,
2257 		MLX5_TXQS_MAX_VEC,
2258 		MLX5_TXQ_MPW_EN,
2259 		MLX5_TXQ_MPW_HDR_DSEG_EN,
2260 		MLX5_TXQ_MAX_INLINE_LEN,
2261 		MLX5_TX_VEC_EN,
2262 		MLX5_RX_VEC_EN,
2263 		MLX5_REPRESENTOR,
2264 		MLX5_MAX_DUMP_FILES_NUM,
2265 		MLX5_LRO_TIMEOUT_USEC,
2266 		MLX5_HP_BUF_SIZE,
2267 		MLX5_DELAY_DROP,
2268 		NULL,
2269 	};
2270 	int ret = 0;
2271 
2272 	/* Default configuration. */
2273 	memset(config, 0, sizeof(*config));
2274 	config->mps = MLX5_ARG_UNSET;
2275 	config->cqe_comp = 1;
2276 	config->rx_vec_en = 1;
2277 	config->txq_inline_max = MLX5_ARG_UNSET;
2278 	config->txq_inline_min = MLX5_ARG_UNSET;
2279 	config->txq_inline_mpw = MLX5_ARG_UNSET;
2280 	config->txqs_inline = MLX5_ARG_UNSET;
2281 	config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
2282 	config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
2283 	config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
2284 	config->log_hp_size = MLX5_ARG_UNSET;
2285 	config->std_delay_drop = 0;
2286 	config->hp_delay_drop = 0;
2287 	if (mkvlist != NULL) {
2288 		/* Process parameters. */
2289 		ret = mlx5_kvargs_process(mkvlist, params,
2290 					  mlx5_port_args_check_handler, config);
2291 		if (ret) {
2292 			DRV_LOG(ERR, "Failed to process port arguments: %s",
2293 				strerror(rte_errno));
2294 			return -rte_errno;
2295 		}
2296 	}
2297 	/* Adjust parameters according to device capabilities. */
2298 	if (config->hw_padding && !dev_cap->hw_padding) {
2299 		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported.");
2300 		config->hw_padding = 0;
2301 	} else if (config->hw_padding) {
2302 		DRV_LOG(DEBUG, "Rx end alignment padding is enabled.");
2303 	}
2304 	/*
2305 	 * MPW is disabled by default, while the Enhanced MPW is enabled
2306 	 * by default.
2307 	 */
2308 	if (config->mps == MLX5_ARG_UNSET)
2309 		config->mps = (dev_cap->mps == MLX5_MPW_ENHANCED) ?
2310 			      MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
2311 	else
2312 		config->mps = config->mps ? dev_cap->mps : MLX5_MPW_DISABLED;
2313 	DRV_LOG(INFO, "%sMPS is %s",
2314 		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
2315 		config->mps == MLX5_MPW ? "legacy " : "",
2316 		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
	/* LRO is supported only when DV flow is enabled. */
2318 	if (dev_cap->lro_supported && !priv->sh->config.dv_flow_en)
2319 		dev_cap->lro_supported = 0;
2320 	if (dev_cap->lro_supported) {
2321 		/*
2322 		 * If LRO timeout is not configured by application,
2323 		 * use the minimal supported value.
2324 		 */
2325 		if (!config->lro_timeout)
2326 			config->lro_timeout =
2327 				       hca_attr->lro_timer_supported_periods[0];
2328 		DRV_LOG(DEBUG, "LRO session timeout set to %d usec.",
2329 			config->lro_timeout);
2330 	}
2331 	if (config->cqe_comp && !dev_cap->cqe_comp) {
2332 		DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
2333 		config->cqe_comp = 0;
2334 	}
2335 	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
2336 	    (!devx || !hca_attr->mini_cqe_resp_flow_tag)) {
2337 		DRV_LOG(WARNING,
2338 			"Flow Tag CQE compression format isn't supported.");
2339 		config->cqe_comp = 0;
2340 	}
2341 	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
2342 	    (!devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
2343 		DRV_LOG(WARNING,
2344 			"L3/L4 Header CQE compression format isn't supported.");
2345 		config->cqe_comp = 0;
2346 	}
2347 	DRV_LOG(DEBUG, "Rx CQE compression is %ssupported.",
2348 		config->cqe_comp ? "" : "not ");
2349 	if ((config->std_delay_drop || config->hp_delay_drop) &&
2350 	    !dev_cap->rq_delay_drop_en) {
2351 		config->std_delay_drop = 0;
2352 		config->hp_delay_drop = 0;
2353 		DRV_LOG(WARNING, "dev_port-%u: Rxq delay drop isn't supported.",
2354 			priv->dev_port);
2355 	}
2356 	if (config->mprq.enabled && !priv->sh->dev_cap.mprq.enabled) {
2357 		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
2358 		config->mprq.enabled = 0;
2359 	}
2360 	if (config->max_dump_files_num == 0)
2361 		config->max_dump_files_num = 128;
2362 	/* Detect minimal data bytes to inline. */
2363 	mlx5_set_min_inline(priv);
2364 	DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.",
2365 		config->hw_vlan_insert ? "" : "not ");
2366 	DRV_LOG(DEBUG, "\"rxq_pkt_pad_en\" is %u.", config->hw_padding);
2367 	DRV_LOG(DEBUG, "\"rxq_cqe_comp_en\" is %u.", config->cqe_comp);
2368 	DRV_LOG(DEBUG, "\"cqe_comp_fmt\" is %u.", config->cqe_comp_fmt);
2369 	DRV_LOG(DEBUG, "\"rx_vec_en\" is %u.", config->rx_vec_en);
2370 	DRV_LOG(DEBUG, "Standard \"delay_drop\" is %u.",
2371 		config->std_delay_drop);
2372 	DRV_LOG(DEBUG, "Hairpin \"delay_drop\" is %u.", config->hp_delay_drop);
2373 	DRV_LOG(DEBUG, "\"max_dump_files_num\" is %u.",
2374 		config->max_dump_files_num);
2375 	DRV_LOG(DEBUG, "\"log_hp_size\" is %u.", config->log_hp_size);
2376 	DRV_LOG(DEBUG, "\"mprq_en\" is %u.", config->mprq.enabled);
2377 	DRV_LOG(DEBUG, "\"mprq_log_stride_num\" is %u.",
2378 		config->mprq.log_stride_num);
2379 	DRV_LOG(DEBUG, "\"mprq_log_stride_size\" is %u.",
2380 		config->mprq.log_stride_size);
2381 	DRV_LOG(DEBUG, "\"mprq_max_memcpy_len\" is %u.",
2382 		config->mprq.max_memcpy_len);
2383 	DRV_LOG(DEBUG, "\"rxqs_min_mprq\" is %u.", config->mprq.min_rxqs_num);
2384 	DRV_LOG(DEBUG, "\"lro_timeout_usec\" is %u.", config->lro_timeout);
2385 	DRV_LOG(DEBUG, "\"txq_mpw_en\" is %d.", config->mps);
2386 	DRV_LOG(DEBUG, "\"txqs_min_inline\" is %d.", config->txqs_inline);
2387 	DRV_LOG(DEBUG, "\"txq_inline_min\" is %d.", config->txq_inline_min);
2388 	DRV_LOG(DEBUG, "\"txq_inline_max\" is %d.", config->txq_inline_max);
2389 	DRV_LOG(DEBUG, "\"txq_inline_mpw\" is %d.", config->txq_inline_mpw);
2390 	return 0;
2391 }
2392 
2393 /**
 * Print the key for a device argument.
 *
 * It is a "dummy" handler whose sole purpose is to enable using the
 * mlx5_kvargs_process() function, which marks devargs as used.
2398  *
2399  * @param key
2400  *   Key argument.
2401  * @param val
2402  *   Value associated with key, unused.
2403  * @param opaque
2404  *   Unused, can be NULL.
2405  *
2406  * @return
2407  *   0 on success, function cannot fail.
2408  */
2409 static int
2410 mlx5_dummy_handler(const char *key, const char *val, void *opaque)
2411 {
2412 	DRV_LOG(DEBUG, "\tKey: \"%s\" is set as used.", key);
2413 	RTE_SET_USED(opaque);
2414 	RTE_SET_USED(val);
2415 	return 0;
2416 }
2417 
2418 /**
 * Set requested devargs as used when the device is already spawned.
 *
 * This is necessary since it is valid to probe again for an existing device;
 * if its devargs are not marked as used, mlx5_kvargs_validate() will fail.
2423  *
2424  * @param name
2425  *   Name of the existing device.
2426  * @param port_id
2427  *   Port identifier of the device.
2428  * @param mkvlist
2429  *   Pointer to mlx5 kvargs control to sign as used.
2430  */
2431 void
2432 mlx5_port_args_set_used(const char *name, uint16_t port_id,
2433 			struct mlx5_kvargs_ctrl *mkvlist)
2434 {
2435 	const char **params = (const char *[]){
2436 		MLX5_RXQ_CQE_COMP_EN,
2437 		MLX5_RXQ_PKT_PAD_EN,
2438 		MLX5_RX_MPRQ_EN,
2439 		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
2440 		MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
2441 		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
2442 		MLX5_RXQS_MIN_MPRQ,
2443 		MLX5_TXQ_INLINE,
2444 		MLX5_TXQ_INLINE_MIN,
2445 		MLX5_TXQ_INLINE_MAX,
2446 		MLX5_TXQ_INLINE_MPW,
2447 		MLX5_TXQS_MIN_INLINE,
2448 		MLX5_TXQS_MAX_VEC,
2449 		MLX5_TXQ_MPW_EN,
2450 		MLX5_TXQ_MPW_HDR_DSEG_EN,
2451 		MLX5_TXQ_MAX_INLINE_LEN,
2452 		MLX5_TX_VEC_EN,
2453 		MLX5_RX_VEC_EN,
2454 		MLX5_REPRESENTOR,
2455 		MLX5_MAX_DUMP_FILES_NUM,
2456 		MLX5_LRO_TIMEOUT_USEC,
2457 		MLX5_HP_BUF_SIZE,
2458 		MLX5_DELAY_DROP,
2459 		NULL,
2460 	};
2461 
2462 	/* Secondary process should not handle devargs. */
2463 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2464 		return;
2465 	MLX5_ASSERT(mkvlist != NULL);
2466 	DRV_LOG(DEBUG, "Ethernet device \"%s\" for port %u "
2467 		"already exists, set devargs as used:", name, port_id);
2468 	/* This function cannot fail with this handler. */
2469 	mlx5_kvargs_process(mkvlist, params, mlx5_dummy_handler, NULL);
2470 }
2471 
2472 /**
2473  * Check sibling device configurations when probing again.
2474  *
 * Sibling devices sharing an Infiniband device context should have compatible
 * configurations. This applies to representors and bonding devices.
2477  *
2478  * @param cdev
2479  *   Pointer to mlx5 device structure.
2480  * @param mkvlist
2481  *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
2482  *
2483  * @return
2484  *   0 on success, a negative errno value otherwise and rte_errno is set.
2485  */
2486 int
2487 mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,
2488 			       struct mlx5_kvargs_ctrl *mkvlist)
2489 {
2490 	struct mlx5_dev_ctx_shared *sh = NULL;
2491 	struct mlx5_sh_config *config;
2492 	int ret;
2493 
2494 	/* Secondary process should not handle devargs. */
2495 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2496 		return 0;
2497 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
2498 	/* Search for IB context by common device pointer. */
2499 	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next)
2500 		if (sh->cdev == cdev)
2501 			break;
2502 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	/* There is no sh for this device -> it is not a probe again. */
2504 	if (sh == NULL)
2505 		return 0;
2506 	config = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
2507 			     sizeof(struct mlx5_sh_config),
2508 			     RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
2509 	if (config == NULL) {
		rte_errno = ENOMEM;
2511 		return -rte_errno;
2512 	}
	/*
	 * Create a temporary IB context configuration structure according to
	 * the new devargs attached in this probe again.
	 */
2517 	ret = mlx5_shared_dev_ctx_args_config(sh, mkvlist, config);
2518 	if (ret) {
2519 		DRV_LOG(ERR, "Failed to process device configure: %s",
2520 			strerror(rte_errno));
2521 		mlx5_free(config);
2522 		return ret;
2523 	}
	/*
	 * Check that the temporary structure matches the existing IB context
	 * configuration.
	 */
2528 	if (sh->config.dv_flow_en ^ config->dv_flow_en) {
2529 		DRV_LOG(ERR, "\"dv_flow_en\" "
2530 			"configuration mismatch for shared %s context.",
2531 			sh->ibdev_name);
2532 		goto error;
2533 	}
2534 	if ((sh->config.dv_xmeta_en ^ config->dv_xmeta_en) ||
2535 	    (sh->config.dv_miss_info ^ config->dv_miss_info)) {
2536 		DRV_LOG(ERR, "\"dv_xmeta_en\" "
2537 			"configuration mismatch for shared %s context.",
2538 			sh->ibdev_name);
2539 		goto error;
2540 	}
2541 	if (sh->config.dv_esw_en ^ config->dv_esw_en) {
2542 		DRV_LOG(ERR, "\"dv_esw_en\" "
2543 			"configuration mismatch for shared %s context.",
2544 			sh->ibdev_name);
2545 		goto error;
2546 	}
2547 	if (sh->config.reclaim_mode ^ config->reclaim_mode) {
2548 		DRV_LOG(ERR, "\"reclaim_mode\" "
2549 			"configuration mismatch for shared %s context.",
2550 			sh->ibdev_name);
2551 		goto error;
2552 	}
2553 	if (sh->config.allow_duplicate_pattern ^
2554 	    config->allow_duplicate_pattern) {
2555 		DRV_LOG(ERR, "\"allow_duplicate_pattern\" "
2556 			"configuration mismatch for shared %s context.",
2557 			sh->ibdev_name);
2558 		goto error;
2559 	}
2560 	if (sh->config.l3_vxlan_en ^ config->l3_vxlan_en) {
2561 		DRV_LOG(ERR, "\"l3_vxlan_en\" "
2562 			"configuration mismatch for shared %s context.",
2563 			sh->ibdev_name);
2564 		goto error;
2565 	}
2566 	if (sh->config.decap_en ^ config->decap_en) {
2567 		DRV_LOG(ERR, "\"decap_en\" "
2568 			"configuration mismatch for shared %s context.",
2569 			sh->ibdev_name);
2570 		goto error;
2571 	}
2572 	if (sh->config.lacp_by_user ^ config->lacp_by_user) {
2573 		DRV_LOG(ERR, "\"lacp_by_user\" "
2574 			"configuration mismatch for shared %s context.",
2575 			sh->ibdev_name);
2576 		goto error;
2577 	}
2578 	if (sh->config.tx_pp ^ config->tx_pp) {
2579 		DRV_LOG(ERR, "\"tx_pp\" "
2580 			"configuration mismatch for shared %s context.",
2581 			sh->ibdev_name);
2582 		goto error;
2583 	}
2584 	if (sh->config.tx_skew ^ config->tx_skew) {
2585 		DRV_LOG(ERR, "\"tx_skew\" "
2586 			"configuration mismatch for shared %s context.",
2587 			sh->ibdev_name);
2588 		goto error;
2589 	}
2590 	mlx5_free(config);
2591 	return 0;
2592 error:
2593 	mlx5_free(config);
2594 	rte_errno = EINVAL;
2595 	return -rte_errno;
2596 }
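
/*
 * Illustrative sketch: probing the same device twice with conflicting
 * shared-context devargs fails, e.g. (hypothetical address and values):
 *
 *	-a 0000:03:00.0,representor=[0],dv_flow_en=1
 *	-a 0000:03:00.0,representor=[1],dv_flow_en=0
 *
 * The second probe reaches this function, detects the "dv_flow_en"
 * mismatch against the existing shared context and returns -EINVAL
 * (rte_errno is set accordingly).
 */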
2597 
2598 /**
 * Configures the minimal amount of data to inline into a WQE
 * while sending packets:
 *
 * - txq_inline_min takes the highest priority if this
 *   key is specified in devargs;
 * - if DevX is enabled, the inline mode is queried from the
 *   device (HCA attributes and NIC vport context if needed);
 * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
 *   and none (0 bytes) for other NICs.
2608  *
2609  * @param priv
2610  *   Pointer to the private device data structure.
2611  */
2612 void
2613 mlx5_set_min_inline(struct mlx5_priv *priv)
2614 {
2615 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
2616 	struct mlx5_port_config *config = &priv->config;
2617 
2618 	if (config->txq_inline_min != MLX5_ARG_UNSET) {
2619 		/* Application defines size of inlined data explicitly. */
2620 		if (priv->pci_dev != NULL) {
2621 			switch (priv->pci_dev->id.device_id) {
2622 			case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
2623 			case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
2624 				if (config->txq_inline_min <
2625 					       (int)MLX5_INLINE_HSIZE_L2) {
2626 					DRV_LOG(DEBUG,
						"txq_inline_min aligned to minimal ConnectX-4 required value %d",
2628 						(int)MLX5_INLINE_HSIZE_L2);
2629 					config->txq_inline_min =
2630 							MLX5_INLINE_HSIZE_L2;
2631 				}
2632 				break;
2633 			}
2634 		}
2635 		goto exit;
2636 	}
2637 	if (hca_attr->eth_net_offloads) {
2638 		/* We have DevX enabled, inline mode queried successfully. */
2639 		switch (hca_attr->wqe_inline_mode) {
2640 		case MLX5_CAP_INLINE_MODE_L2:
2641 			/* outer L2 header must be inlined. */
2642 			config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
2643 			goto exit;
2644 		case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
			/* No inline data is required by the NIC. */
2646 			config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
2647 			config->hw_vlan_insert =
2648 				hca_attr->wqe_vlan_insert;
2649 			DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
2650 			goto exit;
2651 		case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2652 			/* inline mode is defined by NIC vport context. */
2653 			if (!hca_attr->eth_virt)
2654 				break;
2655 			switch (hca_attr->vport_inline_mode) {
2656 			case MLX5_INLINE_MODE_NONE:
2657 				config->txq_inline_min =
2658 					MLX5_INLINE_HSIZE_NONE;
2659 				goto exit;
2660 			case MLX5_INLINE_MODE_L2:
2661 				config->txq_inline_min =
2662 					MLX5_INLINE_HSIZE_L2;
2663 				goto exit;
2664 			case MLX5_INLINE_MODE_IP:
2665 				config->txq_inline_min =
2666 					MLX5_INLINE_HSIZE_L3;
2667 				goto exit;
2668 			case MLX5_INLINE_MODE_TCP_UDP:
2669 				config->txq_inline_min =
2670 					MLX5_INLINE_HSIZE_L4;
2671 				goto exit;
2672 			case MLX5_INLINE_MODE_INNER_L2:
2673 				config->txq_inline_min =
2674 					MLX5_INLINE_HSIZE_INNER_L2;
2675 				goto exit;
2676 			case MLX5_INLINE_MODE_INNER_IP:
2677 				config->txq_inline_min =
2678 					MLX5_INLINE_HSIZE_INNER_L3;
2679 				goto exit;
2680 			case MLX5_INLINE_MODE_INNER_TCP_UDP:
2681 				config->txq_inline_min =
2682 					MLX5_INLINE_HSIZE_INNER_L4;
2683 				goto exit;
2684 			}
2685 		}
2686 	}
2687 	if (priv->pci_dev == NULL) {
2688 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
2689 		goto exit;
2690 	}
2691 	/*
2692 	 * We get here if we are unable to deduce
2693 	 * inline data size with DevX. Try PCI ID
2694 	 * to determine old NICs.
2695 	 */
2696 	switch (priv->pci_dev->id.device_id) {
2697 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
2698 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
2699 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
2700 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
2701 		config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
2702 		config->hw_vlan_insert = 0;
2703 		break;
2704 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
2705 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
2706 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
2707 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
2708 		/*
		 * These NICs support VLAN insertion from WQE and
		 * report the wqe_vlan_insert flag. But there is a bug
		 * that may break PFC control, so disable the feature.
2712 		 */
2713 		config->hw_vlan_insert = 0;
2714 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
2715 		break;
2716 	default:
2717 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
2718 		break;
2719 	}
2720 exit:
2721 	DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
2722 }
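
/*
 * Illustrative sketch: if DevX reports wqe_inline_mode ==
 * MLX5_CAP_INLINE_MODE_VPORT_CONTEXT and the vport context reports
 * MLX5_INLINE_MODE_L2, the resolution above yields
 *
 *	config->txq_inline_min == MLX5_INLINE_HSIZE_L2
 *
 * i.e. the Tx datapath must copy at least the outer L2 header (18 bytes)
 * into every WQE.
 */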
2723 
2724 /**
2725  * Configures the metadata mask fields in the shared context.
2726  *
2727  * @param [in] dev
2728  *   Pointer to Ethernet device.
2729  */
2730 void
2731 mlx5_set_metadata_mask(struct rte_eth_dev *dev)
2732 {
2733 	struct mlx5_priv *priv = dev->data->dev_private;
2734 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2735 	uint32_t meta, mark, reg_c0;
2736 
2737 	reg_c0 = ~priv->vport_meta_mask;
2738 	switch (sh->config.dv_xmeta_en) {
2739 	case MLX5_XMETA_MODE_LEGACY:
2740 		meta = UINT32_MAX;
2741 		mark = MLX5_FLOW_MARK_MASK;
2742 		break;
2743 	case MLX5_XMETA_MODE_META16:
2744 		meta = reg_c0 >> rte_bsf32(reg_c0);
2745 		mark = MLX5_FLOW_MARK_MASK;
2746 		break;
2747 	case MLX5_XMETA_MODE_META32:
2748 		meta = UINT32_MAX;
2749 		mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK;
2750 		break;
2751 	default:
2752 		meta = 0;
2753 		mark = 0;
2754 		MLX5_ASSERT(false);
2755 		break;
2756 	}
2757 	if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
2758 		DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
2759 				 sh->dv_mark_mask, mark);
2760 	else
2761 		sh->dv_mark_mask = mark;
2762 	if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
2763 		DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
2764 				 sh->dv_meta_mask, meta);
2765 	else
2766 		sh->dv_meta_mask = meta;
2767 	if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
2768 		DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
				 sh->dv_regc0_mask, reg_c0);
2770 	else
2771 		sh->dv_regc0_mask = reg_c0;
2772 	DRV_LOG(DEBUG, "metadata mode %u", sh->config.dv_xmeta_en);
2773 	DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask);
2774 	DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask);
2775 	DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
2776 }
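
/*
 * Worked example (hypothetical mask): with dv_xmeta_en ==
 * MLX5_XMETA_MODE_META16 and vport_meta_mask == 0xFFFF0000:
 *
 *	reg_c0 = ~0xFFFF0000                 = 0x0000FFFF
 *	meta   = reg_c0 >> rte_bsf32(reg_c0) = 0x0000FFFF
 *	mark   = MLX5_FLOW_MARK_MASK
 *
 * i.e. the 16 bits of reg_c0 not claimed by the vport remain available
 * for the META item/action.
 */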
2777 
2778 int
2779 rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
2780 {
2781 	static const char *const dynf_names[] = {
2782 		RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
2783 		RTE_MBUF_DYNFLAG_METADATA_NAME,
2784 		RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME
2785 	};
2786 	unsigned int i;
2787 
2788 	if (n < RTE_DIM(dynf_names))
2789 		return -ENOMEM;
2790 	for (i = 0; i < RTE_DIM(dynf_names); i++) {
2791 		if (names[i] == NULL)
2792 			return -EINVAL;
2793 		strcpy(names[i], dynf_names[i]);
2794 	}
2795 	return RTE_DIM(dynf_names);
2796 }
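
/*
 * Illustrative usage sketch: the caller must pre-allocate every name
 * buffer before the call:
 *
 *	char bufs[3][RTE_MBUF_DYN_NAMESIZE];
 *	char *names[3] = { bufs[0], bufs[1], bufs[2] };
 *	int n = rte_pmd_mlx5_get_dyn_flag_names(names, RTE_DIM(names));
 *
 * On success n is the number of filled entries (3 here); a too-small n
 * yields -ENOMEM and a NULL entry yields -EINVAL.
 */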
2797 
2798 /**
 * Look for an Ethernet device belonging to the mlx5 driver.
 *
 * @param[in] port_id
 *   port_id to start looking for the device.
 * @param[in] odev
 *   Pointer to the hint device. While a device is being probed, its
 *   siblings (the master and preceding representors) might not have a
 *   driver assigned yet because mlx5_os_pci_probe() has not completed;
 *   in this case a match on the hint device may be used to detect a
 *   sibling device.
 *
 * @return
 *   port_id of the found device, RTE_MAX_ETHPORTS if not found.
2812  */
2813 uint16_t
2814 mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
2815 {
2816 	while (port_id < RTE_MAX_ETHPORTS) {
2817 		struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2818 
2819 		if (dev->state != RTE_ETH_DEV_UNUSED &&
2820 		    dev->device &&
2821 		    (dev->device == odev ||
2822 		     (dev->device->driver &&
2823 		     dev->device->driver->name &&
2824 		     ((strcmp(dev->device->driver->name,
2825 			      MLX5_PCI_DRIVER_NAME) == 0) ||
2826 		      (strcmp(dev->device->driver->name,
2827 			      MLX5_AUXILIARY_DRIVER_NAME) == 0)))))
2828 			break;
2829 		port_id++;
2830 	}
2831 	if (port_id >= RTE_MAX_ETHPORTS)
2832 		return RTE_MAX_ETHPORTS;
2833 	return port_id;
2834 }
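
/*
 * Illustrative sketch: iterating over all mlx5 ports that belong to one
 * bus device, which is what the MLX5_ETH_FOREACH_DEV() macro used in
 * mlx5_dev_close() conceptually expands to:
 *
 *	uint16_t p;
 *
 *	for (p = mlx5_eth_find_next(0, odev);
 *	     p < RTE_MAX_ETHPORTS;
 *	     p = mlx5_eth_find_next(p + 1, odev))
 *		handle_port(p); // hypothetical per-port handler
 */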
2835 
2836 /**
2837  * Callback to remove a device.
2838  *
 * This function removes all Ethernet devices belonging to a given device.
2840  *
2841  * @param[in] cdev
2842  *   Pointer to the generic device.
2843  *
2844  * @return
 *   0 on success, -EIO if closing any of the ports failed.
2846  */
2847 int
2848 mlx5_net_remove(struct mlx5_common_device *cdev)
2849 {
2850 	uint16_t port_id;
2851 	int ret = 0;
2852 
2853 	RTE_ETH_FOREACH_DEV_OF(port_id, cdev->dev) {
2854 		/*
		 * mlx5_dev_close() is not registered for the secondary
		 * process, so call the close function explicitly for it.
2857 		 */
2858 		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
2859 			ret |= mlx5_dev_close(&rte_eth_devices[port_id]);
2860 		else
2861 			ret |= rte_eth_dev_close(port_id);
2862 	}
2863 	return ret == 0 ? 0 : -EIO;
2864 }
2865 
2866 static const struct rte_pci_id mlx5_pci_id_map[] = {
2867 	{
2868 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2869 			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
2870 	},
2871 	{
2872 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2873 			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
2874 	},
2875 	{
2876 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2877 			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
2878 	},
2879 	{
2880 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2881 			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
2882 	},
2883 	{
2884 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2885 			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
2886 	},
2887 	{
2888 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2889 			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
2890 	},
2891 	{
2892 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2893 			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
2894 	},
2895 	{
2896 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2897 			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
2898 	},
2899 	{
2900 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2901 			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
2902 	},
2903 	{
2904 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2905 			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
2906 	},
2907 	{
2908 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2909 				PCI_DEVICE_ID_MELLANOX_CONNECTX6)
2910 	},
2911 	{
2912 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2913 				PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
2914 	},
2915 	{
2916 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2917 				PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
2918 	},
2919 	{
2920 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2921 				PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
2922 	},
2923 	{
2924 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2925 				PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
2926 	},
2927 	{
2928 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2929 				PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
2930 	},
2931 	{
2932 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2933 				PCI_DEVICE_ID_MELLANOX_CONNECTX7)
2934 	},
2935 	{
2936 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
2937 				PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
2938 	},
2939 	{
2940 		.vendor_id = 0
2941 	}
2942 };
2943 
2944 static struct mlx5_class_driver mlx5_net_driver = {
2945 	.drv_class = MLX5_CLASS_ETH,
2946 	.name = RTE_STR(MLX5_ETH_DRIVER_NAME),
2947 	.id_table = mlx5_pci_id_map,
2948 	.probe = mlx5_os_net_probe,
2949 	.remove = mlx5_net_remove,
2950 	.probe_again = 1,
2951 	.intr_lsc = 1,
2952 	.intr_rmv = 1,
2953 };
2954 
2955 /* Initialize driver log type. */
2956 RTE_LOG_REGISTER_DEFAULT(mlx5_logtype, NOTICE)
2957 
2958 /**
2959  * Driver initialization routine.
2960  */
2961 RTE_INIT(rte_mlx5_pmd_init)
2962 {
2963 	pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL);
2964 	mlx5_common_init();
2965 	/* Build the static tables for Verbs conversion. */
2966 	mlx5_set_ptype_table();
2967 	mlx5_set_cksum_table();
2968 	mlx5_set_swp_types_table();
2969 	if (mlx5_glue)
2970 		mlx5_class_driver_register(&mlx5_net_driver);
2971 }
2972 
2973 RTE_PMD_EXPORT_NAME(MLX5_ETH_DRIVER_NAME, __COUNTER__);
2974 RTE_PMD_REGISTER_PCI_TABLE(MLX5_ETH_DRIVER_NAME, mlx5_pci_id_map);
2975 RTE_PMD_REGISTER_KMOD_DEP(MLX5_ETH_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");
2976