/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>

#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_eal_paging.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "rte_pmd_mlx5.h"

#define MLX5_ETH_DRIVER_NAME mlx5_eth

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to configure log 2 of the stride size for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of an inlined packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable Tx scheduling on timestamps
 * and specify the packet pacing granularity in nanoseconds.
 */
#define MLX5_TX_PP "tx_pp"

/*
 * Device parameter to specify skew in nanoseconds on the Tx datapath;
 * it represents the time between the start of WQE processing in the SQ
 * and the actual packet data appearing on the wire.
 */
#define MLX5_TX_SKEW "tx_skew"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"

/* Device parameter to let the user manage the LACP traffic of a bonding device. */
#define MLX5_LACP_BY_USER "lacp_by_user"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

/*
 * Device parameter to configure the total data buffer size for a single
 * hairpin queue (logarithm value).
 */
#define MLX5_HP_BUF_SIZE "hp_buf_log_sz"

/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"

/* Decap will be used or not. */
#define MLX5_DECAP_EN "decap_en"

/* Device parameter to allow or prevent duplicate rule patterns. */
#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"

/* Device parameter to configure the delay drop when creating Rxqs. */
#define MLX5_DELAY_DROP "delay_drop"

/* Device parameter to create the FDB default rule in the PMD. */
#define MLX5_FDB_DEFAULT_RULE_EN "fdb_def_rule_en"

/* HW steering counter configuration. */
#define MLX5_HWS_CNT_SERVICE_CORE "service_core"

/* HW steering counter's query interval. */
#define MLX5_HWS_CNT_CYCLE_TIME "svc_cycle_time"

/* Device parameter to control representor matching in ingress/egress flows with HWS. */
#define MLX5_REPR_MATCHING_EN "repr_matching_en"
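
/*
 * Illustrative usage of the parameters above (a sketch, not part of the
 * driver): they are passed as PCI device arguments and parsed through
 * rte_kvargs, e.g.
 *
 *   dpdk-testpmd -a 0000:08:00.0,dv_flow_en=2,mprq_en=1,tx_pp=500 -- -i
 *
 * The PCI address and the chosen values here are hypothetical.
 */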

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

static LIST_HEAD(mlx5_dev_ctx_list, mlx5_dev_ctx_shared) dev_ctx_list = LIST_HEAD_INITIALIZER();
static LIST_HEAD(mlx5_phdev_list, mlx5_physical_device) phdev_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_dev_ctx_list_mutex;

static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	[MLX5_IPOOL_DECAP_ENCAP] = {
		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_encap_decap_ipool",
	},
	[MLX5_IPOOL_PUSH_VLAN] = {
		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_push_vlan_ipool",
	},
	[MLX5_IPOOL_TAG] = {
		.size = sizeof(struct mlx5_flow_dv_tag_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = (1 << 16),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_tag_ipool",
	},
	[MLX5_IPOOL_PORT_ID] = {
		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_port_id_ipool",
	},
	[MLX5_IPOOL_JUMP] = {
		/*
		 * MLX5_IPOOL_JUMP ipool entry size depends on selected flow engine.
		 * When HW steering is enabled mlx5_flow_group struct is used.
		 * Otherwise mlx5_flow_tbl_data_entry struct is used.
		 */
		.size = 0,
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_jump_ipool",
	},
	[MLX5_IPOOL_SAMPLE] = {
		.size = sizeof(struct mlx5_flow_dv_sample_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_sample_ipool",
	},
	[MLX5_IPOOL_DEST_ARRAY] = {
		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_dest_array_ipool",
	},
	[MLX5_IPOOL_TUNNEL_ID] = {
		.size = sizeof(struct mlx5_flow_tunnel),
		.trunk_size = MLX5_MAX_TUNNELS,
		.need_lock = 1,
		.release_mem_en = 1,
		.type = "mlx5_tunnel_offload",
	},
	[MLX5_IPOOL_TNL_TBL_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_tnl_tbl_ipool",
	},
#endif
	[MLX5_IPOOL_MTR] = {
		/**
		 * The ipool index should grow continually from small to big;
		 * for meter indexes, grow_trunk is left unset so that the
		 * allocated indexes stay contiguous and do not jump.
		 */
		.size = sizeof(struct mlx5_legacy_flow_meter),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_ipool",
	},
	[MLX5_IPOOL_MCP] = {
		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_mcp_ipool",
	},
	[MLX5_IPOOL_HRXQ] = {
		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hrxq_ipool",
	},
	[MLX5_IPOOL_MLX5_FLOW] = {
		/*
		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
		 * It is set at run time according to the PCI function configuration.
		 */
		.size = 0,
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = 1 << 19,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_flow_handle_ipool",
	},
	[MLX5_IPOOL_RTE_FLOW] = {
		.size = sizeof(struct rte_flow),
		.trunk_size = 4096,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "rte_flow_ipool",
	},
	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_rss_id_ipool",
	},
	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
		.size = sizeof(struct mlx5_shared_action_rss),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_shared_action_rss",
	},
	[MLX5_IPOOL_MTR_POLICY] = {
		/**
		 * The ipool index should grow continually from small to big;
		 * for policy indexes, grow_trunk is left unset so that the
		 * allocated indexes stay contiguous and do not jump.
		 */
		.size = sizeof(struct mlx5_flow_meter_sub_policy),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_policy_ipool",
	},
};
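/*
 * Illustrative trunk growth for the pools above (a sketch, assuming the
 * growth rule implemented in mlx5_utils.c): with .trunk_size = 64,
 * .grow_trunk = 3 and .grow_shift = 2, successive trunk sizes are
 *
 *   trunk 0: 64, trunk 1: 64 << 2 = 256, trunk 2: 64 << 4 = 1024,
 *   trunk 3 and later: 64 << 6 = 4096 entries each,
 *
 * so a pool starts small and grows geometrically for the first
 * grow_trunk steps, then linearly.
 */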

#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16

#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 1024

#define MLX5_RXQ_ENH_CQE_COMP_MASK 0x80

/**
 * Decide whether the representor ID is an HPF (host PF) port on BF2.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if HPF, otherwise 0.
 */
bool
mlx5_is_hpf(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t repr = MLX5_REPRESENTOR_REPR(priv->representor_id);
	int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);

	return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_VF &&
	       MLX5_REPRESENTOR_REPR(-1) == repr;
}
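/*
 * Illustrative check (a sketch, assuming the MLX5_REPRESENTOR_* macros
 * pack the representor ID as a small bit field with the repr index in
 * its low bits): the host PF is exposed as a VF-type representor whose
 * repr field is all ones, so MLX5_REPRESENTOR_REPR(-1) yields that
 * all-ones value and the comparison above matches only the HPF port.
 */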

/**
 * Decide whether the representor ID is an SF port representor.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if SF port representor, otherwise 0.
 */
bool
mlx5_is_sf_repr(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);

	return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_SF;
}

/**
 * Initialize the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int err;

	if (sh->aso_age_mng)
		return 0;
	sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng),
				      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh->aso_age_mng) {
		DRV_LOG(ERR, "aso_age_mng allocation failed.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT, 1);
	if (err) {
		mlx5_free(sh->aso_age_mng);
		return -1;
	}
	rte_rwlock_init(&sh->aso_age_mng->resize_rwl);
	rte_spinlock_init(&sh->aso_age_mng->free_sl);
	LIST_INIT(&sh->aso_age_mng->free);
	return 0;
}

/**
 * Close and release all the resources of the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	int i, j;

	mlx5_aso_flow_hit_queue_poll_stop(sh);
	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT);
	if (sh->aso_age_mng->pools) {
		struct mlx5_aso_age_pool *pool;

		for (i = 0; i < sh->aso_age_mng->next; ++i) {
			pool = sh->aso_age_mng->pools[i];
			claim_zero(mlx5_devx_cmd_destroy
						(pool->flow_hit_aso_obj));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j)
				if (pool->actions[j].dr_action)
					claim_zero
					    (mlx5_flow_os_destroy_flow_action
					      (pool->actions[j].dr_action));
			mlx5_free(pool);
		}
		mlx5_free(sh->aso_age_mng->pools);
	}
	mlx5_free(sh->aso_age_mng);
}

/**
 * Initialize the shared aging list information per port.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t i;
	struct mlx5_age_info *age_info;

	/*
	 * In HW steering, aging information structure is initialized later
	 * during configure function.
	 */
	if (sh->config.dv_flow_en == 2)
		return;
	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		age_info->flags = 0;
		TAILQ_INIT(&age_info->aged_counters);
		LIST_INIT(&age_info->aged_aso);
		rte_spinlock_init(&age_info->aged_sl);
		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	}
}

/**
 * Detect and configure the DV flow counter mode.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 */
void
mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
	bool fallback;

#ifndef HAVE_IBV_DEVX_ASYNC
	fallback = true;
#else
	fallback = false;
	if (!sh->cdev->config.devx || !sh->config.dv_flow_en ||
	    !hca_attr->flow_counters_dump ||
	    !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
		fallback = true;
#endif
	if (fallback)
		DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
			"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
			hca_attr->flow_counters_dump,
			hca_attr->flow_counter_bulk_alloc_bitmap);
	/* Initialize fallback mode only on the port that initializes sh. */
	if (sh->refcnt == 1)
		sh->sws_cmng.counter_fallback = fallback;
	else if (fallback != sh->sws_cmng.counter_fallback)
		DRV_LOG(WARNING, "Port %d in sh has a different fallback mode "
			"from others:%d.", PORT_ID(priv), fallback);
#endif
}

/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, otherwise negative errno value and rte_errno is set.
 */
static int
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int i, j;

	if (sh->config.dv_flow_en < 2) {
		void *pools;

		pools = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_flow_counter_pool *) *
				    MLX5_COUNTER_POOLS_MAX_NUM,
				    0, SOCKET_ID_ANY);
		if (!pools) {
			DRV_LOG(ERR,
				"Counter management allocation failed.");
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
		TAILQ_INIT(&sh->sws_cmng.flow_counters);
		sh->sws_cmng.min_id = MLX5_CNT_BATCH_OFFSET;
		sh->sws_cmng.max_id = -1;
		sh->sws_cmng.last_pool_idx = POOL_IDX_INVALID;
		sh->sws_cmng.pools = pools;
		rte_spinlock_init(&sh->sws_cmng.pool_update_sl);
		for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
			TAILQ_INIT(&sh->sws_cmng.counters[i]);
			rte_spinlock_init(&sh->sws_cmng.csl[i]);
		}
	} else {
		struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
		uint32_t fw_max_nb_cnts = attr->max_flow_counter;
		uint8_t log_dcs = log2above(fw_max_nb_cnts) - 1;
		uint32_t max_nb_cnts = 0;

		for (i = 0, j = 0; j < MLX5_HWS_CNT_DCS_NUM; ++i) {
			int log_dcs_i = log_dcs - i;

			if (log_dcs_i < 0)
				break;
			if ((max_nb_cnts | RTE_BIT32(log_dcs_i)) >
			    fw_max_nb_cnts)
				continue;
			max_nb_cnts |= RTE_BIT32(log_dcs_i);
			j++;
		}
		sh->hws_max_log_bulk_sz = log_dcs;
		sh->hws_max_nb_counters = max_nb_cnts;
	}
	return 0;
}
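/*
 * Worked example for the HWS branch above (a sketch, assuming
 * MLX5_HWS_CNT_DCS_NUM == 4): with attr->max_flow_counter = 2^24,
 * log_dcs = log2above(2^24) - 1 = 23 and the loop accumulates
 *
 *   max_nb_cnts = 2^23 + 2^22 + 2^21 + 2^20 = 15728640,
 *
 * i.e. the largest sum of four descending power-of-two bulks that still
 * fits under the firmware counter limit.
 */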

/**
 * Destroy all the resources allocated for counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

	LIST_REMOVE(mng, next);
	mlx5_os_wrapped_mkey_destroy(&mng->wm);
	mlx5_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_counter_stats_mem_mng *mng;
	int i, j;
	int retries = 1024;

	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}

	if (sh->sws_cmng.pools) {
		struct mlx5_flow_counter_pool *pool;
		uint16_t n_valid = sh->sws_cmng.n_valid;
		bool fallback = sh->sws_cmng.counter_fallback;

		for (i = 0; i < n_valid; ++i) {
			pool = sh->sws_cmng.pools[i];
			if (!fallback && pool->min_dcs)
				claim_zero(mlx5_devx_cmd_destroy
							       (pool->min_dcs));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
				struct mlx5_flow_counter *cnt =
						MLX5_POOL_GET_CNT(pool, j);

				if (cnt->action)
					claim_zero
					 (mlx5_flow_os_destroy_flow_action
					  (cnt->action));
				if (fallback && cnt->dcs_when_free)
					claim_zero(mlx5_devx_cmd_destroy
						   (cnt->dcs_when_free));
			}
			mlx5_free(pool);
		}
		mlx5_free(sh->sws_cmng.pools);
	}
	mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
	while (mng) {
		mlx5_flow_destroy_counter_stat_mem_mng(mng);
		mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
	}
	memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
}

/**
 * Initialize the ASO flow meters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	if (!sh->mtrmng) {
		sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO,
			sizeof(*sh->mtrmng),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!sh->mtrmng) {
			DRV_LOG(ERR,
			"meter management allocation failed.");
			rte_errno = ENOMEM;
			return -ENOMEM;
		}
		if (sh->meter_aso_en) {
			rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
			rte_rwlock_init(&sh->mtrmng->pools_mng.resize_mtrwl);
			LIST_INIT(&sh->mtrmng->pools_mng.meters);
		}
		sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
	}
	return 0;
}

/**
 * Close and release all the resources of
 * the ASO flow meter management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_mtr_pool *mtr_pool;
	struct mlx5_flow_mtr_mng *mtrmng = sh->mtrmng;
	uint32_t idx;
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
	struct mlx5_aso_mtr *aso_mtr;
	int i;
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */

	if (sh->meter_aso_en) {
		mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
		idx = mtrmng->pools_mng.n_valid;
		while (idx--) {
			mtr_pool = mtrmng->pools_mng.pools[idx];
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
			for (i = 0; i < MLX5_ASO_MTRS_PER_POOL; i++) {
				aso_mtr = &mtr_pool->mtrs[i];
				if (aso_mtr->fm.meter_action_g)
					claim_zero
					(mlx5_glue->destroy_flow_action
					(aso_mtr->fm.meter_action_g));
				if (aso_mtr->fm.meter_action_y)
					claim_zero
					(mlx5_glue->destroy_flow_action
					(aso_mtr->fm.meter_action_y));
			}
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
			claim_zero(mlx5_devx_cmd_destroy
						(mtr_pool->devx_obj));
			mtrmng->pools_mng.n_valid--;
			mlx5_free(mtr_pool);
		}
		mlx5_free(sh->mtrmng->pools_mng.pools);
	}
	mlx5_free(sh->mtrmng);
	sh->mtrmng = NULL;
}

/* Send FLOW_AGED event if needed. */
void
mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_age_info *age_info;
	uint32_t i;

	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
			continue;
		MLX5_AGE_UNSET(age_info, MLX5_AGE_EVENT_NEW);
		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) {
			MLX5_AGE_UNSET(age_info, MLX5_AGE_TRIGGER);
			rte_eth_dev_callback_process
				(&rte_eth_devices[sh->port[i].devx_ih_port_id],
				RTE_ETH_EVENT_FLOW_AGED, NULL);
		}
	}
}

/*
 * Initialize the ASO connection tracking structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int err;

	if (sh->ct_mng)
		return 0;
	sh->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->ct_mng) +
				 sizeof(struct mlx5_aso_sq) * MLX5_ASO_CT_SQ_NUM,
				 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh->ct_mng) {
		DRV_LOG(ERR, "ASO CT management allocation failed.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_CONNECTION_TRACKING, MLX5_ASO_CT_SQ_NUM);
	if (err) {
		mlx5_free(sh->ct_mng);
		/* rte_errno should be extracted from the failure. */
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rte_spinlock_init(&sh->ct_mng->ct_sl);
	rte_rwlock_init(&sh->ct_mng->resize_rwl);
	LIST_INIT(&sh->ct_mng->free_cts);
	return 0;
}

/*
 * Close and release all the resources of the
 * ASO connection tracking management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_aso_ct_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
	struct mlx5_aso_ct_pool *ct_pool;
	struct mlx5_aso_ct_action *ct;
	uint32_t idx;
	uint32_t val;
	uint32_t cnt;
	int i;

	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_CONNECTION_TRACKING);
	idx = mng->next;
	while (idx--) {
		cnt = 0;
		ct_pool = mng->pools[idx];
		for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
			ct = &ct_pool->actions[i];
			val = __atomic_fetch_sub(&ct->refcnt, 1,
						 __ATOMIC_RELAXED);
			MLX5_ASSERT(val == 1);
			if (val > 1)
				cnt++;
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			if (ct->dr_action_orig)
				claim_zero(mlx5_glue->destroy_flow_action
							(ct->dr_action_orig));
			if (ct->dr_action_rply)
				claim_zero(mlx5_glue->destroy_flow_action
							(ct->dr_action_rply));
#endif
		}
		claim_zero(mlx5_devx_cmd_destroy(ct_pool->devx_obj));
		if (cnt) {
			DRV_LOG(DEBUG, "%u ASO CT objects are being used in the pool %u",
				cnt, idx);
		}
		mlx5_free(ct_pool);
		/* Keep the pool counter consistent in case of failure. */
		mng->next--;
	}
	mlx5_free(mng->pools);
	mlx5_free(mng);
	/* Management structure must be cleared to 0s during allocation. */
	sh->ct_mng = NULL;
}

/**
 * Initialize the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh)
{
	uint8_t i;
	struct mlx5_indexed_pool_config cfg;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i) {
		cfg = mlx5_ipool_cfg[i];
		switch (i) {
		default:
			break;
		/*
		 * Set MLX5_IPOOL_MLX5_FLOW ipool size
		 * according to PCI function flow configuration.
		 */
		case MLX5_IPOOL_MLX5_FLOW:
			cfg.size = sh->config.dv_flow_en ?
				sizeof(struct mlx5_flow_handle) :
				MLX5_FLOW_HANDLE_VERBS_SIZE;
			break;
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
		/* Set MLX5_IPOOL_JUMP ipool entry size depending on selected flow engine. */
		case MLX5_IPOOL_JUMP:
			cfg.size = sh->config.dv_flow_en == 2 ?
				sizeof(struct mlx5_flow_group) :
				sizeof(struct mlx5_flow_tbl_data_entry);
			break;
#endif
		}
		if (sh->config.reclaim_mode) {
			cfg.release_mem_en = 1;
			cfg.per_core_cache = 0;
		} else {
			cfg.release_mem_en = 0;
		}
		sh->ipool[i] = mlx5_ipool_create(&cfg);
	}
}
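/*
 * Illustrative effect of reclaim_mode above (a sketch): with reclaim
 * enabled every pool releases trunk memory back to the system as soon
 * as it becomes unused (release_mem_en = 1, per-core cache disabled);
 * otherwise trunks stay cached for faster reallocation at the cost of
 * a larger memory footprint.
 */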


/**
 * Release the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh)
{
	uint8_t i;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i)
		mlx5_ipool_destroy(sh->ipool[i]);
	for (i = 0; i < MLX5_MAX_MODIFY_NUM; ++i)
		if (sh->mdh_ipools[i])
			mlx5_ipool_destroy(sh->mdh_ipools[i]);
}

/*
 * Check if the dynamic flex parser for eCPRI already exists.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   true if it exists, false otherwise.
 */
bool
mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	return !!prf->obj;
}

/*
 * Allocation of a flex parser for eCPRI. Once created, the parser-related
 * resources will be held until the device is closed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
	struct mlx5_devx_graph_node_attr node = {
		.modify_field_select = 0,
	};
	uint32_t ids[8];
	int ret;

	if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
		DRV_LOG(ERR, "Dynamic flex parser is not supported "
			"for device %s.", priv->dev_data->name);
		return -ENOTSUP;
	}
	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
	/* 8 bytes now: 4B common header + 4B message body header. */
	node.header_length_base_value = 0x8;
	/* After MAC layer: Ether / VLAN. */
	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC;
	/* Type of compared condition should be 0xAEFE in the L2 layer. */
	node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI;
	/* Sample #0: type in common header. */
	node.sample[0].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[0].flow_match_sample_offset_mode = 0x0;
	/* Only the 2nd byte will be used. */
	node.sample[0].flow_match_sample_field_base_offset = 0x0;
	/* Sample #1: message payload. */
	node.sample[1].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[1].flow_match_sample_offset_mode = 0x0;
	/*
	 * Only the first two bytes will be used right now, and their offset
	 * starts after the common header, which has the length of one DW (u32).
	 */
	node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
	prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->cdev->ctx, &node);
	if (!prf->obj) {
		DRV_LOG(ERR, "Failed to create flex parser node object.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->num = 2;
	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num, NULL);
	if (ret) {
		DRV_LOG(ERR, "Failed to query sample IDs.");
		goto error;
	}
	prf->offset[0] = 0x0;
	prf->offset[1] = sizeof(uint32_t);
	prf->ids[0] = ids[0];
	prf->ids[1] = ids[1];
	return 0;
error:
	mlx5_devx_cmd_destroy(prf->obj);
	return (rte_errno == 0) ? -ENODEV : -rte_errno;
}
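/*
 * Illustrative layout of what the two samples cover (a sketch of the
 * eCPRI common header, byte offsets within the PDU):
 *
 *   byte 0       byte 1       bytes 2..3    bytes 4..5
 *   rev/C bit    msg type     payload size  message body ...
 *
 * Sample #0 covers the first DW, whose 2nd byte holds the message type;
 * sample #1 covers the first two bytes of the message body at offset 4.
 */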

/*
 * Destroy the flex parser node, including the parser itself, input / output
 * arcs and DW samples. Resources could be reused then.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	if (prf->obj)
		mlx5_devx_cmd_destroy(prf->obj);
	prf->obj = NULL;
}

/*
 * Allocation of a flex parser for SRH. Once the reference count drops to
 * zero, the resources held by this parser will be freed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_alloc_srh_flex_parser(struct rte_eth_dev *dev)
{
	struct mlx5_devx_graph_node_attr node = {
		.modify_field_select = 0,
	};
	uint32_t i;
	uint32_t ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_common_dev_config *config = &priv->sh->cdev->config;
	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
	void *fp = NULL, *ibv_ctx = priv->sh->cdev->ctx;
	int ret;

	memset(ids, 0xff, sizeof(ids));
	if (!config->hca_attr.parse_graph_flex_node ||
	    !config->hca_attr.flex.query_match_sample_info) {
		DRV_LOG(ERR, "Dynamic flex parser is not supported on HWS");
		return -ENOTSUP;
	}
	if (__atomic_fetch_add(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) + 1 > 1)
		return 0;
	priv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);
	if (!priv->sh->srh_flex_parser.flex.devx_fp)
		return -ENOMEM;
	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
	/* The first two DWs of the SRv6 header are not counted in. */
	node.header_length_base_value = 0x8;
	/* The unit is uint64_t. */
	node.header_length_field_shift = 0x3;
	/* Header length is the 2nd byte. */
	node.header_length_field_offset = 0x8;
	if (attr->header_length_mask_width < 8)
		node.header_length_field_offset += 8 - attr->header_length_mask_width;
	node.header_length_field_mask = 0xF;
	/* One byte next header protocol. */
	node.next_header_field_size = 0x8;
	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_IP;
	node.in[0].compare_condition_value = IPPROTO_ROUTING;
	/* Final IPv6 address. */
	for (i = 0; i <= MLX5_SRV6_SAMPLE_NUM - 1 && i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
		node.sample[i].flow_match_sample_en = 1;
		node.sample[i].flow_match_sample_offset_mode =
					MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
		/* First come first serve no matter inner or outer. */
		node.sample[i].flow_match_sample_tunnel_mode =
					MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
		node.sample[i].flow_match_sample_field_base_offset =
					(i + 1) * sizeof(uint32_t); /* in bytes */
	}
	node.sample[0].flow_match_sample_field_base_offset = 0;
	node.out[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_TCP;
	node.out[0].compare_condition_value = IPPROTO_TCP;
	node.out[1].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_UDP;
	node.out[1].compare_condition_value = IPPROTO_UDP;
	node.out[2].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_IPV6;
	node.out[2].compare_condition_value = IPPROTO_IPV6;
	fp = mlx5_devx_cmd_create_flex_parser(ibv_ctx, &node);
	if (!fp) {
		DRV_LOG(ERR, "Failed to create flex parser node object.");
		goto error;
	}
	priv->sh->srh_flex_parser.flex.devx_fp->devx_obj = fp;
	priv->sh->srh_flex_parser.flex.mapnum = MLX5_SRV6_SAMPLE_NUM;
	priv->sh->srh_flex_parser.flex.devx_fp->num_samples = MLX5_SRV6_SAMPLE_NUM;

	ret = mlx5_devx_cmd_query_parse_samples(fp, ids, priv->sh->srh_flex_parser.flex.mapnum,
						&priv->sh->srh_flex_parser.flex.devx_fp->anchor_id);
	if (ret) {
		DRV_LOG(ERR, "Failed to query sample IDs.");
		goto error;
	}
	for (i = 0; i <= MLX5_SRV6_SAMPLE_NUM - 1 && i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
		ret = mlx5_devx_cmd_match_sample_info_query(ibv_ctx, ids[i],
					&priv->sh->srh_flex_parser.flex.devx_fp->sample_info[i]);
		if (ret) {
			DRV_LOG(ERR, "Failed to query sample id %u information.", ids[i]);
			goto error;
		}
	}
	for (i = 0; i <= MLX5_SRV6_SAMPLE_NUM - 1 && i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
		priv->sh->srh_flex_parser.flex.devx_fp->sample_ids[i] = ids[i];
		priv->sh->srh_flex_parser.flex.map[i].width = sizeof(uint32_t) * CHAR_BIT;
		priv->sh->srh_flex_parser.flex.map[i].reg_id = i;
		priv->sh->srh_flex_parser.flex.map[i].shift =
						(i + 1) * sizeof(uint32_t) * CHAR_BIT;
	}
	priv->sh->srh_flex_parser.flex.map[0].shift = 0;
	return 0;
error:
	if (fp)
		mlx5_devx_cmd_destroy(fp);
	if (priv->sh->srh_flex_parser.flex.devx_fp)
		mlx5_free(priv->sh->srh_flex_parser.flex.devx_fp);
	return (rte_errno == 0) ? -ENODEV : -rte_errno;
}
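/*
 * Illustrative sample mapping built above (a sketch): each enabled
 * sample extracts one DW; sample i is placed at byte offset
 * (i + 1) * 4 with sample 0 re-anchored to offset 0, and map[i].shift
 * mirrors that as a bit offset of (i + 1) * 32 with map[0].shift = 0,
 * each mapped field being one 32-bit register wide.
 */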

/*
 * Destroy the flex parser node, including the parser itself, input / output
 * arcs and DW samples. Resources could be reused then.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_free_srh_flex_parser(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;

	if (__atomic_fetch_sub(&fp->refcnt, 1, __ATOMIC_RELAXED) - 1)
		return;
	mlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);
	mlx5_free(fp->flex.devx_fp);
	fp->flex.devx_fp = NULL;
}

uint32_t
mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
{
	uint32_t sw_parsing_offloads = 0;

	if (attr->swp) {
		sw_parsing_offloads |= MLX5_SW_PARSING_CAP;
		if (attr->swp_csum)
			sw_parsing_offloads |= MLX5_SW_PARSING_CSUM_CAP;

		if (attr->swp_lso)
			sw_parsing_offloads |= MLX5_SW_PARSING_TSO_CAP;
	}
	return sw_parsing_offloads;
}

uint32_t
mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
{
	uint32_t tn_offloads = 0;

	if (attr->tunnel_stateless_vxlan)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_VXLAN_CAP;
	if (attr->tunnel_stateless_gre)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_GRE_CAP;
	if (attr->tunnel_stateless_geneve_rx)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_GENEVE_CAP;
	return tn_offloads;
}

/* Fill all fields of UAR structure. */
static int
mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh)
{
	int ret;

	ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare Tx DevX UAR.");
		return -rte_errno;
	}
	MLX5_ASSERT(sh->tx_uar.obj);
	MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj));
	ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare Rx DevX UAR.");
		mlx5_devx_uar_release(&sh->tx_uar);
		return -rte_errno;
	}
	MLX5_ASSERT(sh->rx_uar.obj);
	MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj));
	return 0;
}

static void
mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_devx_uar_release(&sh->rx_uar);
	mlx5_devx_uar_release(&sh->tx_uar);
}

/**
 * rte_mempool_walk() callback to unregister Rx mempools.
 * It is used when implicit mempool registration is disabled.
 *
 * @param mp
 *   The mempool being walked.
 * @param arg
 *   Pointer to the device shared context.
 */
static void
mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;

	mlx5_dev_mempool_unregister(sh->cdev, mp);
}

/**
 * Callback used when implicit mempool registration is disabled
 * in order to track Rx mempool destruction.
 *
 * @param event
 *   Mempool life cycle event.
 * @param mp
 *   An Rx mempool registered explicitly when the port is started.
 * @param arg
 *   Pointer to a device shared context.
 */
static void
mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
					struct rte_mempool *mp, void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;

	if (event == RTE_MEMPOOL_EVENT_DESTROY)
		mlx5_dev_mempool_unregister(sh->cdev, mp);
}

int
mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int ret;

	/* Check if we only need to track Rx mempool destruction. */
	if (!sh->cdev->config.mr_mempool_reg_en) {
		ret = rte_mempool_event_callback_register
				(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
		return ret == 0 || rte_errno == EEXIST ? 0 : ret;
	}
	return mlx5_dev_mempool_subscribe(sh->cdev);
}

/**
 * Set up multiple TISs with different affinities according to
 * the number of bonding ports.
 *
 * @param sh
 *   Pointer to shared device context.
 *
 * @return
 *   Zero on success, -1 otherwise.
 */
static int
mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_lag_context lag_ctx = { 0 };
	struct mlx5_devx_tis_attr tis_attr = { 0 };
	int i;

	tis_attr.transport_domain = sh->td->id;
	if (sh->bond.n_port) {
		if (!mlx5_devx_cmd_query_lag(sh->cdev->ctx, &lag_ctx)) {
			sh->lag.tx_remap_affinity[0] =
				lag_ctx.tx_remap_affinity_1;
			sh->lag.tx_remap_affinity[1] =
				lag_ctx.tx_remap_affinity_2;
			sh->lag.affinity_mode = lag_ctx.port_select_mode;
		} else {
			DRV_LOG(ERR, "Failed to query lag affinity.");
			return -1;
		}
		if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS)
			DRV_LOG(DEBUG, "LAG number of ports : %d, affinity_1 & 2 : pf%d & %d.\n",
				sh->bond.n_port, lag_ctx.tx_remap_affinity_1,
				lag_ctx.tx_remap_affinity_2);
		else if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)
			DRV_LOG(INFO, "Device %s enabled HW hash based LAG.",
					sh->ibdev_name);
	}
	for (i = 0; i <= sh->bond.n_port; i++) {
		/*
		 * lag_tx_port_affinity: 0 - auto selection, 1 - PF1, 2 - PF2,
		 * and so on. Each TIS binds to one PF by setting
		 * lag_tx_port_affinity (> 0). Once LAG is enabled, we create
		 * multiple TISs and bind each one to a different PF, so that
		 * TIS[i+1] gets affinity i+1 and goes to PF i+1.
		 * TIS[0] is reserved for HW Hash mode.
		 */
		tis_attr.lag_tx_port_affinity = i;
		sh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
		if (!sh->tis[i]) {
			DRV_LOG(ERR, "Failed to create TIS %d/%d for [bonding] device"
				" %s.", i, sh->bond.n_port,
				sh->ibdev_name);
			return -1;
		}
	}
	return 0;
}
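/*
 * Illustrative result (a sketch): with a 2-port bond in TIS affinity
 * mode the loop above creates three TISs:
 *
 *   TIS[0]: affinity 0, auto selection (reserved for HW hash mode),
 *   TIS[1]: affinity 1, Tx traffic pinned to PF1,
 *   TIS[2]: affinity 2, Tx traffic pinned to PF2.
 */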

/**
 * Verify and store value for a shared device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_dev_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_sh_config *config = opaque;
	signed long tmp;

	errno = 0;
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
		/* Negative values are acceptable for some keys only. */
		rte_errno = EINVAL;
		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_TX_PP, key) == 0) {
		unsigned long mod = tmp >= 0 ? tmp : -tmp;

		if (!mod) {
			DRV_LOG(ERR, "Zero Tx packet pacing parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->tx_pp = tmp;
	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
		config->tx_skew = tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
		config->dv_esw_en = !!tmp;
	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
		if (tmp > 2) {
			DRV_LOG(ERR, "Invalid %s parameter.", key);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dv_flow_en = tmp;
	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
		if (tmp != MLX5_XMETA_MODE_LEGACY &&
		    tmp != MLX5_XMETA_MODE_META16 &&
		    tmp != MLX5_XMETA_MODE_META32 &&
		    tmp != MLX5_XMETA_MODE_MISS_INFO &&
		    tmp != MLX5_XMETA_MODE_META32_HWS) {
			DRV_LOG(ERR, "Invalid extensive metadata parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		if (tmp != MLX5_XMETA_MODE_MISS_INFO)
			config->dv_xmeta_en = tmp;
		else
			config->dv_miss_info = 1;
	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
		config->lacp_by_user = !!tmp;
	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
		if (tmp != MLX5_RCM_NONE &&
		    tmp != MLX5_RCM_LIGHT &&
		    tmp != MLX5_RCM_AGGR) {
			DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->reclaim_mode = tmp;
	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
		config->decap_en = !!tmp;
	} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
		config->allow_duplicate_pattern = !!tmp;
	} else if (strcmp(MLX5_FDB_DEFAULT_RULE_EN, key) == 0) {
		config->fdb_def_rule = !!tmp;
	} else if (strcmp(MLX5_HWS_CNT_SERVICE_CORE, key) == 0) {
		config->cnt_svc.service_core = tmp;
	} else if (strcmp(MLX5_HWS_CNT_CYCLE_TIME, key) == 0) {
		config->cnt_svc.cycle_time = tmp;
	} else if (strcmp(MLX5_REPR_MATCHING_EN, key) == 0) {
		config->repr_matching = !!tmp;
	}
	return 0;
}

/**
 * Parse user device parameters and adjust them according to device
 * capabilities.
 *
 * @param sh
 *   Pointer to shared device context.
 * @param mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 * @param config
 *   Pointer to shared device configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
				struct mlx5_kvargs_ctrl *mkvlist,
				struct mlx5_sh_config *config)
{
	const char **params = (const char *[]){
		MLX5_TX_PP,
		MLX5_TX_SKEW,
		MLX5_L3_VXLAN_EN,
		MLX5_VF_NL_EN,
		MLX5_DV_ESW_EN,
		MLX5_DV_FLOW_EN,
		MLX5_DV_XMETA_EN,
		MLX5_LACP_BY_USER,
		MLX5_RECLAIM_MEM,
		MLX5_DECAP_EN,
		MLX5_ALLOW_DUPLICATE_PATTERN,
		MLX5_FDB_DEFAULT_RULE_EN,
		MLX5_HWS_CNT_SERVICE_CORE,
		MLX5_HWS_CNT_CYCLE_TIME,
		MLX5_REPR_MATCHING_EN,
		NULL,
	};
	int ret = 0;

	/* Default configuration. */
	memset(config, 0, sizeof(*config));
	config->vf_nl_en = 1;
	config->dv_esw_en = 1;
	config->dv_flow_en = 1;
	config->decap_en = 1;
	config->allow_duplicate_pattern = 1;
	config->fdb_def_rule = 1;
	config->cnt_svc.cycle_time = MLX5_CNT_SVC_CYCLE_TIME_DEFAULT;
	config->cnt_svc.service_core = rte_get_main_lcore();
	config->repr_matching = 1;
	if (mkvlist != NULL) {
		/* Process parameters. */
		ret = mlx5_kvargs_process(mkvlist, params,
					  mlx5_dev_args_check_handler, config);
		if (ret) {
			DRV_LOG(ERR, "Failed to process device arguments: %s",
				strerror(rte_errno));
			return -rte_errno;
		}
	}
	/* Adjust parameters according to device capabilities. */
	if (config->dv_flow_en && !sh->dev_cap.dv_flow_en) {
		DRV_LOG(WARNING, "DV flow is not supported.");
		config->dv_flow_en = 0;
	}
	if (config->dv_esw_en && !sh->dev_cap.dv_esw_en) {
		DRV_LOG(DEBUG, "E-Switch DV flow is not supported.");
		config->dv_esw_en = 0;
	}
	if (config->dv_esw_en && !config->dv_flow_en) {
		DRV_LOG(DEBUG,
			"E-Switch DV flow is supported only when DV flow is enabled.");
		config->dv_esw_en = 0;
	}
	if (config->dv_miss_info && config->dv_esw_en)
		config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
	if (!config->dv_esw_en &&
	    config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
		DRV_LOG(WARNING,
			"Metadata mode %u is not supported (no E-Switch).",
			config->dv_xmeta_en);
		config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
	}
	if (config->dv_flow_en != 2 && !config->repr_matching) {
		DRV_LOG(DEBUG, "Disabling representor matching is valid only "
			       "when HW Steering is enabled.");
		config->repr_matching = 1;
	}
	if (config->tx_pp && !sh->dev_cap.txpp_en) {
		DRV_LOG(ERR, "Packet pacing is not supported.");
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (!config->tx_pp && config->tx_skew &&
	    !sh->cdev->config.hca_attr.wait_on_time) {
		DRV_LOG(WARNING,
			"\"tx_skew\" has no effect without \"tx_pp\".");
	}
	/* Check for LRO support. */
	if (mlx5_devx_obj_ops_en(sh) && sh->cdev->config.hca_attr.lro_cap) {
		/* TBD check tunnel lro caps. */
		config->lro_allowed = 1;
		DRV_LOG(DEBUG, "LRO is allowed.");
		DRV_LOG(DEBUG,
			"LRO minimal size of TCP segment required for coalescing is %d bytes.",
			sh->cdev->config.hca_attr.lro_min_mss_size);
	}
	/*
	 * If HW has bug working with tunnel packet decapsulation and scatter
	 * FCS, and decapsulation is needed, clear the hw_fcs_strip bit.
	 * Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
	 */
	if (sh->dev_cap.scatter_fcs_w_decap_disable && sh->config.decap_en)
		config->hw_fcs_strip = 0;
	else
		config->hw_fcs_strip = sh->dev_cap.hw_fcs_strip;
	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
		(config->hw_fcs_strip ? "" : "not "));
	DRV_LOG(DEBUG, "\"tx_pp\" is %d.", config->tx_pp);
	DRV_LOG(DEBUG, "\"tx_skew\" is %d.", config->tx_skew);
	DRV_LOG(DEBUG, "\"reclaim_mode\" is %u.", config->reclaim_mode);
	DRV_LOG(DEBUG, "\"dv_esw_en\" is %u.", config->dv_esw_en);
	DRV_LOG(DEBUG, "\"dv_flow_en\" is %u.", config->dv_flow_en);
	DRV_LOG(DEBUG, "\"dv_xmeta_en\" is %u.", config->dv_xmeta_en);
	DRV_LOG(DEBUG, "\"dv_miss_info\" is %u.", config->dv_miss_info);
	DRV_LOG(DEBUG, "\"l3_vxlan_en\" is %u.", config->l3_vxlan_en);
	DRV_LOG(DEBUG, "\"vf_nl_en\" is %u.", config->vf_nl_en);
	DRV_LOG(DEBUG, "\"lacp_by_user\" is %u.", config->lacp_by_user);
	DRV_LOG(DEBUG, "\"decap_en\" is %u.", config->decap_en);
	DRV_LOG(DEBUG, "\"allow_duplicate_pattern\" is %u.",
		config->allow_duplicate_pattern);
	DRV_LOG(DEBUG, "\"fdb_def_rule_en\" is %u.", config->fdb_def_rule);
	DRV_LOG(DEBUG, "\"repr_matching_en\" is %u.", config->repr_matching);
	return 0;
}

/**
 * Configure realtime timestamp format.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 * @param hca_attr
 *   Pointer to DevX HCA capabilities structure.
 */
void
mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
			 struct mlx5_hca_attr *hca_attr)
{
	uint32_t dw_cnt = MLX5_ST_SZ_DW(register_mtutc);
	uint32_t reg[dw_cnt];
	int ret = ENOTSUP;

	if (hca_attr->access_register_user)
		ret = mlx5_devx_cmd_register_read(sh->cdev->ctx,
						  MLX5_REGISTER_ID_MTUTC, 0,
						  reg, dw_cnt);
	if (!ret) {
		uint32_t ts_mode;

		/* MTUTC register is read successfully. */
		ts_mode = MLX5_GET(register_mtutc, reg, time_stamp_mode);
		if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
			sh->dev_cap.rt_timestamp = 1;
	} else {
		/* Kernel does not support register reading. */
		if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
			sh->dev_cap.rt_timestamp = 1;
	}
}
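/*
 * Worked fallback check above (a sketch): NS_PER_S / MS_PER_S =
 * 10^9 / 10^3 = 10^6 kHz, i.e. a reported device frequency of exactly
 * 1 GHz is taken as the marker of real-time timestamp mode when the
 * MTUTC register cannot be read.
 */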

static void
mlx5_init_hws_flow_tags_registers(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_dev_registers *reg = &sh->registers;
	uint32_t meta_mode = sh->config.dv_xmeta_en;
	uint16_t masks = (uint16_t)sh->cdev->config.hca_attr.set_reg_c;
	uint16_t unset = 0;
	uint32_t i, j;

	/*
	 * The capability is global for the common device but only used
	 * by the net driver; it is shared per E-Switch domain.
	 */
	if (reg->aso_reg != REG_NON)
		unset |= 1 << mlx5_regc_index(reg->aso_reg);
	unset |= 1 << mlx5_regc_index(REG_C_6);
	if (sh->config.dv_esw_en)
		unset |= 1 << mlx5_regc_index(REG_C_0);
	if (meta_mode == MLX5_XMETA_MODE_META32_HWS)
		unset |= 1 << mlx5_regc_index(REG_C_1);
	masks &= ~unset;
	for (i = 0, j = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) {
		if (!!((1 << i) & masks))
			reg->hw_avl_tags[j++] = mlx5_regc_value(i);
	}
	/*
	 * Set the registers for NAT64 usage internally. REG_C_6 is always
	 * used; the other two registers are fetched from right to left, so
	 * at least two tag registers should be available.
	 */
	MLX5_ASSERT(j >= (MLX5_FLOW_NAT64_REGS_MAX - 1));
	reg->nat64_regs[0] = REG_C_6;
	reg->nat64_regs[1] = reg->hw_avl_tags[j - 2];
	reg->nat64_regs[2] = reg->hw_avl_tags[j - 1];
}
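/*
 * Worked example for the mask logic above (a sketch, values are
 * hypothetical): with set_reg_c = 0xff (REG_C_0..REG_C_7 available),
 * dv_esw_en = 1, aso_reg = REG_C_3 and legacy metadata mode, the unset
 * mask removes REG_C_0 (E-Switch), REG_C_3 (ASO) and REG_C_6 (always
 * reserved), leaving
 *
 *   hw_avl_tags = { REG_C_1, REG_C_2, REG_C_4, REG_C_5, REG_C_7 }
 *   nat64_regs  = { REG_C_6, REG_C_5, REG_C_7 }
 */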

static void
mlx5_init_aso_register(struct mlx5_dev_ctx_shared *sh)
{
#if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO_EXT)
	const struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
	const struct mlx5_hca_qos_attr *qos = &hca_attr->qos;
	uint8_t reg_c_mask = qos->flow_meter_reg_c_ids & 0xfc;

	if (!(qos->sup && qos->flow_meter_old && sh->config.dv_flow_en))
		return;
	/*
	 * Meter needs two REG_C's for color match and pre-sfx
	 * flow match. Here get the REG_C for color match.
	 * REG_C_0 and REG_C_1 are reserved for the metadata feature.
	 */
	if (rte_popcount32(reg_c_mask) > 0) {
		/*
		 * The meter color register is used by the flow-hit feature
		 * as well, and the flow-hit feature must use REG_C_3,
		 * so prefer REG_C_3 if it is available.
		 */
		if (reg_c_mask & (1 << mlx5_regc_index(REG_C_3)))
			sh->registers.aso_reg = REG_C_3;
		else
			sh->registers.aso_reg =
				mlx5_regc_value(ffs(reg_c_mask) - 1);
	}
#else
	RTE_SET_USED(sh);
#endif
}
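/*
 * Illustrative pick (a sketch, mask values hypothetical): with
 * flow_meter_reg_c_ids = 0x3c the 0xfc filter keeps REG_C_2..REG_C_5
 * and REG_C_3 is chosen; with 0x34 (REG_C_3 bit missing), ffs() selects
 * the lowest remaining bit and REG_C_2 is chosen instead.
 */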

static void
mlx5_init_shared_dev_registers(struct mlx5_dev_ctx_shared *sh)
{
	if (sh->cdev->config.devx)
		mlx5_init_aso_register(sh);
	if (sh->registers.aso_reg != REG_NON) {
		DRV_LOG(DEBUG, "ASO register: REG_C%d",
			mlx5_regc_index(sh->registers.aso_reg));
	} else {
		DRV_LOG(DEBUG, "ASO register: NONE");
	}
	if (sh->config.dv_flow_en == 2)
		mlx5_init_hws_flow_tags_registers(sh);
}
1705 
1706 static struct mlx5_physical_device *
1707 mlx5_get_physical_device(struct mlx5_common_device *cdev)
1708 {
1709 	struct mlx5_physical_device *phdev;
1710 	struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
1711 
1712 	/* Search for physical device by system_image_guid. */
1713 	LIST_FOREACH(phdev, &phdev_list, next) {
1714 		if (phdev->guid == attr->system_image_guid) {
1715 			phdev->refcnt++;
1716 			return phdev;
1717 		}
1718 	}
1719 	phdev = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
1720 			    sizeof(struct mlx5_physical_device),
1721 			    RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1722 	if (!phdev) {
1723 		DRV_LOG(ERR, "Physical device allocation failure.");
1724 		rte_errno = ENOMEM;
1725 		return NULL;
1726 	}
1727 	phdev->guid = attr->system_image_guid;
1728 	phdev->refcnt = 1;
1729 	LIST_INSERT_HEAD(&phdev_list, phdev, next);
1730 	DRV_LOG(DEBUG, "Physical device is created, guid=%" PRIu64 ".",
1731 		phdev->guid);
1732 	return phdev;
1733 }
1734 
1735 struct mlx5_physical_device *
1736 mlx5_get_locked_physical_device(struct mlx5_priv *priv)
1737 {
1738 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
1739 	return priv->sh->phdev;
1740 }
1741 
1742 void
1743 mlx5_unlock_physical_device(void)
1744 {
1745 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
1746 }
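/*
 * Editorial sketch (not part of the original source, compiled out by a
 * documentation-only guard that is never defined): the two helpers
 * above form a lock/unlock pair, so any access to the returned
 * physical device object is expected to be bracketed like this.
 */
#ifdef MLX5_EDITOR_SKETCH /* documentation-only guard, never defined */
static void
sketch_touch_physical_device(struct mlx5_priv *priv)
{
	struct mlx5_physical_device *phdev =
		mlx5_get_locked_physical_device(priv);

	/* ... read or update fields shared by sibling ports ... */
	RTE_SET_USED(phdev);
	mlx5_unlock_physical_device();
}
#endif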
1747 
1748 static void
1749 mlx5_physical_device_destroy(struct mlx5_physical_device *phdev)
1750 {
1751 #ifdef RTE_LIBRTE_MLX5_DEBUG
1752 	/* Check the object presence in the list. */
1753 	struct mlx5_physical_device *lphdev;
1754 
1755 	LIST_FOREACH(lphdev, &phdev_list, next)
1756 		if (lphdev == phdev)
1757 			break;
1758 	MLX5_ASSERT(lphdev);
1759 	if (lphdev != phdev) {
1760 		DRV_LOG(ERR, "Freeing non-existing physical device");
1761 		return;
1762 	}
1763 #endif
1764 	MLX5_ASSERT(phdev);
1765 	MLX5_ASSERT(phdev->refcnt);
1766 	if (--phdev->refcnt)
1767 		return;
1768 	/* Remove physical device from the global device list. */
1769 	LIST_REMOVE(phdev, next);
1770 	mlx5_free(phdev);
1771 }
1772 
1773 /**
1774  * Allocate shared device context. For a multiport device the
1775  * master and representors share this context; for a dedicated
1776  * single-port device, the context is used by the given port only.
1777  *
1778  * The routine first searches for an existing context with the
1779  * specified device name; if found, the shared context is assumed
1780  * and its reference counter is incremented. If no context is found,
1781  * a new one is created and initialized with the specified device
1782  * context and parameters.
1783  *
1784  * @param[in] spawn
1785  *   Pointer to the device attributes (name, port, etc).
1786  * @param mkvlist
1787  *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
1788  *
1789  * @return
1790  *   Pointer to mlx5_dev_ctx_shared object on success,
1791  *   otherwise NULL and rte_errno is set.
1792  */
1793 struct mlx5_dev_ctx_shared *
1794 mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
1795 			  struct mlx5_kvargs_ctrl *mkvlist)
1796 {
1797 	struct mlx5_dev_ctx_shared *sh;
1798 	int err = 0;
1799 	uint32_t i;
1800 
1801 	MLX5_ASSERT(spawn);
1802 	/* Secondary process should not create the shared context. */
1803 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1804 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
1805 	/* Search for IB context by device name. */
1806 	LIST_FOREACH(sh, &dev_ctx_list, next) {
1807 		if (!strcmp(sh->ibdev_name, spawn->phys_dev_name)) {
1808 			sh->refcnt++;
1809 			goto exit;
1810 		}
1811 	}
1812 	/* No device found, we have to create new shared context. */
1813 	MLX5_ASSERT(spawn->max_port);
1814 	sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
1815 			 sizeof(struct mlx5_dev_ctx_shared) +
1816 			 spawn->max_port * sizeof(struct mlx5_dev_shared_port),
1817 			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1818 	if (!sh) {
1819 		DRV_LOG(ERR, "Shared context allocation failure.");
1820 		rte_errno = ENOMEM;
1821 		goto exit;
1822 	}
1823 	pthread_mutex_init(&sh->txpp.mutex, NULL);
1824 	sh->numa_node = spawn->cdev->dev->numa_node;
1825 	sh->cdev = spawn->cdev;
1826 	sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
1827 	if (spawn->bond_info)
1828 		sh->bond = *spawn->bond_info;
1829 	err = mlx5_os_capabilities_prepare(sh);
1830 	if (err) {
1831 		DRV_LOG(ERR, "Failed to configure device capabilities.");
1832 		goto error;
1833 	}
1834 	err = mlx5_shared_dev_ctx_args_config(sh, mkvlist, &sh->config);
1835 	if (err) {
1836 		DRV_LOG(ERR, "Failed to process device configuration: %s",
1837 			strerror(rte_errno));
1838 		goto error;
1839 	}
1840 	sh->refcnt = 1;
1841 	sh->max_port = spawn->max_port;
1842 	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->cdev->ctx),
1843 		sizeof(sh->ibdev_name) - 1);
1844 	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
1845 		sizeof(sh->ibdev_path) - 1);
1846 	sh->phdev = mlx5_get_physical_device(sh->cdev);
1847 	if (!sh->phdev)
1848 		goto error;
1849 	/*
1850 	 * Setting port_id to the maximum invalid value (RTE_MAX_ETHPORTS)
1851 	 * means no interrupt subhandler is installed for port index i.
1852 	 */
1853 	for (i = 0; i < sh->max_port; i++) {
1854 		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
1855 		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
1856 		sh->port[i].nl_ih_port_id = RTE_MAX_ETHPORTS;
1857 	}
1858 	if (sh->cdev->config.devx) {
1859 		sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
1860 		if (!sh->td) {
1861 			DRV_LOG(ERR, "TD allocation failure");
1862 			rte_errno = ENOMEM;
1863 			goto error;
1864 		}
1865 		if (mlx5_setup_tis(sh)) {
1866 			DRV_LOG(ERR, "TIS allocation failure");
1867 			rte_errno = ENOMEM;
1868 			goto error;
1869 		}
1870 		err = mlx5_rxtx_uars_prepare(sh);
1871 		if (err)
1872 			goto error;
1873 #ifndef RTE_ARCH_64
1874 	} else {
1875 		/* Initialize UAR access locks for 32bit implementations. */
1876 		rte_spinlock_init(&sh->uar_lock_cq);
1877 		for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
1878 			rte_spinlock_init(&sh->uar_lock[i]);
1879 #endif
1880 	}
1881 	mlx5_os_dev_shared_handler_install(sh);
1882 	if (LIST_EMPTY(&dev_ctx_list)) {
1883 		err = mlx5_flow_os_init_workspace_once();
1884 		if (err)
1885 			goto error;
1886 	}
1887 	err = mlx5_flow_counters_mng_init(sh);
1888 	if (err) {
1889 		DRV_LOG(ERR, "Failed to initialize counters manager.");
1890 		goto error;
1891 	}
1892 	mlx5_flow_aging_init(sh);
1893 	mlx5_flow_ipool_create(sh);
1894 	/* Add context to the global device list. */
1895 	LIST_INSERT_HEAD(&dev_ctx_list, sh, next);
1896 	rte_spinlock_init(&sh->geneve_tlv_opt_sl);
1897 	mlx5_init_shared_dev_registers(sh);
1898 	/* Init counter pool list header and lock. */
1899 	LIST_INIT(&sh->hws_cpool_list);
1900 	rte_spinlock_init(&sh->cpool_lock);
1901 exit:
1902 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
1903 	return sh;
1904 error:
1905 	err = rte_errno;
1906 	pthread_mutex_destroy(&sh->txpp.mutex);
1907 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
1908 	MLX5_ASSERT(sh);
1909 	mlx5_rxtx_uars_release(sh);
1910 	i = 0;
1911 	do {
1912 		if (sh->tis[i])
1913 			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
1914 	} while (++i <= (uint32_t)sh->bond.n_port);
1915 	if (sh->td)
1916 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
1917 	if (sh->phdev)
1918 		mlx5_physical_device_destroy(sh->phdev);
1919 	mlx5_free(sh);
1920 	rte_errno = err;
1921 	return NULL;
1922 }
1923 
1924 /**
1925  * Create LWM event_channel and interrupt handle for shared device
1926  * context. All rxqs sharing the device context share the event_channel.
1927  * A callback is registered in the interrupt thread to receive the LWM event.
1928  *
1929  * @param[in] priv
1930  *   Pointer to mlx5_priv instance.
1931  *
1932  * @return
1933  *   0 on success, negative with rte_errno set.
1934  */
1935 int
1936 mlx5_lwm_setup(struct mlx5_priv *priv)
1937 {
1938 	int fd_lwm;
1939 
1940 	pthread_mutex_init(&priv->sh->lwm_config_lock, NULL);
1941 	priv->sh->devx_channel_lwm = mlx5_os_devx_create_event_channel
1942 			(priv->sh->cdev->ctx,
1943 			 MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
1944 	if (!priv->sh->devx_channel_lwm)
1945 		goto err;
1946 	fd_lwm = mlx5_os_get_devx_channel_fd(priv->sh->devx_channel_lwm);
1947 	priv->sh->intr_handle_lwm = mlx5_os_interrupt_handler_create
1948 		(RTE_INTR_INSTANCE_F_SHARED, true,
1949 		 fd_lwm, mlx5_dev_interrupt_handler_lwm, priv);
1950 	if (!priv->sh->intr_handle_lwm)
1951 		goto err;
1952 	return 0;
1953 err:
1954 	if (priv->sh->devx_channel_lwm) {
1955 		mlx5_os_devx_destroy_event_channel
1956 			(priv->sh->devx_channel_lwm);
1957 		priv->sh->devx_channel_lwm = NULL;
1958 	}
1959 	pthread_mutex_destroy(&priv->sh->lwm_config_lock);
1960 	return -rte_errno;
1961 }
1962 
1963 /**
1964  * Destroy LWM event_channel and interrupt handle for shared device
1965  * context before this context is freed. The interrupt handler is also
1966  * unregistered.
1967  *
1968  * @param[in] sh
1969  *   Pointer to shared device context.
1970  */
1971 void
1972 mlx5_lwm_unset(struct mlx5_dev_ctx_shared *sh)
1973 {
1974 	if (sh->intr_handle_lwm) {
1975 		mlx5_os_interrupt_handler_destroy(sh->intr_handle_lwm,
1976 			mlx5_dev_interrupt_handler_lwm, (void *)-1);
1977 		sh->intr_handle_lwm = NULL;
1978 	}
1979 	if (sh->devx_channel_lwm) {
1980 		mlx5_os_devx_destroy_event_channel
1981 			(sh->devx_channel_lwm);
1982 		sh->devx_channel_lwm = NULL;
1983 	}
1984 	pthread_mutex_destroy(&sh->lwm_config_lock);
1985 }
1986 
1987 /**
1988  * Free shared IB device context. Decrement counter and if zero free
1989  * all allocated resources and close handles.
1990  *
1991  * @param[in] sh
1992  *   Pointer to mlx5_dev_ctx_shared object to free
1993  */
1994 void
1995 mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
1996 {
1997 	int ret;
1998 	int i = 0;
1999 
2000 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
2001 #ifdef RTE_LIBRTE_MLX5_DEBUG
2002 	/* Check the object presence in the list. */
2003 	struct mlx5_dev_ctx_shared *lctx;
2004 
2005 	LIST_FOREACH(lctx, &dev_ctx_list, next)
2006 		if (lctx == sh)
2007 			break;
2008 	MLX5_ASSERT(lctx);
2009 	if (lctx != sh) {
2010 		DRV_LOG(ERR, "Freeing non-existing shared IB context");
2011 		goto exit;
2012 	}
2013 #endif
2014 	MLX5_ASSERT(sh);
2015 	MLX5_ASSERT(sh->refcnt);
2016 	/* Secondary process should not free the shared context. */
2017 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
2018 	if (--sh->refcnt)
2019 		goto exit;
2020 	/* Stop watching for mempool events and unregister all mempools. */
2021 	if (!sh->cdev->config.mr_mempool_reg_en) {
2022 		ret = rte_mempool_event_callback_unregister
2023 				(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
2024 		if (ret == 0)
2025 			rte_mempool_walk
2026 			     (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh);
2027 	}
2028 	/* Remove context from the global device list. */
2029 	LIST_REMOVE(sh, next);
2030 	/* Release resources on the last device removal. */
2031 	if (LIST_EMPTY(&dev_ctx_list)) {
2032 		mlx5_os_net_cleanup();
2033 		mlx5_flow_os_release_workspace();
2034 	}
2035 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
2036 	if (sh->flex_parsers_dv) {
2037 		mlx5_list_destroy(sh->flex_parsers_dv);
2038 		sh->flex_parsers_dv = NULL;
2039 	}
2040 	/*
2041 	 * Ensure there is no async event handler installed.
2042 	 * Only the primary process handles async device events.
2043 	 */
2044 	mlx5_flow_counters_mng_close(sh);
2045 	if (sh->ct_mng)
2046 		mlx5_flow_aso_ct_mng_close(sh);
2047 	if (sh->aso_age_mng) {
2048 		mlx5_flow_aso_age_mng_close(sh);
2049 		sh->aso_age_mng = NULL;
2050 	}
2051 	if (sh->mtrmng)
2052 		mlx5_aso_flow_mtrs_mng_close(sh);
2053 	mlx5_flow_ipool_destroy(sh);
2054 	mlx5_os_dev_shared_handler_uninstall(sh);
2055 	mlx5_rxtx_uars_release(sh);
2056 	do {
2057 		if (sh->tis[i])
2058 			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
2059 	} while (++i <= sh->bond.n_port);
2060 	if (sh->td)
2061 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
2062 	MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
2063 	pthread_mutex_destroy(&sh->txpp.mutex);
2064 	mlx5_lwm_unset(sh);
2065 	mlx5_physical_device_destroy(sh->phdev);
2066 	mlx5_free(sh);
2067 	return;
2068 exit:
2069 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
2070 }
2071 
2072 /**
2073  * Destroy table hash list.
2074  *
2075  * @param[in] priv
2076  *   Pointer to the private device data structure.
2077  */
2078 void
2079 mlx5_free_table_hash_list(struct mlx5_priv *priv)
2080 {
2081 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2082 	struct mlx5_hlist **tbls = (priv->sh->config.dv_flow_en == 2) ?
2083 				   &sh->groups : &sh->flow_tbls;
2084 	if (*tbls == NULL)
2085 		return;
2086 	mlx5_hlist_destroy(*tbls);
2087 	*tbls = NULL;
2088 }
2089 
2090 #ifdef HAVE_MLX5_HWS_SUPPORT
2091 /**
2092  * Allocate HW steering group hash list.
2093  *
2094  * @param[in] priv
2095  *   Pointer to the private device data structure.
2096  */
2097 static int
2098 mlx5_alloc_hw_group_hash_list(struct mlx5_priv *priv)
2099 {
2100 	int err = 0;
2101 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2102 	char s[MLX5_NAME_SIZE];
2103 
2104 	MLX5_ASSERT(sh);
2105 	snprintf(s, sizeof(s), "%s_flow_groups", priv->sh->ibdev_name);
2106 	sh->groups = mlx5_hlist_create
2107 			(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
2108 			 false, true, sh,
2109 			 flow_hw_grp_create_cb,
2110 			 flow_hw_grp_match_cb,
2111 			 flow_hw_grp_remove_cb,
2112 			 flow_hw_grp_clone_cb,
2113 			 flow_hw_grp_clone_free_cb);
2114 	if (!sh->groups) {
2115 		DRV_LOG(ERR, "Flow groups hash list creation failed.");
2116 		err = ENOMEM;
2117 	}
2118 	return err;
2119 }
2120 #endif
2121 
2123 /**
2124  * Initialize flow table hash list and create the root tables entry
2125  * for each domain.
2126  *
2127  * @param[in] priv
2128  *   Pointer to the private device data structure.
2129  *
2130  * @return
2131  *   Zero on success, positive error code otherwise.
2132  */
2133 int
2134 mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
2135 {
2136 	int err = 0;
2137 
2138 	/* Tables are only used in DV and DR modes. */
2139 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
2140 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2141 	char s[MLX5_NAME_SIZE];
2142 
2143 #ifdef HAVE_MLX5_HWS_SUPPORT
2144 	if (priv->sh->config.dv_flow_en == 2)
2145 		return mlx5_alloc_hw_group_hash_list(priv);
2146 #endif
2147 	MLX5_ASSERT(sh);
2148 	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
2149 	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
2150 					  false, true, sh,
2151 					  flow_dv_tbl_create_cb,
2152 					  flow_dv_tbl_match_cb,
2153 					  flow_dv_tbl_remove_cb,
2154 					  flow_dv_tbl_clone_cb,
2155 					  flow_dv_tbl_clone_free_cb);
2156 	if (!sh->flow_tbls) {
2157 		DRV_LOG(ERR, "Flow tables hash list creation failed.");
2158 		err = ENOMEM;
2159 		return err;
2160 	}
2161 #ifndef HAVE_MLX5DV_DR
2162 	struct rte_flow_error error;
2163 	struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];
2164 
2165 	/*
2166 	 * In case we do not have DR support, the zero tables should be created
2167 	 * because DV expects to see them even if they cannot be created by
2168 	 * RDMA-CORE.
2169 	 */
2170 	if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0,
2171 		NULL, 0, 1, 0, &error) ||
2172 	    !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0,
2173 		NULL, 0, 1, 0, &error) ||
2174 	    !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0,
2175 		NULL, 0, 1, 0, &error)) {
2176 		err = ENOMEM;
2177 		goto error;
2178 	}
2179 	return err;
2180 error:
2181 	mlx5_free_table_hash_list(priv);
2182 #endif /* HAVE_MLX5DV_DR */
2183 #endif
2184 	return err;
2185 }
2186 
2187 /**
2188  * Retrieve integer value from environment variable.
2189  *
2190  * @param[in] name
2191  *   Environment variable name.
2192  *
2193  * @return
2194  *   Integer value, 0 if the variable is not set.
2195  */
2196 int
2197 mlx5_getenv_int(const char *name)
2198 {
2199 	const char *val = getenv(name);
2200 
2201 	if (val == NULL)
2202 		return 0;
2203 	return atoi(val);
2204 }
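/*
 * Editorial sketch (not part of the original source; the environment
 * variable name is hypothetical): typical use is a knob read once at
 * init time. Note atoi() yields 0 for non-numeric strings, matching
 * the "variable is not set" default documented above.
 */
#ifdef MLX5_EDITOR_SKETCH /* documentation-only guard, never defined */
static void
sketch_read_env_knob(void)
{
	int dump = mlx5_getenv_int("MLX5_SKETCH_DUMP_EN");

	if (dump)
		DRV_LOG(DEBUG, "extra diagnostics enabled (%d)", dump);
}
#endif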
2205 
2206 /**
2207  * DPDK callback to add a UDP tunnel port.
2208  *
2209  * @param[in] dev
2210  *   A pointer to eth_dev
2211  * @param[in] udp_tunnel
2212  *   A pointer to udp tunnel
2213  *
2214  * @return
2215  *   0 on valid UDP ports and tunnels, -ENOTSUP otherwise.
2216  */
2217 int
2218 mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
2219 			 struct rte_eth_udp_tunnel *udp_tunnel)
2220 {
2221 	MLX5_ASSERT(udp_tunnel != NULL);
2222 	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
2223 	    udp_tunnel->udp_port == 4789)
2224 		return 0;
2225 	if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
2226 	    udp_tunnel->udp_port == 4790)
2227 		return 0;
2228 	return -ENOTSUP;
2229 }
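/*
 * Editorial sketch (not part of the original source): the callback above
 * only acknowledges the IANA-assigned ports, so a request like the
 * following succeeds, while any other port yields -ENOTSUP.
 */
#ifdef MLX5_EDITOR_SKETCH /* documentation-only guard, never defined */
static int
sketch_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789, /* the only accepted VXLAN port */
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
#endif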
2230 
2231 /**
2232  * Initialize process private data structure.
2233  *
2234  * @param dev
2235  *   Pointer to Ethernet device structure.
2236  *
2237  * @return
2238  *   0 on success, a negative errno value otherwise and rte_errno is set.
2239  */
2240 int
2241 mlx5_proc_priv_init(struct rte_eth_dev *dev)
2242 {
2243 	struct mlx5_priv *priv = dev->data->dev_private;
2244 	struct mlx5_proc_priv *ppriv;
2245 	size_t ppriv_size;
2246 
2247 	mlx5_proc_priv_uninit(dev);
2248 	/*
2249 	 * UAR register table follows the process private structure. BlueFlame
2250 	 * registers for Tx queues are stored in the table.
2251 	 */
2252 	ppriv_size = sizeof(struct mlx5_proc_priv) +
2253 		     priv->txqs_n * sizeof(struct mlx5_uar_data);
2254 	ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size,
2255 			    RTE_CACHE_LINE_SIZE, dev->device->numa_node);
2256 	if (!ppriv) {
2257 		rte_errno = ENOMEM;
2258 		return -rte_errno;
2259 	}
2260 	ppriv->uar_table_sz = priv->txqs_n;
2261 	dev->process_private = ppriv;
2262 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2263 		priv->sh->pppriv = ppriv;
2264 	return 0;
2265 }
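/*
 * Editorial note (illustration; assumes the flexible uar_table[] member
 * declared for struct mlx5_proc_priv in mlx5.h): with txqs_n == 4 the
 * allocation above is laid out contiguously as
 *
 *   [ struct mlx5_proc_priv | uar_table[0] .. uar_table[3] ]
 *
 * so the per-process BlueFlame register of Tx queue i is reached as
 * ppriv->uar_table[i] with no extra indirection; this routine simply
 * re-allocates the block sized to the current txqs_n.
 */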
2266 
2267 /**
2268  * Un-initialize process private data structure.
2269  *
2270  * @param dev
2271  *   Pointer to Ethernet device structure.
2272  */
2273 void
2274 mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
2275 {
2276 	struct mlx5_proc_priv *ppriv = dev->process_private;
2277 
2278 	if (!ppriv)
2279 		return;
2280 	if (ppriv->hca_bar)
2281 		mlx5_txpp_unmap_hca_bar(dev);
2282 	mlx5_free(dev->process_private);
2283 	dev->process_private = NULL;
2284 }
2285 
2286 /**
2287  * DPDK callback to close the device.
2288  *
2289  * Destroy all queues and objects, free memory.
2290  *
2291  * @param dev
2292  *   Pointer to Ethernet device structure.
2293  */
2294 int
2295 mlx5_dev_close(struct rte_eth_dev *dev)
2296 {
2297 	struct mlx5_priv *priv = dev->data->dev_private;
2298 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2299 	unsigned int i;
2300 	int ret;
2301 
2302 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
2303 		/* Check if process_private was released. */
2304 		if (!dev->process_private)
2305 			return 0;
2306 		mlx5_tx_uar_uninit_secondary(dev);
2307 		mlx5_proc_priv_uninit(dev);
2308 		rte_eth_dev_release_port(dev);
2309 		return 0;
2310 	}
2311 	if (!sh)
2312 		return 0;
2313 	if (priv->shared_refcnt) {
2314 		DRV_LOG(ERR, "port %u is shared host in use (%u)",
2315 			dev->data->port_id, priv->shared_refcnt);
2316 		rte_errno = EBUSY;
2317 		return -EBUSY;
2318 	}
2319 #ifdef HAVE_MLX5_HWS_SUPPORT
2320 	/* Check if shared GENEVE options were created on the context being closed. */
2321 	ret = mlx5_geneve_tlv_options_check_busy(priv);
2322 	if (ret) {
2323 		DRV_LOG(ERR, "port %u maintains shared GENEVE TLV options",
2324 			dev->data->port_id);
2325 		return ret;
2326 	}
2327 #endif
2328 	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
2329 		dev->data->port_id,
2330 		((priv->sh->cdev->ctx != NULL) ?
2331 		mlx5_os_get_ctx_device_name(priv->sh->cdev->ctx) : ""));
2332 	/*
2333 	 * If default mreg copy action is removed at the stop stage,
2334 	 * the search will find nothing and no further action will be taken.
2335 	 */
2336 	if (priv->sh->config.dv_flow_en != 2)
2337 		mlx5_flow_stop_default(dev);
2338 	mlx5_traffic_disable(dev);
2339 	/*
2340 	 * If all the flows are already flushed in the device stop stage,
2341 	 * then this will return directly without any action.
2342 	 */
2343 	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
2344 	mlx5_action_handle_flush(dev);
2345 	mlx5_flow_meter_flush(dev, NULL);
2346 	/* Prevent crashes when queues are still in use. */
2347 	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
2348 	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
2349 	rte_wmb();
2350 	/* Disable datapath on secondary process. */
2351 	mlx5_mp_os_req_stop_rxtx(dev);
2352 	/* Free the eCPRI flex parser resource. */
2353 	mlx5_flex_parser_ecpri_release(dev);
2354 	mlx5_flex_item_port_cleanup(dev);
2355 	mlx5_indirect_list_handles_release(dev);
2356 #ifdef HAVE_MLX5_HWS_SUPPORT
2357 	flow_hw_destroy_vport_action(dev);
2358 	flow_hw_resource_release(dev);
2359 	flow_hw_clear_port_info(dev);
2360 	if (priv->tlv_options != NULL) {
2361 		/* Free the GENEVE TLV parser resource. */
2362 		claim_zero(mlx5_geneve_tlv_options_destroy(priv->tlv_options, sh->phdev));
2363 		priv->tlv_options = NULL;
2364 	}
2365 #endif
2366 	if (priv->rxq_privs != NULL) {
2367 		/* XXX race condition if mlx5_rx_burst() is still running. */
2368 		rte_delay_us_sleep(1000);
2369 		for (i = 0; (i != priv->rxqs_n); ++i)
2370 			mlx5_rxq_release(dev, i);
2371 		priv->rxqs_n = 0;
2372 		mlx5_free(priv->rxq_privs);
2373 		priv->rxq_privs = NULL;
2374 	}
2375 	if (priv->txqs != NULL && dev->data->tx_queues != NULL) {
2376 		/* XXX race condition if mlx5_tx_burst() is still running. */
2377 		rte_delay_us_sleep(1000);
2378 		for (i = 0; (i != priv->txqs_n); ++i)
2379 			mlx5_txq_release(dev, i);
2380 		priv->txqs_n = 0;
2381 		priv->txqs = NULL;
2382 	}
2383 	mlx5_proc_priv_uninit(dev);
2384 	if (priv->q_counters) {
2385 		mlx5_devx_cmd_destroy(priv->q_counters);
2386 		priv->q_counters = NULL;
2387 	}
2388 	if (priv->drop_queue.hrxq)
2389 		mlx5_drop_action_destroy(dev);
2390 	if (priv->mreg_cp_tbl)
2391 		mlx5_hlist_destroy(priv->mreg_cp_tbl);
2392 	mlx5_mprq_free_mp(dev);
2393 	mlx5_os_free_shared_dr(priv);
2394 	if (priv->rss_conf.rss_key != NULL)
2395 		mlx5_free(priv->rss_conf.rss_key);
2396 	if (priv->reta_idx != NULL)
2397 		mlx5_free(priv->reta_idx);
2398 	if (priv->sh->dev_cap.vf)
2399 		mlx5_os_mac_addr_flush(dev);
2400 	if (priv->nl_socket_route >= 0)
2401 		close(priv->nl_socket_route);
2402 	if (priv->nl_socket_rdma >= 0)
2403 		close(priv->nl_socket_rdma);
2404 	if (priv->vmwa_context)
2405 		mlx5_vlan_vmwa_exit(priv->vmwa_context);
2406 	ret = mlx5_hrxq_verify(dev);
2407 	if (ret)
2408 		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
2409 			dev->data->port_id);
2410 	ret = mlx5_ind_table_obj_verify(dev);
2411 	if (ret)
2412 		DRV_LOG(WARNING, "port %u some indirection table still remain",
2413 			dev->data->port_id);
2414 	ret = mlx5_rxq_obj_verify(dev);
2415 	if (ret)
2416 		DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
2417 			dev->data->port_id);
2418 	ret = mlx5_ext_rxq_verify(dev);
2419 	if (ret)
2420 		DRV_LOG(WARNING, "Port %u some external RxQ still remain.",
2421 			dev->data->port_id);
2422 	ret = mlx5_rxq_verify(dev);
2423 	if (ret)
2424 		DRV_LOG(WARNING, "port %u some Rx queues still remain",
2425 			dev->data->port_id);
2426 	ret = mlx5_txq_obj_verify(dev);
2427 	if (ret)
2428 		DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
2429 			dev->data->port_id);
2430 	ret = mlx5_txq_verify(dev);
2431 	if (ret)
2432 		DRV_LOG(WARNING, "port %u some Tx queues still remain",
2433 			dev->data->port_id);
2434 	ret = mlx5_flow_verify(dev);
2435 	if (ret)
2436 		DRV_LOG(WARNING, "port %u some flows still remain",
2437 			dev->data->port_id);
2438 	if (priv->hrxqs)
2439 		mlx5_list_destroy(priv->hrxqs);
2440 	mlx5_free(priv->ext_rxqs);
2441 	priv->sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS;
2442 	/*
2443 	 * The interrupt handler port id must be reset before priv is reset
2444 	 * since 'mlx5_dev_interrupt_nl_cb' uses priv.
2445 	 */
2446 	rte_io_wmb();
2447 	/*
2448 	 * Free the shared context last, because the cleanup
2449 	 * routines above may use some shared fields, e.g.
2450 	 * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving
2451 	 * the ifindex if Netlink fails.
2452 	 */
2453 	mlx5_free_shared_dev_ctx(priv->sh);
2454 	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
2455 		unsigned int c = 0;
2456 		uint16_t port_id;
2457 
2458 		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
2459 			struct mlx5_priv *opriv =
2460 				rte_eth_devices[port_id].data->dev_private;
2461 
2462 			if (!opriv ||
2463 			    opriv->domain_id != priv->domain_id ||
2464 			    &rte_eth_devices[port_id] == dev)
2465 				continue;
2466 			++c;
2467 			break;
2468 		}
2469 		if (!c)
2470 			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
2471 	}
2472 	memset(priv, 0, sizeof(*priv));
2473 	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
2474 	/*
2475 	 * Reset mac_addrs to NULL such that it is not freed as part of
2476 	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
2477 	 * it is freed when dev_private is freed.
2478 	 */
2479 	dev->data->mac_addrs = NULL;
2480 	return 0;
2481 }
2482 
2483 const struct eth_dev_ops mlx5_dev_ops = {
2484 	.dev_configure = mlx5_dev_configure,
2485 	.dev_start = mlx5_dev_start,
2486 	.dev_stop = mlx5_dev_stop,
2487 	.dev_set_link_down = mlx5_set_link_down,
2488 	.dev_set_link_up = mlx5_set_link_up,
2489 	.dev_close = mlx5_dev_close,
2490 	.promiscuous_enable = mlx5_promiscuous_enable,
2491 	.promiscuous_disable = mlx5_promiscuous_disable,
2492 	.allmulticast_enable = mlx5_allmulticast_enable,
2493 	.allmulticast_disable = mlx5_allmulticast_disable,
2494 	.link_update = mlx5_link_update,
2495 	.stats_get = mlx5_stats_get,
2496 	.stats_reset = mlx5_stats_reset,
2497 	.xstats_get = mlx5_xstats_get,
2498 	.xstats_reset = mlx5_xstats_reset,
2499 	.xstats_get_names = mlx5_xstats_get_names,
2500 	.fw_version_get = mlx5_fw_version_get,
2501 	.dev_infos_get = mlx5_dev_infos_get,
2502 	.representor_info_get = mlx5_representor_info_get,
2503 	.read_clock = mlx5_txpp_read_clock,
2504 	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
2505 	.vlan_filter_set = mlx5_vlan_filter_set,
2506 	.rx_queue_setup = mlx5_rx_queue_setup,
2507 	.rx_queue_avail_thresh_set = mlx5_rx_queue_lwm_set,
2508 	.rx_queue_avail_thresh_query = mlx5_rx_queue_lwm_query,
2509 	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
2510 	.tx_queue_setup = mlx5_tx_queue_setup,
2511 	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
2512 	.rx_queue_release = mlx5_rx_queue_release,
2513 	.tx_queue_release = mlx5_tx_queue_release,
2514 	.rx_queue_start = mlx5_rx_queue_start,
2515 	.rx_queue_stop = mlx5_rx_queue_stop,
2516 	.tx_queue_start = mlx5_tx_queue_start,
2517 	.tx_queue_stop = mlx5_tx_queue_stop,
2518 	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
2519 	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
2520 	.mac_addr_remove = mlx5_mac_addr_remove,
2521 	.mac_addr_add = mlx5_mac_addr_add,
2522 	.mac_addr_set = mlx5_mac_addr_set,
2523 	.set_mc_addr_list = mlx5_set_mc_addr_list,
2524 	.mtu_set = mlx5_dev_set_mtu,
2525 	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
2526 	.vlan_offload_set = mlx5_vlan_offload_set,
2527 	.reta_update = mlx5_dev_rss_reta_update,
2528 	.reta_query = mlx5_dev_rss_reta_query,
2529 	.rss_hash_update = mlx5_rss_hash_update,
2530 	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
2531 	.flow_ops_get = mlx5_flow_ops_get,
2532 	.rxq_info_get = mlx5_rxq_info_get,
2533 	.txq_info_get = mlx5_txq_info_get,
2534 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2535 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2536 	.rx_queue_intr_enable = mlx5_rx_intr_enable,
2537 	.rx_queue_intr_disable = mlx5_rx_intr_disable,
2538 	.is_removed = mlx5_is_removed,
2539 	.udp_tunnel_port_add  = mlx5_udp_tunnel_port_add,
2540 	.get_module_info = mlx5_get_module_info,
2541 	.get_module_eeprom = mlx5_get_module_eeprom,
2542 	.hairpin_cap_get = mlx5_hairpin_cap_get,
2543 	.mtr_ops_get = mlx5_flow_meter_ops_get,
2544 	.hairpin_bind = mlx5_hairpin_bind,
2545 	.hairpin_unbind = mlx5_hairpin_unbind,
2546 	.hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
2547 	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
2548 	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
2549 	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
2550 	.get_monitor_addr = mlx5_get_monitor_addr,
2551 	.count_aggr_ports = mlx5_count_aggr_ports,
2552 	.map_aggr_tx_affinity = mlx5_map_aggr_tx_affinity,
2553 	.rx_metadata_negotiate = mlx5_flow_rx_metadata_negotiate,
2554 };
2555 
2556 /* Available operations from secondary process. */
2557 const struct eth_dev_ops mlx5_dev_sec_ops = {
2558 	.stats_get = mlx5_stats_get,
2559 	.stats_reset = mlx5_stats_reset,
2560 	.xstats_get = mlx5_xstats_get,
2561 	.xstats_reset = mlx5_xstats_reset,
2562 	.xstats_get_names = mlx5_xstats_get_names,
2563 	.fw_version_get = mlx5_fw_version_get,
2564 	.dev_infos_get = mlx5_dev_infos_get,
2565 	.representor_info_get = mlx5_representor_info_get,
2566 	.read_clock = mlx5_txpp_read_clock,
2567 	.rx_queue_start = mlx5_rx_queue_start,
2568 	.rx_queue_stop = mlx5_rx_queue_stop,
2569 	.tx_queue_start = mlx5_tx_queue_start,
2570 	.tx_queue_stop = mlx5_tx_queue_stop,
2571 	.rxq_info_get = mlx5_rxq_info_get,
2572 	.txq_info_get = mlx5_txq_info_get,
2573 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2574 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2575 	.get_module_info = mlx5_get_module_info,
2576 	.get_module_eeprom = mlx5_get_module_eeprom,
2577 	.count_aggr_ports = mlx5_count_aggr_ports,
2578 	.map_aggr_tx_affinity = mlx5_map_aggr_tx_affinity,
2579 	.rx_metadata_negotiate = mlx5_flow_rx_metadata_negotiate,
2580 };
2581 
2582 /* Available operations in flow isolated mode. */
2583 const struct eth_dev_ops mlx5_dev_ops_isolate = {
2584 	.dev_configure = mlx5_dev_configure,
2585 	.dev_start = mlx5_dev_start,
2586 	.dev_stop = mlx5_dev_stop,
2587 	.dev_set_link_down = mlx5_set_link_down,
2588 	.dev_set_link_up = mlx5_set_link_up,
2589 	.dev_close = mlx5_dev_close,
2590 	.promiscuous_enable = mlx5_promiscuous_enable,
2591 	.promiscuous_disable = mlx5_promiscuous_disable,
2592 	.allmulticast_enable = mlx5_allmulticast_enable,
2593 	.allmulticast_disable = mlx5_allmulticast_disable,
2594 	.link_update = mlx5_link_update,
2595 	.stats_get = mlx5_stats_get,
2596 	.stats_reset = mlx5_stats_reset,
2597 	.xstats_get = mlx5_xstats_get,
2598 	.xstats_reset = mlx5_xstats_reset,
2599 	.xstats_get_names = mlx5_xstats_get_names,
2600 	.fw_version_get = mlx5_fw_version_get,
2601 	.dev_infos_get = mlx5_dev_infos_get,
2602 	.representor_info_get = mlx5_representor_info_get,
2603 	.read_clock = mlx5_txpp_read_clock,
2604 	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
2605 	.vlan_filter_set = mlx5_vlan_filter_set,
2606 	.rx_queue_setup = mlx5_rx_queue_setup,
2607 	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
2608 	.tx_queue_setup = mlx5_tx_queue_setup,
2609 	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
2610 	.rx_queue_release = mlx5_rx_queue_release,
2611 	.tx_queue_release = mlx5_tx_queue_release,
2612 	.rx_queue_start = mlx5_rx_queue_start,
2613 	.rx_queue_stop = mlx5_rx_queue_stop,
2614 	.tx_queue_start = mlx5_tx_queue_start,
2615 	.tx_queue_stop = mlx5_tx_queue_stop,
2616 	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
2617 	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
2618 	.mac_addr_remove = mlx5_mac_addr_remove,
2619 	.mac_addr_add = mlx5_mac_addr_add,
2620 	.mac_addr_set = mlx5_mac_addr_set,
2621 	.set_mc_addr_list = mlx5_set_mc_addr_list,
2622 	.mtu_set = mlx5_dev_set_mtu,
2623 	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
2624 	.vlan_offload_set = mlx5_vlan_offload_set,
2625 	.flow_ops_get = mlx5_flow_ops_get,
2626 	.rxq_info_get = mlx5_rxq_info_get,
2627 	.txq_info_get = mlx5_txq_info_get,
2628 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2629 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2630 	.rx_queue_intr_enable = mlx5_rx_intr_enable,
2631 	.rx_queue_intr_disable = mlx5_rx_intr_disable,
2632 	.is_removed = mlx5_is_removed,
2633 	.get_module_info = mlx5_get_module_info,
2634 	.get_module_eeprom = mlx5_get_module_eeprom,
2635 	.hairpin_cap_get = mlx5_hairpin_cap_get,
2636 	.mtr_ops_get = mlx5_flow_meter_ops_get,
2637 	.hairpin_bind = mlx5_hairpin_bind,
2638 	.hairpin_unbind = mlx5_hairpin_unbind,
2639 	.hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
2640 	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
2641 	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
2642 	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
2643 	.get_monitor_addr = mlx5_get_monitor_addr,
2644 	.count_aggr_ports = mlx5_count_aggr_ports,
2645 	.map_aggr_tx_affinity = mlx5_map_aggr_tx_affinity,
2646 };
2647 
2648 /**
2649  * Verify and store value for device argument.
2650  *
2651  * @param[in] key
2652  *   Key argument to verify.
2653  * @param[in] val
2654  *   Value associated with key.
2655  * @param opaque
2656  *   User data.
2657  *
2658  * @return
2659  *   0 on success, a negative errno value otherwise and rte_errno is set.
2660  */
2661 static int
2662 mlx5_port_args_check_handler(const char *key, const char *val, void *opaque)
2663 {
2664 	struct mlx5_port_config *config = opaque;
2665 	signed long tmp;
2666 
2667 	/* No-op, port representors are processed in mlx5_dev_spawn(). */
2668 	if (!strcmp(MLX5_REPRESENTOR, key))
2669 		return 0;
2670 	errno = 0;
2671 	tmp = strtol(val, NULL, 0);
2672 	if (errno) {
2673 		rte_errno = errno;
2674 		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
2675 		return -rte_errno;
2676 	}
2677 	if (tmp < 0) {
2678 		/* Negative values are acceptable for some keys only. */
2679 		rte_errno = EINVAL;
2680 		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
2681 		return -rte_errno;
2682 	}
2683 	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
2684 		if ((tmp & ~MLX5_RXQ_ENH_CQE_COMP_MASK) >
2685 		    MLX5_CQE_RESP_FORMAT_L34H_STRIDX) {
2686 			DRV_LOG(ERR, "invalid CQE compression "
2687 				     "format parameter");
2688 			rte_errno = EINVAL;
2689 			return -rte_errno;
2690 		}
2691 		config->cqe_comp = !!tmp;
2692 		config->cqe_comp_fmt = tmp & ~MLX5_RXQ_ENH_CQE_COMP_MASK;
2693 		config->enh_cqe_comp = !!(tmp & MLX5_RXQ_ENH_CQE_COMP_MASK);
2694 	} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
2695 		config->hw_padding = !!tmp;
2696 	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
2697 		config->mprq.enabled = !!tmp;
2698 	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
2699 		config->mprq.log_stride_num = tmp;
2700 	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
2701 		config->mprq.log_stride_size = tmp;
2702 	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
2703 		config->mprq.max_memcpy_len = tmp;
2704 	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
2705 		config->mprq.min_rxqs_num = tmp;
2706 	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
2707 		DRV_LOG(WARNING, "%s: deprecated parameter,"
2708 				 " converted to txq_inline_max", key);
2709 		config->txq_inline_max = tmp;
2710 	} else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
2711 		config->txq_inline_max = tmp;
2712 	} else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
2713 		config->txq_inline_min = tmp;
2714 	} else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
2715 		config->txq_inline_mpw = tmp;
2716 	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
2717 		config->txqs_inline = tmp;
2718 	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
2719 		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
2720 	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
2721 		config->mps = !!tmp;
2722 	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
2723 		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
2724 	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
2725 		DRV_LOG(WARNING, "%s: deprecated parameter,"
2726 				 " converted to txq_inline_mpw", key);
2727 		config->txq_inline_mpw = tmp;
2728 	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
2729 		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
2730 	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
2731 		config->rx_vec_en = !!tmp;
2732 	} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
2733 		config->max_dump_files_num = tmp;
2734 	} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
2735 		config->lro_timeout = tmp;
2736 	} else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
2737 		config->log_hp_size = tmp;
2738 	} else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
2739 		config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
2740 		config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
2741 	}
2742 	return 0;
2743 }
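/*
 * Editorial example (devargs string illustrative): with the handler
 * above, an EAL device argument such as
 *
 *   -a <PCI BDF>,rxq_cqe_comp_en=1,mprq_en=1,txq_inline_max=256
 *
 * enables CQE compression with miniCQE format 1, enables Multi-Packet
 * RQ, and caps the packet size inlined with ordinary SEND at 256 bytes.
 * For rxq_cqe_comp_en, the low bits select the miniCQE format while the
 * MLX5_RXQ_ENH_CQE_COMP_MASK bit additionally requests the enhanced
 * compression mode, exactly as decoded above.
 */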
2744 
2745 /**
2746  * Parse user port parameters and adjust them according to device capabilities.
2747  *
2748  * @param priv
2749  *   Pointer to the private device data structure.
2750  * @param mkvlist
2751  *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
2752  * @param config
2753  *   Pointer to port configuration structure.
2754  *
2755  * @return
2756  *   0 on success, a negative errno value otherwise and rte_errno is set.
2757  */
2758 int
2759 mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist,
2760 		      struct mlx5_port_config *config)
2761 {
2762 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
2763 	struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
2764 	bool devx = priv->sh->cdev->config.devx;
2765 	const char **params = (const char *[]){
2766 		MLX5_RXQ_CQE_COMP_EN,
2767 		MLX5_RXQ_PKT_PAD_EN,
2768 		MLX5_RX_MPRQ_EN,
2769 		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
2770 		MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
2771 		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
2772 		MLX5_RXQS_MIN_MPRQ,
2773 		MLX5_TXQ_INLINE,
2774 		MLX5_TXQ_INLINE_MIN,
2775 		MLX5_TXQ_INLINE_MAX,
2776 		MLX5_TXQ_INLINE_MPW,
2777 		MLX5_TXQS_MIN_INLINE,
2778 		MLX5_TXQS_MAX_VEC,
2779 		MLX5_TXQ_MPW_EN,
2780 		MLX5_TXQ_MPW_HDR_DSEG_EN,
2781 		MLX5_TXQ_MAX_INLINE_LEN,
2782 		MLX5_TX_VEC_EN,
2783 		MLX5_RX_VEC_EN,
2784 		MLX5_REPRESENTOR,
2785 		MLX5_MAX_DUMP_FILES_NUM,
2786 		MLX5_LRO_TIMEOUT_USEC,
2787 		MLX5_HP_BUF_SIZE,
2788 		MLX5_DELAY_DROP,
2789 		NULL,
2790 	};
2791 	int ret = 0;
2792 
2793 	/* Default configuration. */
2794 	memset(config, 0, sizeof(*config));
2795 	config->mps = MLX5_ARG_UNSET;
2796 	config->cqe_comp = 1;
2797 	config->rx_vec_en = 1;
2798 	config->txq_inline_max = MLX5_ARG_UNSET;
2799 	config->txq_inline_min = MLX5_ARG_UNSET;
2800 	config->txq_inline_mpw = MLX5_ARG_UNSET;
2801 	config->txqs_inline = MLX5_ARG_UNSET;
2802 	config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
2803 	config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
2804 	config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
2805 	config->mprq.log_stride_size = MLX5_ARG_UNSET;
2806 	config->log_hp_size = MLX5_ARG_UNSET;
2807 	config->std_delay_drop = 0;
2808 	config->hp_delay_drop = 0;
2809 	if (mkvlist != NULL) {
2810 		/* Process parameters. */
2811 		ret = mlx5_kvargs_process(mkvlist, params,
2812 					  mlx5_port_args_check_handler, config);
2813 		if (ret) {
2814 			DRV_LOG(ERR, "Failed to process port arguments: %s",
2815 				strerror(rte_errno));
2816 			return -rte_errno;
2817 		}
2818 	}
2819 	/* Adjust parameters according to device capabilities. */
2820 	if (config->hw_padding && !dev_cap->hw_padding) {
2821 		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported.");
2822 		config->hw_padding = 0;
2823 	} else if (config->hw_padding) {
2824 		DRV_LOG(DEBUG, "Rx end alignment padding is enabled.");
2825 	}
2826 	/*
2827 	 * MPW is disabled by default, while the Enhanced MPW is enabled
2828 	 * by default.
2829 	 */
2830 	if (config->mps == MLX5_ARG_UNSET)
2831 		config->mps = (dev_cap->mps == MLX5_MPW_ENHANCED) ?
2832 			      MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
2833 	else
2834 		config->mps = config->mps ? dev_cap->mps : MLX5_MPW_DISABLED;
2835 	DRV_LOG(INFO, "%sMPS is %s",
2836 		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
2837 		config->mps == MLX5_MPW ? "legacy " : "",
2838 		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
2839 	if (priv->sh->config.lro_allowed) {
2840 		/*
2841 		 * If LRO timeout is not configured by application,
2842 		 * use the minimal supported value.
2843 		 */
2844 		if (!config->lro_timeout)
2845 			config->lro_timeout =
2846 				       hca_attr->lro_timer_supported_periods[0];
2847 		DRV_LOG(DEBUG, "LRO session timeout set to %d usec.",
2848 			config->lro_timeout);
2849 	}
2850 	if (config->cqe_comp && !dev_cap->cqe_comp) {
2851 		DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
2852 		config->cqe_comp = 0;
2853 	}
2854 	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
2855 	    (!devx || !hca_attr->mini_cqe_resp_flow_tag)) {
2856 		DRV_LOG(WARNING,
2857 			"Flow Tag CQE compression format isn't supported.");
2858 		config->cqe_comp = 0;
2859 	}
2860 	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
2861 	    (!devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
2862 		DRV_LOG(WARNING,
2863 			"L3/L4 Header CQE compression format isn't supported.");
2864 		config->cqe_comp = 0;
2865 	}
2866 	if (config->enh_cqe_comp && !hca_attr->enhanced_cqe_compression) {
2867 		DRV_LOG(WARNING,
2868 			"Enhanced CQE compression isn't supported.");
2869 		config->enh_cqe_comp = 0;
2870 	}
2871 	DRV_LOG(DEBUG, "%sRx CQE compression is %ssupported.",
2872 		config->enh_cqe_comp ? "Enhanced " : "",
2873 		config->cqe_comp ? "" : "not ");
2874 	if ((config->std_delay_drop || config->hp_delay_drop) &&
2875 	    !dev_cap->rq_delay_drop_en) {
2876 		config->std_delay_drop = 0;
2877 		config->hp_delay_drop = 0;
2878 		DRV_LOG(WARNING, "dev_port-%u: Rxq delay drop isn't supported.",
2879 			priv->dev_port);
2880 	}
2881 	if (config->mprq.enabled && !priv->sh->dev_cap.mprq.enabled) {
2882 		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
2883 		config->mprq.enabled = 0;
2884 	}
2885 	if (config->max_dump_files_num == 0)
2886 		config->max_dump_files_num = 128;
2887 	/* Detect minimal data bytes to inline. */
2888 	mlx5_set_min_inline(priv);
2889 	DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.",
2890 		config->hw_vlan_insert ? "" : "not ");
2891 	DRV_LOG(DEBUG, "\"rxq_pkt_pad_en\" is %u.", config->hw_padding);
2892 	DRV_LOG(DEBUG, "\"rxq_cqe_comp_en\" is %u.", config->cqe_comp);
2893 	DRV_LOG(DEBUG, "\"cqe_comp_fmt\" is %u.", config->cqe_comp_fmt);
2894 	DRV_LOG(DEBUG, "\"enh_cqe_comp\" is %u.", config->enh_cqe_comp);
2895 	DRV_LOG(DEBUG, "\"rx_vec_en\" is %u.", config->rx_vec_en);
2896 	DRV_LOG(DEBUG, "Standard \"delay_drop\" is %u.",
2897 		config->std_delay_drop);
2898 	DRV_LOG(DEBUG, "Hairpin \"delay_drop\" is %u.", config->hp_delay_drop);
2899 	DRV_LOG(DEBUG, "\"max_dump_files_num\" is %u.",
2900 		config->max_dump_files_num);
2901 	DRV_LOG(DEBUG, "\"log_hp_size\" is %u.", config->log_hp_size);
2902 	DRV_LOG(DEBUG, "\"mprq_en\" is %u.", config->mprq.enabled);
2903 	DRV_LOG(DEBUG, "\"mprq_log_stride_num\" is %u.",
2904 		config->mprq.log_stride_num);
2905 	DRV_LOG(DEBUG, "\"mprq_log_stride_size\" is %u.",
2906 		config->mprq.log_stride_size);
2907 	DRV_LOG(DEBUG, "\"mprq_max_memcpy_len\" is %u.",
2908 		config->mprq.max_memcpy_len);
2909 	DRV_LOG(DEBUG, "\"rxqs_min_mprq\" is %u.", config->mprq.min_rxqs_num);
2910 	DRV_LOG(DEBUG, "\"lro_timeout_usec\" is %u.", config->lro_timeout);
2911 	DRV_LOG(DEBUG, "\"txq_mpw_en\" is %d.", config->mps);
2912 	DRV_LOG(DEBUG, "\"txqs_min_inline\" is %d.", config->txqs_inline);
2913 	DRV_LOG(DEBUG, "\"txq_inline_min\" is %d.", config->txq_inline_min);
2914 	DRV_LOG(DEBUG, "\"txq_inline_max\" is %d.", config->txq_inline_max);
2915 	DRV_LOG(DEBUG, "\"txq_inline_mpw\" is %d.", config->txq_inline_mpw);
2916 	return 0;
2917 }
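/*
 * Editorial note (illustration): the MLX5_ARG_UNSET defaults above are
 * resolved against device capabilities. For example, with no
 * "txq_mpw_en" devarg and a device reporting dev_cap->mps ==
 * MLX5_MPW_ENHANCED, enhanced MPW is selected; an explicit
 * "txq_mpw_en=0" forces MLX5_MPW_DISABLED regardless of the capability.
 */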
2918 
2919 /**
2920  * Print the key for device argument.
2921  * Print the key for a device argument.
2922  *
2923  * It is a "dummy" handler whose sole purpose is to enable using the
2924  * mlx5_kvargs_process() function, which marks devargs as used.
2925  * @param key
2926  *   Key argument.
2927  * @param val
2928  *   Value associated with key, unused.
2929  * @param opaque
2930  *   Unused, can be NULL.
2931  *
2932  * @return
2933  *   0 on success, function cannot fail.
2934  */
2935 static int
2936 mlx5_dummy_handler(const char *key, const char *val, void *opaque)
2937 {
2938 	DRV_LOG(DEBUG, "\tKey: \"%s\" is set as used.", key);
2939 	RTE_SET_USED(opaque);
2940 	RTE_SET_USED(val);
2941 	return 0;
2942 }
2943 
2944 /**
2945  * Set requested devargs as used when device is already spawned.
2946  *
2947  * It is necessary since it is valid to probe again for an existing device;
2948  * if its devargs are not marked as used, mlx5_kvargs_validate() will fail.
2949  *
2950  * @param name
2951  *   Name of the existing device.
2952  * @param port_id
2953  *   Port identifier of the device.
2954  * @param mkvlist
2955  *   Pointer to mlx5 kvargs control to mark as used.
2956  */
2957 void
2958 mlx5_port_args_set_used(const char *name, uint16_t port_id,
2959 			struct mlx5_kvargs_ctrl *mkvlist)
2960 {
2961 	const char **params = (const char *[]){
2962 		MLX5_RXQ_CQE_COMP_EN,
2963 		MLX5_RXQ_PKT_PAD_EN,
2964 		MLX5_RX_MPRQ_EN,
2965 		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
2966 		MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
2967 		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
2968 		MLX5_RXQS_MIN_MPRQ,
2969 		MLX5_TXQ_INLINE,
2970 		MLX5_TXQ_INLINE_MIN,
2971 		MLX5_TXQ_INLINE_MAX,
2972 		MLX5_TXQ_INLINE_MPW,
2973 		MLX5_TXQS_MIN_INLINE,
2974 		MLX5_TXQS_MAX_VEC,
2975 		MLX5_TXQ_MPW_EN,
2976 		MLX5_TXQ_MPW_HDR_DSEG_EN,
2977 		MLX5_TXQ_MAX_INLINE_LEN,
2978 		MLX5_TX_VEC_EN,
2979 		MLX5_RX_VEC_EN,
2980 		MLX5_REPRESENTOR,
2981 		MLX5_MAX_DUMP_FILES_NUM,
2982 		MLX5_LRO_TIMEOUT_USEC,
2983 		MLX5_HP_BUF_SIZE,
2984 		MLX5_DELAY_DROP,
2985 		NULL,
2986 	};
2987 
2988 	/* Secondary process should not handle devargs. */
2989 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2990 		return;
2991 	MLX5_ASSERT(mkvlist != NULL);
2992 	DRV_LOG(DEBUG, "Ethernet device \"%s\" for port %u "
2993 		"already exists, set devargs as used:", name, port_id);
2994 	/* This function cannot fail with this handler. */
2995 	mlx5_kvargs_process(mkvlist, params, mlx5_dummy_handler, NULL);
2996 }
2997 
2998 /**
2999  * Check sibling device configurations when probing again.
3000  *
3001  * Sibling devices sharing an InfiniBand device context should have
3002  * compatible configurations. This applies to representors and bonding devices.
3003  *
3004  * @param cdev
3005  *   Pointer to mlx5 device structure.
3006  * @param mkvlist
3007  *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
3008  *
3009  * @return
3010  *   0 on success, a negative errno value otherwise and rte_errno is set.
3011  */
3012 int
3013 mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,
3014 			       struct mlx5_kvargs_ctrl *mkvlist)
3015 {
3016 	struct mlx5_dev_ctx_shared *sh = NULL;
3017 	struct mlx5_sh_config *config;
3018 	int ret;
3019 
3020 	/* Secondary process should not handle devargs. */
3021 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3022 		return 0;
3023 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
3024 	/* Search for IB context by common device pointer. */
3025 	LIST_FOREACH(sh, &dev_ctx_list, next)
3026 		if (sh->cdev == cdev)
3027 			break;
3028 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
3029 	/* No sh found for this device -> it is not being probed again. */
3030 	if (sh == NULL)
3031 		return 0;
3032 	config = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
3033 			     sizeof(struct mlx5_sh_config),
3034 			     RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
3035 	if (config == NULL) {
3036 		rte_errno = ENOMEM;
3037 		return -rte_errno;
3038 	}
3039 	/*
3040 	 * Create a temporary IB context configuration structure according to
3041 	 * the new devargs attached in probing again.
3042 	 */
3043 	ret = mlx5_shared_dev_ctx_args_config(sh, mkvlist, config);
3044 	if (ret) {
3045 		DRV_LOG(ERR, "Failed to process device configuration: %s",
3046 			strerror(rte_errno));
3047 		mlx5_free(config);
3048 		return ret;
3049 	}
3050 	/*
3051 	 * Check that the temporary structure matches the existing
3052 	 * IB context structure.
3053 	 */
3054 	if (sh->config.dv_flow_en ^ config->dv_flow_en) {
3055 		DRV_LOG(ERR, "\"dv_flow_en\" "
3056 			"configuration mismatch for shared %s context.",
3057 			sh->ibdev_name);
3058 		goto error;
3059 	}
3060 	if ((sh->config.dv_xmeta_en ^ config->dv_xmeta_en) ||
3061 	    (sh->config.dv_miss_info ^ config->dv_miss_info)) {
3062 		DRV_LOG(ERR, "\"dv_xmeta_en\" "
3063 			"configuration mismatch for shared %s context.",
3064 			sh->ibdev_name);
3065 		goto error;
3066 	}
3067 	if (sh->config.dv_esw_en ^ config->dv_esw_en) {
3068 		DRV_LOG(ERR, "\"dv_esw_en\" "
3069 			"configuration mismatch for shared %s context.",
3070 			sh->ibdev_name);
3071 		goto error;
3072 	}
3073 	if (sh->config.reclaim_mode ^ config->reclaim_mode) {
3074 		DRV_LOG(ERR, "\"reclaim_mode\" "
3075 			"configuration mismatch for shared %s context.",
3076 			sh->ibdev_name);
3077 		goto error;
3078 	}
3079 	if (sh->config.allow_duplicate_pattern ^
3080 	    config->allow_duplicate_pattern) {
3081 		DRV_LOG(ERR, "\"allow_duplicate_pattern\" "
3082 			"configuration mismatch for shared %s context.",
3083 			sh->ibdev_name);
3084 		goto error;
3085 	}
3086 	if (sh->config.fdb_def_rule ^ config->fdb_def_rule) {
3087 		DRV_LOG(ERR, "\"fdb_def_rule_en\" configuration mismatch for shared %s context.",
3088 			sh->ibdev_name);
3089 		goto error;
3090 	}
3091 	if (sh->config.l3_vxlan_en ^ config->l3_vxlan_en) {
3092 		DRV_LOG(ERR, "\"l3_vxlan_en\" "
3093 			"configuration mismatch for shared %s context.",
3094 			sh->ibdev_name);
3095 		goto error;
3096 	}
3097 	if (sh->config.decap_en ^ config->decap_en) {
3098 		DRV_LOG(ERR, "\"decap_en\" "
3099 			"configuration mismatch for shared %s context.",
3100 			sh->ibdev_name);
3101 		goto error;
3102 	}
3103 	if (sh->config.lacp_by_user ^ config->lacp_by_user) {
3104 		DRV_LOG(ERR, "\"lacp_by_user\" "
3105 			"configuration mismatch for shared %s context.",
3106 			sh->ibdev_name);
3107 		goto error;
3108 	}
3109 	if (sh->config.tx_pp ^ config->tx_pp) {
3110 		DRV_LOG(ERR, "\"tx_pp\" "
3111 			"configuration mismatch for shared %s context.",
3112 			sh->ibdev_name);
3113 		goto error;
3114 	}
3115 	if (sh->config.tx_skew ^ config->tx_skew) {
3116 		DRV_LOG(ERR, "\"tx_skew\" "
3117 			"configuration mismatch for shared %s context.",
3118 			sh->ibdev_name);
3119 		goto error;
3120 	}
3121 	mlx5_free(config);
3122 	return 0;
3123 error:
3124 	mlx5_free(config);
3125 	rte_errno = EINVAL;
3126 	return -rte_errno;
3127 }
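/*
 * Editorial note (illustration): the checks above use integer XOR as an
 * inequality test, which covers both the boolean and the numeric
 * members compared here: for integers a and b, (a ^ b) is non-zero if
 * and only if a != b, e.g. (1 ^ 0) == 1 takes the mismatch branch while
 * (1 ^ 1) == 0 passes.
 */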
3128 
3129 /**
3130  * Configures the minimal amount of data to inline into WQE
3131  * while sending packets.
3132  *
3133  * - txq_inline_min has the highest priority if this
3134  *   key is specified in devargs;
3135  * - if DevX is enabled, the inline mode is queried from the
3136  *   device (HCA attributes and NIC vport context if needed);
3137  * - otherwise, L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
3138  *   and none (0 bytes) for other NICs.
3139  *
3140  * @param priv
3141  *   Pointer to the private device data structure.
3142  */
3143 void
3144 mlx5_set_min_inline(struct mlx5_priv *priv)
3145 {
3146 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
3147 	struct mlx5_port_config *config = &priv->config;
3148 
3149 	if (config->txq_inline_min != MLX5_ARG_UNSET) {
3150 		/* Application defines size of inlined data explicitly. */
3151 		if (priv->pci_dev != NULL) {
3152 			switch (priv->pci_dev->id.device_id) {
3153 			case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
3154 			case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
3155 				if (config->txq_inline_min <
3156 					       (int)MLX5_INLINE_HSIZE_L2) {
3157 					DRV_LOG(DEBUG,
3158 						"txq_inline_min aligned to minimal ConnectX-4 required value %d",
3159 						(int)MLX5_INLINE_HSIZE_L2);
3160 					config->txq_inline_min =
3161 							MLX5_INLINE_HSIZE_L2;
3162 				}
3163 				break;
3164 			}
3165 		}
3166 		goto exit;
3167 	}
3168 	if (hca_attr->eth_net_offloads) {
3169 		/* We have DevX enabled, inline mode queried successfully. */
3170 		switch (hca_attr->wqe_inline_mode) {
3171 		case MLX5_CAP_INLINE_MODE_L2:
3172 			/* outer L2 header must be inlined. */
3173 			config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
3174 			goto exit;
3175 		case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
3176 			/* No inline data is required by the NIC. */
3177 			config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
3178 			config->hw_vlan_insert =
3179 				hca_attr->wqe_vlan_insert;
3180 			DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
3181 			goto exit;
3182 		case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
3183 			/* inline mode is defined by NIC vport context. */
3184 			if (!hca_attr->eth_virt)
3185 				break;
3186 			switch (hca_attr->vport_inline_mode) {
3187 			case MLX5_INLINE_MODE_NONE:
3188 				config->txq_inline_min =
3189 					MLX5_INLINE_HSIZE_NONE;
3190 				goto exit;
3191 			case MLX5_INLINE_MODE_L2:
3192 				config->txq_inline_min =
3193 					MLX5_INLINE_HSIZE_L2;
3194 				goto exit;
3195 			case MLX5_INLINE_MODE_IP:
3196 				config->txq_inline_min =
3197 					MLX5_INLINE_HSIZE_L3;
3198 				goto exit;
3199 			case MLX5_INLINE_MODE_TCP_UDP:
3200 				config->txq_inline_min =
3201 					MLX5_INLINE_HSIZE_L4;
3202 				goto exit;
3203 			case MLX5_INLINE_MODE_INNER_L2:
3204 				config->txq_inline_min =
3205 					MLX5_INLINE_HSIZE_INNER_L2;
3206 				goto exit;
3207 			case MLX5_INLINE_MODE_INNER_IP:
3208 				config->txq_inline_min =
3209 					MLX5_INLINE_HSIZE_INNER_L3;
3210 				goto exit;
3211 			case MLX5_INLINE_MODE_INNER_TCP_UDP:
3212 				config->txq_inline_min =
3213 					MLX5_INLINE_HSIZE_INNER_L4;
3214 				goto exit;
3215 			}
3216 		}
3217 	}
3218 	if (priv->pci_dev == NULL) {
3219 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
3220 		goto exit;
3221 	}
3222 	/*
3223 	 * We get here if we are unable to deduce
3224 	 * inline data size with DevX. Try PCI ID
3225 	 * to determine old NICs.
3226 	 */
3227 	switch (priv->pci_dev->id.device_id) {
3228 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
3229 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
3230 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
3231 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
3232 		config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
3233 		config->hw_vlan_insert = 0;
3234 		break;
3235 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
3236 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
3237 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
3238 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
3239 		/*
3240 		 * These NICs support VLAN insertion from WQE and
3241 		 * report the wqe_vlan_insert flag. But there is a bug
3242 		 * and PFC control may be broken, so disable the feature.
3243 		 */
3244 		config->hw_vlan_insert = 0;
3245 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
3246 		break;
3247 	default:
3248 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
3249 		break;
3250 	}
3251 exit:
3252 	DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
3253 }
3254 
3255 /**
3256  * Configures the metadata mask fields in the shared context.
3257  *
3258  * @param [in] dev
3259  *   Pointer to Ethernet device.
3260  */
3261 void
3262 mlx5_set_metadata_mask(struct rte_eth_dev *dev)
3263 {
3264 	struct mlx5_priv *priv = dev->data->dev_private;
3265 	struct mlx5_dev_ctx_shared *sh = priv->sh;
3266 	uint32_t meta, mark, reg_c0;
3267 
3268 	reg_c0 = ~priv->vport_meta_mask;
3269 	switch (sh->config.dv_xmeta_en) {
3270 	case MLX5_XMETA_MODE_LEGACY:
3271 		meta = UINT32_MAX;
3272 		mark = MLX5_FLOW_MARK_MASK;
3273 		break;
3274 	case MLX5_XMETA_MODE_META16:
3275 		meta = reg_c0 >> rte_bsf32(reg_c0);
3276 		mark = MLX5_FLOW_MARK_MASK;
3277 		break;
3278 	case MLX5_XMETA_MODE_META32:
3279 		meta = UINT32_MAX;
3280 		mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK;
3281 		break;
3282 	case MLX5_XMETA_MODE_META32_HWS:
3283 		meta = UINT32_MAX;
3284 		mark = MLX5_FLOW_MARK_MASK;
3285 		break;
3286 	default:
3287 		meta = 0;
3288 		mark = 0;
3289 		MLX5_ASSERT(false);
3290 		break;
3291 	}
3292 	if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
3293 		DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
3294 				 sh->dv_mark_mask, mark);
3295 	else
3296 		sh->dv_mark_mask = mark;
3297 	if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
3298 		DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
3299 				 sh->dv_meta_mask, meta);
3300 	else
3301 		sh->dv_meta_mask = meta;
3302 	if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
3303 		DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
3304 				 sh->dv_regc0_mask, reg_c0);
3305 	else
3306 		sh->dv_regc0_mask = reg_c0;
3307 	DRV_LOG(DEBUG, "metadata mode %u", sh->config.dv_xmeta_en);
3308 	DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask);
3309 	DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask);
3310 	DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
3311 }
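/*
 * Editorial worked example (vport_meta_mask values assumed): in
 * MLX5_XMETA_MODE_META16 mode with priv->vport_meta_mask == 0xFFFF0000,
 * reg_c0 == 0x0000FFFF, rte_bsf32(reg_c0) == 0 and meta == 0x0000FFFF.
 * With the inverse split, vport_meta_mask == 0x0000FFFF gives
 * reg_c0 == 0xFFFF0000, rte_bsf32(reg_c0) == 16 and again
 * meta == (0xFFFF0000 >> 16) == 0x0000FFFF, i.e. a 16-bit META field
 * normalized to the low bits in both cases.
 */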
3312 
int
rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
{
	static const char *const dynf_names[] = {
		RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
		RTE_MBUF_DYNFLAG_METADATA_NAME,
		RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME
	};
	unsigned int i;

	if (n < RTE_DIM(dynf_names))
		return -ENOMEM;
	for (i = 0; i < RTE_DIM(dynf_names); i++) {
		if (names[i] == NULL)
			return -EINVAL;
		strcpy(names[i], dynf_names[i]);
	}
	return RTE_DIM(dynf_names);
}

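/*
 * A caller-side usage sketch (the buffer count of 8 is an arbitrary
 * assumption; each names[i] must be large enough to hold a flag name):
 *
 *	char buf[8][RTE_MBUF_DYN_NAMESIZE];
 *	char *names[8];
 *	int i, ret;
 *
 *	for (i = 0; i < 8; i++)
 *		names[i] = buf[i];
 *	ret = rte_pmd_mlx5_get_dyn_flag_names(names, RTE_DIM(names));
 *	for (i = 0; i < ret; i++)
 *		printf("mlx5 dynamic flag: %s\n", names[i]);
 */
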
/**
 * Look for the Ethernet device belonging to the mlx5 driver.
 *
 * @param[in] port_id
 *   port_id to start looking for the device.
 * @param[in] odev
 *   Pointer to the hint device. While a device is being probed,
 *   its siblings (the master and the preceding representors) might
 *   not have a driver assigned yet, because mlx5_os_pci_probe()
 *   has not completed; in that case matching on the hint device
 *   detects the sibling devices.
 *
 * @return
 *   port_id of the found device, RTE_MAX_ETHPORTS if not found.
 */
uint16_t
mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
{
	while (port_id < RTE_MAX_ETHPORTS) {
		struct rte_eth_dev *dev = &rte_eth_devices[port_id];

		if (dev->state != RTE_ETH_DEV_UNUSED &&
		    dev->device &&
		    (dev->device == odev ||
		     (dev->device->driver &&
		     dev->device->driver->name &&
		     ((strcmp(dev->device->driver->name,
			      MLX5_PCI_DRIVER_NAME) == 0) ||
		      (strcmp(dev->device->driver->name,
			      MLX5_AUXILIARY_DRIVER_NAME) == 0)))))
			break;
		port_id++;
	}
	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;
	return port_id;
}

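/*
 * Typical iteration pattern over all mlx5 ports matching a given
 * rte_device; process_port() is a hypothetical callback. The
 * MLX5_ETH_FOREACH_DEV() helper in mlx5.h wraps this loop:
 *
 *	uint16_t port_id;
 *
 *	for (port_id = mlx5_eth_find_next(0, odev);
 *	     port_id < RTE_MAX_ETHPORTS;
 *	     port_id = mlx5_eth_find_next(port_id + 1, odev))
 *		process_port(&rte_eth_devices[port_id]);
 */
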
/**
 * Callback to remove a device.
 *
 * This function removes all Ethernet devices belonging to a given device.
 *
 * @param[in] cdev
 *   Pointer to the generic device.
 *
 * @return
 *   0 on success, -EIO if any port failed to close.
 */
int
mlx5_net_remove(struct mlx5_common_device *cdev)
{
	uint16_t port_id;
	int ret = 0;

	RTE_ETH_FOREACH_DEV_OF(port_id, cdev->dev) {
		/*
		 * mlx5_dev_close() is not registered for secondary
		 * processes, so call the close function explicitly
		 * in that case.
		 */
		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
			ret |= mlx5_dev_close(&rte_eth_devices[port_id]);
		else
			ret |= rte_eth_dev_close(port_id);
	}
	return ret == 0 ? 0 : -EIO;
}

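/*
 * This callback is normally reached through the common bus removal path;
 * an application-side sketch that would end up here (error handling
 * elided, port_id assumed valid):
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0 && info.device != NULL)
 *		rte_dev_remove(info.device);
 */
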
static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_BLUEFIELD)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_BLUEFIELDVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_BLUEFIELD2)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX7)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_BLUEFIELD3)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_class_driver mlx5_net_driver = {
	.drv_class = MLX5_CLASS_ETH,
	.name = RTE_STR(MLX5_ETH_DRIVER_NAME),
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_os_net_probe,
	.remove = mlx5_net_remove,
	.probe_again = 1,
	.intr_lsc = 1,
	.intr_rmv = 1,
};

/* Initialize driver log type. */
RTE_LOG_REGISTER_DEFAULT(mlx5_logtype, NOTICE)

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL);
	mlx5_common_init();
	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	if (mlx5_glue)
		mlx5_class_driver_register(&mlx5_net_driver);
}

RTE_PMD_EXPORT_NAME(MLX5_ETH_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_ETH_DRIVER_NAME, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_ETH_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");