xref: /dpdk/drivers/net/mlx5/linux/mlx5_os.c (revision 55509e3a49fb28317c1e56a534cdcc4a3849df79)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2020 Mellanox Technologies, Ltd
4  */
5 
6 #include <stddef.h>
7 #include <unistd.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <stdlib.h>
11 #include <errno.h>
12 #include <net/if.h>
13 #include <linux/rtnetlink.h>
14 #include <linux/sockios.h>
15 #include <linux/ethtool.h>
16 #include <fcntl.h>
17 
18 #include <rte_malloc.h>
19 #include <rte_ethdev_driver.h>
20 #include <rte_ethdev_pci.h>
21 #include <rte_pci.h>
22 #include <rte_bus_pci.h>
23 #include <rte_common.h>
24 #include <rte_kvargs.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
27 #include <rte_string_fns.h>
28 #include <rte_alarm.h>
29 #include <rte_eal_paging.h>
30 
31 #include <mlx5_glue.h>
32 #include <mlx5_devx_cmds.h>
33 #include <mlx5_common.h>
34 #include <mlx5_common_mp.h>
35 #include <mlx5_common_mr.h>
36 #include <mlx5_malloc.h>
37 
38 #include "mlx5_defs.h"
39 #include "mlx5.h"
40 #include "mlx5_common_os.h"
41 #include "mlx5_utils.h"
42 #include "mlx5_rxtx.h"
43 #include "mlx5_autoconf.h"
44 #include "mlx5_mr.h"
45 #include "mlx5_flow.h"
46 #include "rte_pmd_mlx5.h"
47 #include "mlx5_verbs.h"
48 #include "mlx5_nl.h"
49 #include "mlx5_devx.h"
50 
51 #define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
52 
53 #ifndef HAVE_IBV_MLX5_MOD_MPW
54 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
55 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
56 #endif
57 
58 #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
59 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
60 #endif
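/*
 * The fallback definitions above mirror the rdma-core flag values and are
 * only used when an older rdma-core does not expose them itself.
 */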
61 
62 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
63 
64 /* Spinlock for mlx5_shared_data allocation. */
65 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
66 
67 /* Process local data for secondary processes. */
68 static struct mlx5_local_data mlx5_local_data;
69 
70 /**
71  * Set the completion channel file descriptor interrupt as non-blocking.
72  *
73  * @param[in] fd
74  *   The completion channel file descriptor (representing the interrupt)
75  *   to be set as non-blocking.
76  *
79  * @return
80  *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
81  */
82 int
83 mlx5_os_set_nonblock_channel_fd(int fd)
84 {
85 	int flags;
86 
87 	flags = fcntl(fd, F_GETFL);
88 	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
89 }
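
/*
 * A minimal illustrative caller, shown only to clarify the expected usage;
 * the "channel" variable is hypothetical and not part of this driver flow:
 *
 *	struct ibv_comp_channel *channel = ...;
 *
 *	if (mlx5_os_set_nonblock_channel_fd(channel->fd))
 *		DRV_LOG(WARNING, "failed to set channel fd to non-blocking");
 */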
90 
91 /**
92  * Get mlx5 device attributes. The glue function query_device_ex() is called
93  * with out parameter of type 'struct ibv_device_attr_ex *'. Then fill in mlx5
94  * device attributes from the glue out parameter.
95  *
96  * @param ctx
97  *   Pointer to ibv context.
98  *
99  * @param device_attr
100  *   Pointer to mlx5 device attributes.
101  *
102  * @return
103  *   0 on success, non-zero error number otherwise.
104  */
105 int
106 mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
107 {
108 	int err;
109 	struct ibv_device_attr_ex attr_ex;
110 	memset(device_attr, 0, sizeof(*device_attr));
111 	err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
112 	if (err)
113 		return err;
114 
115 	device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
116 	device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
117 	device_attr->max_sge = attr_ex.orig_attr.max_sge;
118 	device_attr->max_cq = attr_ex.orig_attr.max_cq;
119 	device_attr->max_qp = attr_ex.orig_attr.max_qp;
120 	device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
121 	device_attr->max_rwq_indirection_table_size =
122 		attr_ex.rss_caps.max_rwq_indirection_table_size;
123 	device_attr->max_tso = attr_ex.tso_caps.max_tso;
124 	device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
125 
126 	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
127 	err = mlx5_glue->dv_query_device(ctx, &dv_attr);
128 	if (err)
129 		return err;
130 
131 	device_attr->flags = dv_attr.flags;
132 	device_attr->comp_mask = dv_attr.comp_mask;
133 #ifdef HAVE_IBV_MLX5_MOD_SWP
134 	device_attr->sw_parsing_offloads =
135 		dv_attr.sw_parsing_caps.sw_parsing_offloads;
136 #endif
137 	device_attr->min_single_stride_log_num_of_bytes =
138 		dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
139 	device_attr->max_single_stride_log_num_of_bytes =
140 		dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
141 	device_attr->min_single_wqe_log_num_of_strides =
142 		dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
143 	device_attr->max_single_wqe_log_num_of_strides =
144 		dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
145 	device_attr->stride_supported_qpts =
146 		dv_attr.striding_rq_caps.supported_qpts;
147 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
148 	device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
149 #endif
150 
151 	return err;
152 }
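
/*
 * A minimal illustrative caller (a sketch only; "sh" stands for an assumed
 * shared device context holding the ibv context):
 *
 *	struct mlx5_dev_attr dev_attr;
 *
 *	if (mlx5_os_get_dev_attr(sh->ctx, &dev_attr))
 *		DRV_LOG(ERR, "failed to query device attributes");
 */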
153 
154 /**
155  * Verbs callback to allocate memory. This function should allocate space
156  * according to the size provided, residing inside a huge page.
157  * Please note that all allocations must respect the alignment from libmlx5
158  * (i.e. currently rte_mem_page_size()).
159  *
160  * @param[in] size
161  *   The size in bytes of the memory to allocate.
162  * @param[in] data
163  *   A pointer to the callback data.
164  *
165  * @return
166  *   Allocated buffer, NULL otherwise and rte_errno is set.
167  */
168 static void *
169 mlx5_alloc_verbs_buf(size_t size, void *data)
170 {
171 	struct mlx5_priv *priv = data;
172 	void *ret;
173 	unsigned int socket = SOCKET_ID_ANY;
174 	size_t alignment = rte_mem_page_size();
175 	if (alignment == (size_t)-1) {
176 		DRV_LOG(ERR, "Failed to get mem page size");
177 		rte_errno = ENOMEM;
178 		return NULL;
179 	}
180 
181 	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
182 		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
183 
184 		socket = ctrl->socket;
185 	} else if (priv->verbs_alloc_ctx.type ==
186 		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
187 		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
188 
189 		socket = ctrl->socket;
190 	}
191 	MLX5_ASSERT(data != NULL);
192 	ret = mlx5_malloc(0, size, alignment, socket);
193 	if (!ret && size)
194 		rte_errno = ENOMEM;
195 	return ret;
196 }
197 
198 /**
199  * Verbs callback to free memory.
200  *
201  * @param[in] ptr
202  *   A pointer to the memory to free.
203  * @param[in] data
204  *   A pointer to the callback data.
205  */
206 static void
207 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
208 {
209 	MLX5_ASSERT(data != NULL);
210 	mlx5_free(ptr);
211 }
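
/*
 * Note: mlx5_alloc_verbs_buf() and mlx5_free_verbs_buf() are hooked into
 * libmlx5 later in mlx5_dev_spawn(), through mlx5_glue->dv_set_context_attr()
 * with MLX5DV_CTX_ATTR_BUF_ALLOCATORS, so that Verbs data-plane resources are
 * served from the PMD allocator.
 */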
212 
213 /**
214  * Initialize DR related data within private structure.
215  * The routine checks the reference counter and performs the actual
216  * resource creation/initialization only if the counter is zero.
217  *
218  * @param[in] priv
219  *   Pointer to the private device data structure.
220  *
221  * @return
222  *   Zero on success, positive error code otherwise.
223  */
224 static int
225 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
226 {
227 	struct mlx5_dev_ctx_shared *sh = priv->sh;
228 	char s[MLX5_HLIST_NAMESIZE];
229 	int err = 0;
230 
231 	if (!sh->flow_tbls)
232 		err = mlx5_alloc_table_hash_list(priv);
233 	else
234 		DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse.",
235 			(void *)sh->flow_tbls);
236 	if (err)
237 		return err;
238 	/* Create tags hash list table. */
239 	snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
240 	sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE);
241 	if (!sh->tag_table) {
242 		DRV_LOG(ERR, "tags with hash creation failed.");
243 		err = ENOMEM;
244 		goto error;
245 	}
246 	snprintf(s, sizeof(s), "%s_hdr_modify", sh->ibdev_name);
247 	sh->modify_cmds = mlx5_hlist_create(s, MLX5_FLOW_HDR_MODIFY_HTABLE_SZ);
248 	if (!sh->modify_cmds) {
249 		DRV_LOG(ERR, "hdr modify hash creation failed");
250 		err = ENOMEM;
251 		goto error;
252 	}
253 	snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name);
254 	sh->encaps_decaps = mlx5_hlist_create(s,
255 					      MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ);
256 	if (!sh->encaps_decaps) {
257 		DRV_LOG(ERR, "encap decap hash creation failed");
258 		err = ENOMEM;
259 		goto error;
260 	}
261 #ifdef HAVE_MLX5DV_DR
262 	void *domain;
263 
264 	if (sh->dv_refcnt) {
265 		/* Shared DV/DR structures are already initialized. */
266 		sh->dv_refcnt++;
267 		priv->dr_shared = 1;
268 		return 0;
269 	}
270 	/* Reference counter is zero, we should initialize structures. */
271 	domain = mlx5_glue->dr_create_domain(sh->ctx,
272 					     MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
273 	if (!domain) {
274 		DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
275 		err = errno;
276 		goto error;
277 	}
278 	sh->rx_domain = domain;
279 	domain = mlx5_glue->dr_create_domain(sh->ctx,
280 					     MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
281 	if (!domain) {
282 		DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
283 		err = errno;
284 		goto error;
285 	}
286 	pthread_mutex_init(&sh->dv_mutex, NULL);
287 	sh->tx_domain = domain;
288 #ifdef HAVE_MLX5DV_DR_ESWITCH
289 	if (priv->config.dv_esw_en) {
290 		domain  = mlx5_glue->dr_create_domain
291 			(sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
292 		if (!domain) {
293 			DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
294 			err = errno;
295 			goto error;
296 		}
297 		sh->fdb_domain = domain;
298 		sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
299 	}
300 #endif
301 	if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
302 		mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
303 		mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
304 		if (sh->fdb_domain)
305 			mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
306 	}
307 	sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
308 #endif /* HAVE_MLX5DV_DR */
309 	sh->dv_refcnt++;
310 	priv->dr_shared = 1;
311 	return 0;
312 error:
313 	/* Rollback the created objects. */
314 	if (sh->rx_domain) {
315 		mlx5_glue->dr_destroy_domain(sh->rx_domain);
316 		sh->rx_domain = NULL;
317 	}
318 	if (sh->tx_domain) {
319 		mlx5_glue->dr_destroy_domain(sh->tx_domain);
320 		sh->tx_domain = NULL;
321 	}
322 	if (sh->fdb_domain) {
323 		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
324 		sh->fdb_domain = NULL;
325 	}
326 	if (sh->esw_drop_action) {
327 		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
328 		sh->esw_drop_action = NULL;
329 	}
330 	if (sh->pop_vlan_action) {
331 		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
332 		sh->pop_vlan_action = NULL;
333 	}
334 	if (sh->encaps_decaps) {
335 		mlx5_hlist_destroy(sh->encaps_decaps, NULL, NULL);
336 		sh->encaps_decaps = NULL;
337 	}
338 	if (sh->modify_cmds) {
339 		mlx5_hlist_destroy(sh->modify_cmds, NULL, NULL);
340 		sh->modify_cmds = NULL;
341 	}
342 	if (sh->tag_table) {
343 		/* Tags should have been destroyed with the flows before. */
344 		mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
345 		sh->tag_table = NULL;
346 	}
347 	mlx5_free_table_hash_list(priv);
348 	return err;
349 }
350 
351 /**
352  * Destroy DR related data within private structure.
353  *
354  * @param[in] priv
355  *   Pointer to the private device data structure.
356  */
357 void
358 mlx5_os_free_shared_dr(struct mlx5_priv *priv)
359 {
360 	struct mlx5_dev_ctx_shared *sh;
361 
362 	if (!priv->dr_shared)
363 		return;
364 	priv->dr_shared = 0;
365 	sh = priv->sh;
366 	MLX5_ASSERT(sh);
367 #ifdef HAVE_MLX5DV_DR
368 	MLX5_ASSERT(sh->dv_refcnt);
369 	if (sh->dv_refcnt && --sh->dv_refcnt)
370 		return;
371 	if (sh->rx_domain) {
372 		mlx5_glue->dr_destroy_domain(sh->rx_domain);
373 		sh->rx_domain = NULL;
374 	}
375 	if (sh->tx_domain) {
376 		mlx5_glue->dr_destroy_domain(sh->tx_domain);
377 		sh->tx_domain = NULL;
378 	}
379 #ifdef HAVE_MLX5DV_DR_ESWITCH
380 	if (sh->fdb_domain) {
381 		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
382 		sh->fdb_domain = NULL;
383 	}
384 	if (sh->esw_drop_action) {
385 		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
386 		sh->esw_drop_action = NULL;
387 	}
388 #endif
389 	if (sh->pop_vlan_action) {
390 		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
391 		sh->pop_vlan_action = NULL;
392 	}
393 	pthread_mutex_destroy(&sh->dv_mutex);
394 #endif /* HAVE_MLX5DV_DR */
395 	if (sh->encaps_decaps) {
396 		mlx5_hlist_destroy(sh->encaps_decaps, NULL, NULL);
397 		sh->encaps_decaps = NULL;
398 	}
399 	if (sh->modify_cmds) {
400 		mlx5_hlist_destroy(sh->modify_cmds, NULL, NULL);
401 		sh->modify_cmds = NULL;
402 	}
403 	if (sh->tag_table) {
404 		/* Tags should have been destroyed with the flows before. */
405 		mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
406 		sh->tag_table = NULL;
407 	}
408 	mlx5_free_table_hash_list(priv);
409 }
410 
411 /**
412  * Initialize shared data between primary and secondary process.
413  *
414  * A memzone is reserved by primary process and secondary processes attach to
415  * the memzone.
416  *
417  * @return
418  *   0 on success, a negative errno value otherwise and rte_errno is set.
419  */
420 static int
421 mlx5_init_shared_data(void)
422 {
423 	const struct rte_memzone *mz;
424 	int ret = 0;
425 
426 	rte_spinlock_lock(&mlx5_shared_data_lock);
427 	if (mlx5_shared_data == NULL) {
428 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
429 			/* Allocate shared memory. */
430 			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
431 						 sizeof(*mlx5_shared_data),
432 						 SOCKET_ID_ANY, 0);
433 			if (mz == NULL) {
434 				DRV_LOG(ERR,
435 					"Cannot allocate mlx5 shared data");
436 				ret = -rte_errno;
437 				goto error;
438 			}
439 			mlx5_shared_data = mz->addr;
440 			memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
441 			rte_spinlock_init(&mlx5_shared_data->lock);
442 		} else {
443 			/* Lookup allocated shared memory. */
444 			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
445 			if (mz == NULL) {
446 				DRV_LOG(ERR,
447 					"Cannot attach mlx5 shared data");
448 				ret = -rte_errno;
449 				goto error;
450 			}
451 			mlx5_shared_data = mz->addr;
452 			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
453 		}
454 	}
455 error:
456 	rte_spinlock_unlock(&mlx5_shared_data_lock);
457 	return ret;
458 }
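
/*
 * Note: only the primary process reserves the memzone; secondary processes
 * attach to it by name and keep their per-process state in the static
 * mlx5_local_data defined above.
 */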
459 
460 /**
461  * PMD global initialization.
462  *
463  * Independent of any individual device, this function initializes global
464  * per-PMD data structures, distinguishing primary and secondary processes.
465  * Hence, each initialization is called once per process.
466  *
467  * @return
468  *   0 on success, a negative errno value otherwise and rte_errno is set.
469  */
470 static int
471 mlx5_init_once(void)
472 {
473 	struct mlx5_shared_data *sd;
474 	struct mlx5_local_data *ld = &mlx5_local_data;
475 	int ret = 0;
476 
477 	if (mlx5_init_shared_data())
478 		return -rte_errno;
479 	sd = mlx5_shared_data;
480 	MLX5_ASSERT(sd);
481 	rte_spinlock_lock(&sd->lock);
482 	switch (rte_eal_process_type()) {
483 	case RTE_PROC_PRIMARY:
484 		if (sd->init_done)
485 			break;
486 		LIST_INIT(&sd->mem_event_cb_list);
487 		rte_rwlock_init(&sd->mem_event_rwlock);
488 		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
489 						mlx5_mr_mem_event_cb, NULL);
490 		ret = mlx5_mp_init_primary(MLX5_MP_NAME,
491 					   mlx5_mp_os_primary_handle);
492 		if (ret)
493 			goto out;
494 		sd->init_done = true;
495 		break;
496 	case RTE_PROC_SECONDARY:
497 		if (ld->init_done)
498 			break;
499 		ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
500 					     mlx5_mp_os_secondary_handle);
501 		if (ret)
502 			goto out;
503 		++sd->secondary_cnt;
504 		ld->init_done = true;
505 		break;
506 	default:
507 		break;
508 	}
509 out:
510 	rte_spinlock_unlock(&sd->lock);
511 	return ret;
512 }
513 
514 /**
515  * Create the Tx queue DevX/Verbs object.
516  *
517  * @param dev
518  *   Pointer to Ethernet device.
519  * @param idx
520  *   Queue index in DPDK Tx queue array.
521  *
522  * @return
523  *   0 on success, a negative errno value otherwise and rte_errno is set.
524  */
525 static int
526 mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
527 {
528 	struct mlx5_priv *priv = dev->data->dev_private;
529 	struct mlx5_dev_config *config = &priv->config;
530 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
531 	struct mlx5_txq_ctrl *txq_ctrl =
532 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
533 
534 	/*
535 	 * When DevX is supported, DV flow is enabled, and dest tir is enabled,
536 	 * hairpin functions use the DevX API.
537 	 * When, in addition, DV E-Switch is enabled and the DevX UAR offset is
538 	 * supported, all Tx functions also use the DevX API.
539 	 * Otherwise, all Tx functions use the Verbs API.
540 	 */
541 	if (config->devx && config->dv_flow_en && config->dest_tir) {
542 		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
543 			return mlx5_txq_devx_obj_new(dev, idx);
544 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
545 		if (config->dv_esw_en)
546 			return mlx5_txq_devx_obj_new(dev, idx);
547 #endif
548 	}
549 	return mlx5_txq_ibv_obj_new(dev, idx);
550 }
551 
552 /**
553  * Release a Tx DevX/Verbs queue object.
554  *
555  * @param txq_obj
556  *   DevX/Verbs Tx queue object.
557  */
558 static void
559 mlx5_os_txq_obj_release(struct mlx5_txq_obj *txq_obj)
560 {
561 	struct mlx5_dev_config *config = &txq_obj->txq_ctrl->priv->config;
562 
563 	if (config->devx && config->dv_flow_en && config->dest_tir) {
564 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
565 		if (config->dv_esw_en) {
566 			mlx5_txq_devx_obj_release(txq_obj);
567 			return;
568 		}
569 #endif
570 		if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
571 			mlx5_txq_devx_obj_release(txq_obj);
572 			return;
573 		}
574 	}
575 	mlx5_txq_ibv_obj_release(txq_obj);
576 }
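
/*
 * Note: the condition above intentionally mirrors the one in
 * mlx5_os_txq_obj_new(), so that a queue object created through DevX is
 * always released through DevX, and likewise for Verbs.
 */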
577 
578 /**
579  * Spawn an Ethernet device from Verbs information.
580  *
581  * @param dpdk_dev
582  *   Backing DPDK device.
583  * @param spawn
584  *   Verbs device parameters (name, port, switch_info) to spawn.
585  * @param config
586  *   Device configuration parameters.
587  *
588  * @return
589  *   A valid Ethernet device object on success, NULL otherwise and rte_errno
590  *   is set. The following errors are defined:
591  *
592  *   EBUSY: device is not supposed to be spawned.
593  *   EEXIST: device is already spawned.
594  */
595 static struct rte_eth_dev *
596 mlx5_dev_spawn(struct rte_device *dpdk_dev,
597 	       struct mlx5_dev_spawn_data *spawn,
598 	       struct mlx5_dev_config *config)
599 {
600 	const struct mlx5_switch_info *switch_info = &spawn->info;
601 	struct mlx5_dev_ctx_shared *sh = NULL;
602 	struct ibv_port_attr port_attr;
603 	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
604 	struct rte_eth_dev *eth_dev = NULL;
605 	struct mlx5_priv *priv = NULL;
606 	int err = 0;
607 	unsigned int hw_padding = 0;
608 	unsigned int mps;
609 	unsigned int cqe_comp;
610 	unsigned int cqe_pad = 0;
611 	unsigned int tunnel_en = 0;
612 	unsigned int mpls_en = 0;
613 	unsigned int swp = 0;
614 	unsigned int mprq = 0;
615 	unsigned int mprq_min_stride_size_n = 0;
616 	unsigned int mprq_max_stride_size_n = 0;
617 	unsigned int mprq_min_stride_num_n = 0;
618 	unsigned int mprq_max_stride_num_n = 0;
619 	struct rte_ether_addr mac;
620 	char name[RTE_ETH_NAME_MAX_LEN];
621 	int own_domain_id = 0;
622 	uint16_t port_id;
623 	unsigned int i;
624 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
625 	struct mlx5dv_devx_port devx_port = { .comp_mask = 0 };
626 #endif
627 
628 	/* Determine if this port representor is supposed to be spawned. */
629 	if (switch_info->representor && dpdk_dev->devargs) {
630 		struct rte_eth_devargs eth_da;
631 
632 		err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
633 		if (err) {
634 			rte_errno = -err;
635 			DRV_LOG(ERR, "failed to process device arguments: %s",
636 				strerror(rte_errno));
637 			return NULL;
638 		}
639 		for (i = 0; i < eth_da.nb_representor_ports; ++i)
640 			if (eth_da.representor_ports[i] ==
641 			    (uint16_t)switch_info->port_name)
642 				break;
643 		if (i == eth_da.nb_representor_ports) {
644 			rte_errno = EBUSY;
645 			return NULL;
646 		}
647 	}
648 	/* Build device name. */
649 	if (spawn->pf_bond <  0) {
650 		/* Single device. */
651 		if (!switch_info->representor)
652 			strlcpy(name, dpdk_dev->name, sizeof(name));
653 		else
654 			snprintf(name, sizeof(name), "%s_representor_%u",
655 				 dpdk_dev->name, switch_info->port_name);
656 	} else {
657 		/* Bonding device. */
658 		if (!switch_info->representor)
659 			snprintf(name, sizeof(name), "%s_%s",
660 				 dpdk_dev->name,
661 				 mlx5_os_get_dev_device_name(spawn->phys_dev));
662 		else
663 			snprintf(name, sizeof(name), "%s_%s_representor_%u",
664 				 dpdk_dev->name,
665 				 mlx5_os_get_dev_device_name(spawn->phys_dev),
666 				 switch_info->port_name);
667 	}
668 	/* check if the device is already spawned */
669 	if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
670 		rte_errno = EEXIST;
671 		return NULL;
672 	}
673 	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
674 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
675 		struct mlx5_mp_id mp_id;
676 
677 		eth_dev = rte_eth_dev_attach_secondary(name);
678 		if (eth_dev == NULL) {
679 			DRV_LOG(ERR, "can not attach rte ethdev");
680 			rte_errno = ENOMEM;
681 			return NULL;
682 		}
683 		eth_dev->device = dpdk_dev;
684 		eth_dev->dev_ops = &mlx5_os_dev_sec_ops;
685 		eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
686 		eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
687 		err = mlx5_proc_priv_init(eth_dev);
688 		if (err)
689 			return NULL;
690 		mp_id.port_id = eth_dev->data->port_id;
691 		strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
692 		/* Receive command fd from primary process */
693 		err = mlx5_mp_req_verbs_cmd_fd(&mp_id);
694 		if (err < 0)
695 			goto err_secondary;
696 		/* Remap UAR for Tx queues. */
697 		err = mlx5_tx_uar_init_secondary(eth_dev, err);
698 		if (err)
699 			goto err_secondary;
700 		/*
701 		 * Ethdev pointer is still required as input since
702 		 * the primary device is not accessible from the
703 		 * secondary process.
704 		 */
705 		eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
706 		eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
707 		return eth_dev;
708 err_secondary:
709 		mlx5_dev_close(eth_dev);
710 		return NULL;
711 	}
712 	/*
713 	 * Some parameters ("tx_db_nc" in particular) are needed in
714 	 * advance to create the dv/verbs device context. We process the
715 	 * devargs here to get them, and later process the devargs again
716 	 * to override some hardware settings.
717 	 */
718 	err = mlx5_args(config, dpdk_dev->devargs);
719 	if (err) {
720 		err = rte_errno;
721 		DRV_LOG(ERR, "failed to process device arguments: %s",
722 			strerror(rte_errno));
723 		goto error;
724 	}
725 	mlx5_malloc_mem_select(config->sys_mem_en);
726 	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
727 	if (!sh)
728 		return NULL;
729 	config->devx = sh->devx;
730 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
731 	config->dest_tir = 1;
732 #endif
733 #ifdef HAVE_IBV_MLX5_MOD_SWP
734 	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
735 #endif
736 	/*
737 	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
738 	 * as all ConnectX-5 devices.
739 	 */
740 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
741 	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
742 #endif
743 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
744 	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
745 #endif
746 	mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
747 	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
748 		if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
749 			DRV_LOG(DEBUG, "enhanced MPW is supported");
750 			mps = MLX5_MPW_ENHANCED;
751 		} else {
752 			DRV_LOG(DEBUG, "MPW is supported");
753 			mps = MLX5_MPW;
754 		}
755 	} else {
756 		DRV_LOG(DEBUG, "MPW isn't supported");
757 		mps = MLX5_MPW_DISABLED;
758 	}
759 #ifdef HAVE_IBV_MLX5_MOD_SWP
760 	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
761 		swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
762 	DRV_LOG(DEBUG, "SWP support: %u", swp);
763 #endif
764 	config->swp = !!swp;
765 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
766 	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
767 		struct mlx5dv_striding_rq_caps mprq_caps =
768 			dv_attr.striding_rq_caps;
769 
770 		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
771 			mprq_caps.min_single_stride_log_num_of_bytes);
772 		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
773 			mprq_caps.max_single_stride_log_num_of_bytes);
774 		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
775 			mprq_caps.min_single_wqe_log_num_of_strides);
776 		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
777 			mprq_caps.max_single_wqe_log_num_of_strides);
778 		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
779 			mprq_caps.supported_qpts);
780 		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
781 		mprq = 1;
782 		mprq_min_stride_size_n =
783 			mprq_caps.min_single_stride_log_num_of_bytes;
784 		mprq_max_stride_size_n =
785 			mprq_caps.max_single_stride_log_num_of_bytes;
786 		mprq_min_stride_num_n =
787 			mprq_caps.min_single_wqe_log_num_of_strides;
788 		mprq_max_stride_num_n =
789 			mprq_caps.max_single_wqe_log_num_of_strides;
790 	}
791 #endif
792 	if (RTE_CACHE_LINE_SIZE == 128 &&
793 	    !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
794 		cqe_comp = 0;
795 	else
796 		cqe_comp = 1;
797 	config->cqe_comp = cqe_comp;
798 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
799 	/* Whether device supports 128B Rx CQE padding. */
800 	cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
801 		  (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
802 #endif
803 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
804 	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
805 		tunnel_en = ((dv_attr.tunnel_offloads_caps &
806 			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
807 			     (dv_attr.tunnel_offloads_caps &
808 			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
809 			     (dv_attr.tunnel_offloads_caps &
810 			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
811 	}
812 	DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
813 		tunnel_en ? "" : "not ");
814 #else
815 	DRV_LOG(WARNING,
816 		"tunnel offloading disabled due to old OFED/rdma-core version");
817 #endif
818 	config->tunnel_en = tunnel_en;
819 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
820 	mpls_en = ((dv_attr.tunnel_offloads_caps &
821 		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
822 		   (dv_attr.tunnel_offloads_caps &
823 		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
824 	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
825 		mpls_en ? "" : "not ");
826 #else
827 	DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
828 		" old OFED/rdma-core version or firmware configuration");
829 #endif
830 	config->mpls_en = mpls_en;
831 	/* Check port status. */
832 	err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);
833 	if (err) {
834 		DRV_LOG(ERR, "port query failed: %s", strerror(err));
835 		goto error;
836 	}
837 	if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
838 		DRV_LOG(ERR, "port is not configured in Ethernet mode");
839 		err = EINVAL;
840 		goto error;
841 	}
842 	if (port_attr.state != IBV_PORT_ACTIVE)
843 		DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
844 			mlx5_glue->port_state_str(port_attr.state),
845 			port_attr.state);
846 	/* Allocate private eth device data. */
847 	priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
848 			   sizeof(*priv),
849 			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
850 	if (priv == NULL) {
851 		DRV_LOG(ERR, "priv allocation failure");
852 		err = ENOMEM;
853 		goto error;
854 	}
855 	priv->sh = sh;
856 	priv->dev_port = spawn->phys_port;
857 	priv->pci_dev = spawn->pci_dev;
858 	priv->mtu = RTE_ETHER_MTU;
859 	priv->mp_id.port_id = port_id;
860 	strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
861 	/* Some internal functions rely on Netlink sockets, open them now. */
862 	priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
863 	priv->nl_socket_route =	mlx5_nl_init(NETLINK_ROUTE);
864 	priv->representor = !!switch_info->representor;
865 	priv->master = !!switch_info->master;
866 	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
867 	priv->vport_meta_tag = 0;
868 	priv->vport_meta_mask = 0;
869 	priv->pf_bond = spawn->pf_bond;
870 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
871 	/*
872 	 * The DevX port query API is implemented. E-Switch may use
873 	 * either vport or reg_c[0] metadata register to match on
874 	 * vport index. The engaged part of the metadata register is
875 	 * defined by the mask.
876 	 */
877 	if (switch_info->representor || switch_info->master) {
878 		devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
879 				      MLX5DV_DEVX_PORT_MATCH_REG_C_0;
880 		err = mlx5_glue->devx_port_query(sh->ctx, spawn->phys_port,
881 						 &devx_port);
882 		if (err) {
883 			DRV_LOG(WARNING,
884 				"can't query devx port %d on device %s",
885 				spawn->phys_port,
886 				mlx5_os_get_dev_device_name(spawn->phys_dev));
887 			devx_port.comp_mask = 0;
888 		}
889 	}
890 	if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
891 		priv->vport_meta_tag = devx_port.reg_c_0.value;
892 		priv->vport_meta_mask = devx_port.reg_c_0.mask;
893 		if (!priv->vport_meta_mask) {
894 			DRV_LOG(ERR, "vport zero mask for port %d"
895 				     " on bonding device %s",
896 				     spawn->phys_port,
897 				     mlx5_os_get_dev_device_name
898 							(spawn->phys_dev));
899 			err = ENOTSUP;
900 			goto error;
901 		}
902 		if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
903 			DRV_LOG(ERR, "invalid vport tag for port %d"
904 				     " on bonding device %s",
905 				     spawn->phys_port,
906 				     mlx5_os_get_dev_device_name
907 							(spawn->phys_dev));
908 			err = ENOTSUP;
909 			goto error;
910 		}
911 	}
912 	if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
913 		priv->vport_id = devx_port.vport_num;
914 	} else if (spawn->pf_bond >= 0) {
915 		DRV_LOG(ERR, "can't deduce vport index for port %d"
916 			     " on bonding device %s",
917 			     spawn->phys_port,
918 			     mlx5_os_get_dev_device_name(spawn->phys_dev));
919 		err = ENOTSUP;
920 		goto error;
921 	} else {
922 		/* Assume the vport index in the compatible (legacy) way. */
923 		priv->vport_id = switch_info->representor ?
924 				 switch_info->port_name + 1 : -1;
925 	}
926 #else
927 	/*
928 	 * Kernel/rdma_core supports single E-Switch per PF configurations
929 	 * only, and the vport_id field contains the vport index for the
930 	 * associated VF, which is deduced from the representor port name.
931 	 * For example, let IB device port 10 have the attached network
932 	 * device eth0 whose port name attribute is pf0vf2; we can deduce
933 	 * the VF number as 2 and set the vport index as 3 (2 + 1). This
934 	 * assignment schema should be changed if multiple E-Switch
935 	 * instances per PF configurations and/or PCI subfunctions are
936 	 * added.
937 	 */
938 	priv->vport_id = switch_info->representor ?
939 			 switch_info->port_name + 1 : -1;
940 #endif
941 	/* representor_id field keeps the unmodified VF index. */
942 	priv->representor_id = switch_info->representor ?
943 			       switch_info->port_name : -1;
944 	/*
945 	 * Look for sibling devices in order to reuse their switch domain
946 	 * if any, otherwise allocate one.
947 	 */
948 	MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
949 		const struct mlx5_priv *opriv =
950 			rte_eth_devices[port_id].data->dev_private;
951 
952 		if (!opriv ||
953 		    opriv->sh != priv->sh ||
954 		    opriv->domain_id ==
955 		    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
956 			continue;
957 		priv->domain_id = opriv->domain_id;
958 		break;
959 	}
960 	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
961 		err = rte_eth_switch_domain_alloc(&priv->domain_id);
962 		if (err) {
963 			err = rte_errno;
964 			DRV_LOG(ERR, "unable to allocate switch domain: %s",
965 				strerror(rte_errno));
966 			goto error;
967 		}
968 		own_domain_id = 1;
969 	}
970 	/* Override some values set by hardware configuration. */
971 	mlx5_args(config, dpdk_dev->devargs);
972 	err = mlx5_dev_check_sibling_config(priv, config);
973 	if (err)
974 		goto error;
975 	config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
976 			    IBV_DEVICE_RAW_IP_CSUM);
977 	DRV_LOG(DEBUG, "checksum offloading is %ssupported",
978 		(config->hw_csum ? "" : "not "));
979 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
980 	!defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
981 	DRV_LOG(DEBUG, "counters are not supported");
982 #endif
983 #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
984 	if (config->dv_flow_en) {
985 		DRV_LOG(WARNING, "DV flow is not supported");
986 		config->dv_flow_en = 0;
987 	}
988 #endif
989 	config->ind_table_max_size =
990 		sh->device_attr.max_rwq_indirection_table_size;
991 	/*
992 	 * Remove this check once DPDK supports larger/variable
993 	 * indirection tables.
994 	 */
995 	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
996 		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
997 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
998 		config->ind_table_max_size);
999 	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
1000 				  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
1001 	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
1002 		(config->hw_vlan_strip ? "" : "not "));
1003 	config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
1004 				 IBV_RAW_PACKET_CAP_SCATTER_FCS);
1005 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1006 	hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
1007 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1008 	hw_padding = !!(sh->device_attr.device_cap_flags_ex &
1009 			IBV_DEVICE_PCI_WRITE_END_PADDING);
1010 #endif
1011 	if (config->hw_padding && !hw_padding) {
1012 		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
1013 		config->hw_padding = 0;
1014 	} else if (config->hw_padding) {
1015 		DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
1016 	}
1017 	config->tso = (sh->device_attr.max_tso > 0 &&
1018 		      (sh->device_attr.tso_supported_qpts &
1019 		       (1 << IBV_QPT_RAW_PACKET)));
1020 	if (config->tso)
1021 		config->tso_max_payload_sz = sh->device_attr.max_tso;
1022 	/*
1023 	 * MPW is disabled by default, while the Enhanced MPW is enabled
1024 	 * by default.
1025 	 */
1026 	if (config->mps == MLX5_ARG_UNSET)
1027 		config->mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
1028 							  MLX5_MPW_DISABLED;
1029 	else
1030 		config->mps = config->mps ? mps : MLX5_MPW_DISABLED;
1031 	DRV_LOG(INFO, "%sMPS is %s",
1032 		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
1033 		config->mps == MLX5_MPW ? "legacy " : "",
1034 		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
1035 	if (config->cqe_comp && !cqe_comp) {
1036 		DRV_LOG(WARNING, "Rx CQE compression isn't supported");
1037 		config->cqe_comp = 0;
1038 	}
1039 	if (config->cqe_pad && !cqe_pad) {
1040 		DRV_LOG(WARNING, "Rx CQE padding isn't supported");
1041 		config->cqe_pad = 0;
1042 	} else if (config->cqe_pad) {
1043 		DRV_LOG(INFO, "Rx CQE padding is enabled");
1044 	}
1045 	if (config->devx) {
1046 		priv->counter_fallback = 0;
1047 		err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
1048 		if (err) {
1049 			err = -err;
1050 			goto error;
1051 		}
1052 		if (!config->hca_attr.flow_counters_dump)
1053 			priv->counter_fallback = 1;
1054 #ifndef HAVE_IBV_DEVX_ASYNC
1055 		priv->counter_fallback = 1;
1056 #endif
1057 		if (priv->counter_fallback)
1058 			DRV_LOG(INFO, "Use fall-back DV counter management");
1059 		/* Check for LRO support. */
1060 		if (config->dest_tir && config->hca_attr.lro_cap &&
1061 		    config->dv_flow_en) {
1062 			/* TBD check tunnel lro caps. */
1063 			config->lro.supported = config->hca_attr.lro_cap;
1064 			DRV_LOG(DEBUG, "Device supports LRO");
1065 			/*
1066 			 * If LRO timeout is not configured by application,
1067 			 * use the minimal supported value.
1068 			 */
1069 			if (!config->lro.timeout)
1070 				config->lro.timeout =
1071 				config->hca_attr.lro_timer_supported_periods[0];
1072 			DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
1073 				config->lro.timeout);
1074 		}
1075 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
1076 		if (config->hca_attr.qos.sup &&
1077 		    config->hca_attr.qos.srtcm_sup &&
1078 		    config->dv_flow_en) {
1079 			uint8_t reg_c_mask =
1080 				config->hca_attr.qos.flow_meter_reg_c_ids;
1081 			/*
1082 			 * Meter needs two REG_C's for color match and pre-sfx
1083 			 * flow match. Here we get the REG_C for color match.
1084 			 * REG_C_0 and REG_C_1 are reserved for the metadata feature.
1085 			 */
1086 			reg_c_mask &= 0xfc;
1087 			if (__builtin_popcount(reg_c_mask) < 1) {
1088 				priv->mtr_en = 0;
1089 				DRV_LOG(WARNING, "No available register for"
1090 					" meter.");
1091 			} else {
1092 				priv->mtr_color_reg = ffs(reg_c_mask) - 1 +
1093 						      REG_C_0;
1094 				priv->mtr_en = 1;
1095 				priv->mtr_reg_share =
1096 				      config->hca_attr.qos.flow_meter_reg_share;
1097 				DRV_LOG(DEBUG, "The REG_C used by the meter is %d",
1098 					priv->mtr_color_reg);
1099 			}
1100 		}
1101 #endif
1102 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
1103 		if (config->hca_attr.log_max_ft_sampler_num > 0  &&
1104 		    config->dv_flow_en) {
1105 			priv->sampler_en = 1;
1106 			DRV_LOG(DEBUG, "Sampler is enabled.");
1107 		} else {
1108 			priv->sampler_en = 0;
1109 			if (!config->hca_attr.log_max_ft_sampler_num)
1110 				DRV_LOG(WARNING, "No available register for"
1111 						" Sampler.");
1112 			else
1113 				DRV_LOG(DEBUG, "DV flow is not supported.");
1114 		}
1115 #endif
1116 	}
1117 	if (config->tx_pp) {
1118 		DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
1119 			config->hca_attr.dev_freq_khz);
1120 		DRV_LOG(DEBUG, "Packet pacing is %ssupported",
1121 			config->hca_attr.qos.packet_pacing ? "" : "not ");
1122 		DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
1123 			config->hca_attr.cross_channel ? "" : "not ");
1124 		DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
1125 			config->hca_attr.wqe_index_ignore ? "" : "not ");
1126 		DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
1127 			config->hca_attr.non_wire_sq ? "" : "not ");
1128 		DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
1129 			config->hca_attr.log_max_static_sq_wq ? "" : "not ",
1130 			config->hca_attr.log_max_static_sq_wq);
1131 		DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
1132 			config->hca_attr.qos.wqe_rate_pp ? "" : "not ");
1133 		if (!config->devx) {
1134 			DRV_LOG(ERR, "DevX is required for packet pacing");
1135 			err = ENODEV;
1136 			goto error;
1137 		}
1138 		if (!config->hca_attr.qos.packet_pacing) {
1139 			DRV_LOG(ERR, "Packet pacing is not supported");
1140 			err = ENODEV;
1141 			goto error;
1142 		}
1143 		if (!config->hca_attr.cross_channel) {
1144 			DRV_LOG(ERR, "Cross channel operations are"
1145 				     " required for packet pacing");
1146 			err = ENODEV;
1147 			goto error;
1148 		}
1149 		if (!config->hca_attr.wqe_index_ignore) {
1150 			DRV_LOG(ERR, "WQE index ignore feature is"
1151 				     " required for packet pacing");
1152 			err = ENODEV;
1153 			goto error;
1154 		}
1155 		if (!config->hca_attr.non_wire_sq) {
1156 			DRV_LOG(ERR, "Non-wire SQ feature is"
1157 				     " required for packet pacing");
1158 			err = ENODEV;
1159 			goto error;
1160 		}
1161 		if (!config->hca_attr.log_max_static_sq_wq) {
1162 			DRV_LOG(ERR, "Static WQE SQ feature is"
1163 				     " required for packet pacing");
1164 			err = ENODEV;
1165 			goto error;
1166 		}
1167 		if (!config->hca_attr.qos.wqe_rate_pp) {
1168 			DRV_LOG(ERR, "WQE rate mode is required"
1169 				     " for packet pacing");
1170 			err = ENODEV;
1171 			goto error;
1172 		}
1173 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
1174 		DRV_LOG(ERR, "DevX does not provide UAR offset,"
1175 			     " can't create queues for packet pacing");
1176 		err = ENODEV;
1177 		goto error;
1178 #endif
1179 	}
1180 	if (config->devx) {
1181 		uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
1182 
1183 		err = config->hca_attr.access_register_user ?
1184 			mlx5_devx_cmd_register_read
1185 				(sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
1186 				reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
1187 		if (!err) {
1188 			uint32_t ts_mode;
1189 
1190 			/* MTUTC register is read successfully. */
1191 			ts_mode = MLX5_GET(register_mtutc, reg,
1192 					   time_stamp_mode);
1193 			if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
1194 				config->rt_timestamp = 1;
1195 		} else {
1196 			/* Kernel does not support register reading. */
1197 			if (config->hca_attr.dev_freq_khz ==
1198 						 (NS_PER_S / MS_PER_S))
1199 				config->rt_timestamp = 1;
1200 		}
1201 	}
1202 	/*
1203 	 * If HW has a bug working with tunnel packet decapsulation and
1204 	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
1205 	 * bit. Then the DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
1206 	 */
1207 	if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
1208 		config->hw_fcs_strip = 0;
1209 	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
1210 		(config->hw_fcs_strip ? "" : "not "));
1211 	if (config->mprq.enabled && mprq) {
1212 		if (config->mprq.stride_num_n &&
1213 		    (config->mprq.stride_num_n > mprq_max_stride_num_n ||
1214 		     config->mprq.stride_num_n < mprq_min_stride_num_n)) {
1215 			config->mprq.stride_num_n =
1216 				RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
1217 						mprq_min_stride_num_n),
1218 					mprq_max_stride_num_n);
1219 			DRV_LOG(WARNING,
1220 				"the number of strides"
1221 				" for Multi-Packet RQ is out of range,"
1222 				" setting default value (%u)",
1223 				1 << config->mprq.stride_num_n);
1224 		}
1225 		if (config->mprq.stride_size_n &&
1226 		    (config->mprq.stride_size_n > mprq_max_stride_size_n ||
1227 		     config->mprq.stride_size_n < mprq_min_stride_size_n)) {
1228 			config->mprq.stride_size_n =
1229 				RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N,
1230 						mprq_min_stride_size_n),
1231 					mprq_max_stride_size_n);
1232 			DRV_LOG(WARNING,
1233 				"the size of a stride"
1234 				" for Multi-Packet RQ is out of range,"
1235 				" setting default value (%u)",
1236 				1 << config->mprq.stride_size_n);
1237 		}
1238 		config->mprq.min_stride_size_n = mprq_min_stride_size_n;
1239 		config->mprq.max_stride_size_n = mprq_max_stride_size_n;
1240 	} else if (config->mprq.enabled && !mprq) {
1241 		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
1242 		config->mprq.enabled = 0;
1243 	}
1244 	if (config->max_dump_files_num == 0)
1245 		config->max_dump_files_num = 128;
1246 	eth_dev = rte_eth_dev_allocate(name);
1247 	if (eth_dev == NULL) {
1248 		DRV_LOG(ERR, "can not allocate rte ethdev");
1249 		err = ENOMEM;
1250 		goto error;
1251 	}
1252 	if (priv->representor) {
1253 		eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
1254 		eth_dev->data->representor_id = priv->representor_id;
1255 	}
1256 	/*
1257 	 * Store the associated network device interface index. This index
1258 	 * is permanent throughout the lifetime of the device. So, we may store
1259 	 * the ifindex here and use the cached value further on.
1260 	 */
1261 	MLX5_ASSERT(spawn->ifindex);
1262 	priv->if_index = spawn->ifindex;
1263 	if (priv->pf_bond >= 0 && priv->master) {
1264 		/* Get bond interface info */
1265 		err = mlx5_sysfs_bond_info(priv->if_index,
1266 				     &priv->bond_ifindex,
1267 				     priv->bond_name);
1268 		if (err)
1269 			DRV_LOG(ERR, "unable to get bond info: %s",
1270 				strerror(rte_errno));
1271 		else
1272 			DRV_LOG(INFO, "PF device %u, bond device %u(%s)",
1273 				priv->if_index, priv->bond_ifindex,
1274 				priv->bond_name);
1275 	}
1276 	eth_dev->data->dev_private = priv;
1277 	priv->dev_data = eth_dev->data;
1278 	eth_dev->data->mac_addrs = priv->mac;
1279 	eth_dev->device = dpdk_dev;
1280 	/* Configure the first MAC address by default. */
1281 	if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
1282 		DRV_LOG(ERR,
1283 			"port %u cannot get MAC address, is mlx5_en"
1284 			" loaded? (errno: %s)",
1285 			eth_dev->data->port_id, strerror(rte_errno));
1286 		err = ENODEV;
1287 		goto error;
1288 	}
1289 	DRV_LOG(INFO,
1290 		"port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
1291 		eth_dev->data->port_id,
1292 		mac.addr_bytes[0], mac.addr_bytes[1],
1293 		mac.addr_bytes[2], mac.addr_bytes[3],
1294 		mac.addr_bytes[4], mac.addr_bytes[5]);
1295 #ifdef RTE_LIBRTE_MLX5_DEBUG
1296 	{
1297 		char ifname[IF_NAMESIZE];
1298 
1299 		if (mlx5_get_ifname(eth_dev, &ifname) == 0)
1300 			DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
1301 				eth_dev->data->port_id, ifname);
1302 		else
1303 			DRV_LOG(DEBUG, "port %u ifname is unknown",
1304 				eth_dev->data->port_id);
1305 	}
1306 #endif
1307 	/* Get actual MTU if possible. */
1308 	err = mlx5_get_mtu(eth_dev, &priv->mtu);
1309 	if (err) {
1310 		err = rte_errno;
1311 		goto error;
1312 	}
1313 	DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
1314 		priv->mtu);
1315 	/* Initialize burst functions to prevent crashes before link-up. */
1316 	eth_dev->rx_pkt_burst = removed_rx_burst;
1317 	eth_dev->tx_pkt_burst = removed_tx_burst;
1318 	eth_dev->dev_ops = &mlx5_os_dev_ops;
1319 	eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
1320 	eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
1321 	eth_dev->rx_queue_count = mlx5_rx_queue_count;
1322 	/* Register MAC address. */
1323 	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
1324 	if (config->vf && config->vf_nl_en)
1325 		mlx5_nl_mac_addr_sync(priv->nl_socket_route,
1326 				      mlx5_ifindex(eth_dev),
1327 				      eth_dev->data->mac_addrs,
1328 				      MLX5_MAX_MAC_ADDRESSES);
1329 	priv->flows = 0;
1330 	priv->ctrl_flows = 0;
1331 	TAILQ_INIT(&priv->flow_meters);
1332 	TAILQ_INIT(&priv->flow_meter_profiles);
1333 	/* Hint libmlx5 to use PMD allocator for data plane resources */
1334 	mlx5_glue->dv_set_context_attr(sh->ctx,
1335 			MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
1336 			(void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
1337 				.alloc = &mlx5_alloc_verbs_buf,
1338 				.free = &mlx5_free_verbs_buf,
1339 				.data = priv,
1340 			}));
1341 	/* Bring Ethernet device up. */
1342 	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
1343 		eth_dev->data->port_id);
1344 	mlx5_set_link_up(eth_dev);
1345 	/*
1346 	 * Even though the interrupt handler is not installed yet,
1347 	 * interrupts will still trigger on the async_fd from
1348 	 * Verbs context returned by ibv_open_device().
1349 	 */
1350 	mlx5_link_update(eth_dev, 0);
1351 #ifdef HAVE_MLX5DV_DR_ESWITCH
1352 	if (!(config->hca_attr.eswitch_manager && config->dv_flow_en &&
1353 	      (switch_info->representor || switch_info->master)))
1354 		config->dv_esw_en = 0;
1355 #else
1356 	config->dv_esw_en = 0;
1357 #endif
1358 	/* Detect minimal data bytes to inline. */
1359 	mlx5_set_min_inline(spawn, config);
1360 	/* Store device configuration on private structure. */
1361 	priv->config = *config;
1362 	/* Create context for virtual machine VLAN workaround. */
1363 	priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
1364 	if (config->dv_flow_en) {
1365 		err = mlx5_alloc_shared_dr(priv);
1366 		if (err)
1367 			goto error;
1368 		/*
1369 		 * RSS id is shared with meter flow id. Meter flow id can only
1370 		 * use the 24 MSB of the register.
1371 		 */
1372 		priv->qrss_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX >>
1373 				     MLX5_MTR_COLOR_BITS);
1374 		if (!priv->qrss_id_pool) {
1375 			DRV_LOG(ERR, "can't create flow id pool");
1376 			err = ENOMEM;
1377 			goto error;
1378 		}
1379 	}
1380 	/*
1381 	 * Initialize the dev_ops structure with DevX/Verbs function pointers.
1382 	 * When DevX is supported and both DV flow and dest tir are enabled, all
1383 	 * Rx functions use DevX API (except for drop that has not yet been
1384 	 * implemented in DevX).
1385 	 */
1386 	if (config->devx && config->dv_flow_en && config->dest_tir) {
1387 		priv->obj_ops = devx_obj_ops;
1388 		priv->obj_ops.drop_action_create =
1389 						ibv_obj_ops.drop_action_create;
1390 		priv->obj_ops.drop_action_destroy =
1391 						ibv_obj_ops.drop_action_destroy;
1392 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
1393 		priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify;
1394 #else
1395 		if (!config->dv_esw_en)
1396 			priv->obj_ops.txq_obj_modify =
1397 						ibv_obj_ops.txq_obj_modify;
1398 #endif
1399 	} else {
1400 		priv->obj_ops = ibv_obj_ops;
1401 	}
1402 	/* The Tx objects are managed by specific Linux wrapper functions. */
1403 	priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
1404 	priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
1405 	/* Supported Verbs flow priority number detection. */
1406 	err = mlx5_flow_discover_priorities(eth_dev);
1407 	if (err < 0) {
1408 		err = -err;
1409 		goto error;
1410 	}
1411 	priv->config.flow_prio = err;
1412 	if (!priv->config.dv_esw_en &&
1413 	    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1414 		DRV_LOG(WARNING, "metadata mode %u is not supported "
1415 				 "(no E-Switch)", priv->config.dv_xmeta_en);
1416 		priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
1417 	}
1418 	mlx5_set_metadata_mask(eth_dev);
1419 	if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1420 	    !priv->sh->dv_regc0_mask) {
1421 		DRV_LOG(ERR, "metadata mode %u is not supported "
1422 			     "(no metadata reg_c[0] is available)",
1423 			     priv->config.dv_xmeta_en);
1424 		err = ENOTSUP;
1425 		goto error;
1426 	}
1427 	/*
1428 	 * Allocate the buffer for flow creation, just once.
1429 	 * The allocation must be done before any flow is created.
1430 	 */
1431 	mlx5_flow_alloc_intermediate(eth_dev);
1432 	/* Query availability of metadata reg_c's. */
1433 	err = mlx5_flow_discover_mreg_c(eth_dev);
1434 	if (err < 0) {
1435 		err = -err;
1436 		goto error;
1437 	}
1438 	if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
1439 		DRV_LOG(DEBUG,
1440 			"port %u extensive metadata register is not supported",
1441 			eth_dev->data->port_id);
1442 		if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1443 			DRV_LOG(ERR, "metadata mode %u is not supported "
1444 				     "(no metadata registers available)",
1445 				     priv->config.dv_xmeta_en);
1446 			err = ENOTSUP;
1447 			goto error;
1448 		}
1449 	}
1450 	if (priv->config.dv_flow_en &&
1451 	    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1452 	    mlx5_flow_ext_mreg_supported(eth_dev) &&
1453 	    priv->sh->dv_regc0_mask) {
1454 		priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
1455 						      MLX5_FLOW_MREG_HTABLE_SZ);
1456 		if (!priv->mreg_cp_tbl) {
1457 			err = ENOMEM;
1458 			goto error;
1459 		}
1460 	}
1461 	return eth_dev;
1462 error:
1463 	if (priv) {
1464 		if (priv->mreg_cp_tbl)
1465 			mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL);
1466 		if (priv->sh)
1467 			mlx5_os_free_shared_dr(priv);
1468 		if (priv->nl_socket_route >= 0)
1469 			close(priv->nl_socket_route);
1470 		if (priv->nl_socket_rdma >= 0)
1471 			close(priv->nl_socket_rdma);
1472 		if (priv->vmwa_context)
1473 			mlx5_vlan_vmwa_exit(priv->vmwa_context);
1474 		if (priv->qrss_id_pool)
1475 			mlx5_flow_id_pool_release(priv->qrss_id_pool);
1476 		if (own_domain_id)
1477 			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
1478 		mlx5_free(priv);
1479 		if (eth_dev != NULL)
1480 			eth_dev->data->dev_private = NULL;
1481 	}
1482 	if (eth_dev != NULL) {
1483 		/* mac_addrs must not be freed alone because it is part of
1484 		 * dev_private.
1485 		 */
1486 		eth_dev->data->mac_addrs = NULL;
1487 		rte_eth_dev_release_port(eth_dev);
1488 	}
1489 	if (sh)
1490 		mlx5_free_shared_dev_ctx(sh);
1491 	MLX5_ASSERT(err > 0);
1492 	rte_errno = err;
1493 	return NULL;
1494 }
1495 
1496 /**
1497  * Comparison callback to sort device data.
1498  *
1499  * This is meant to be used with qsort().
1500  *
1501  * @param[in] a
1502  *   Pointer to pointer to first data object.
1503  * @param[in] b
1504  *   Pointer to pointer to second data object.
1505  *
1506  * @return
1507  *   0 if both objects are equal, less than 0 if the first argument is less
1508  *   than the second, greater than 0 otherwise.
1509  */
1510 static int
1511 mlx5_dev_spawn_data_cmp(const void *a, const void *b)
1512 {
1513 	const struct mlx5_switch_info *si_a =
1514 		&((const struct mlx5_dev_spawn_data *)a)->info;
1515 	const struct mlx5_switch_info *si_b =
1516 		&((const struct mlx5_dev_spawn_data *)b)->info;
1517 	int ret;
1518 
1519 	/* Master device first. */
1520 	ret = si_b->master - si_a->master;
1521 	if (ret)
1522 		return ret;
1523 	/* Then representor devices. */
1524 	ret = si_b->representor - si_a->representor;
1525 	if (ret)
1526 		return ret;
1527 	/* Unidentified devices come last in no specific order. */
1528 	if (!si_a->representor)
1529 		return 0;
1530 	/* Order representors by name. */
1531 	return si_a->port_name - si_b->port_name;
1532 }
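
/*
 * Illustrative use with qsort() (a sketch only; the "list" and "ns" names
 * follow the spawn list built in the probe routine below):
 *
 *	qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
 */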
1533 
1534 /**
1535  * Match PCI information for possible slaves of bonding device.
1536  *
1537  * @param[in] ibv_dev
1538  *   Pointer to Infiniband device structure.
1539  * @param[in] pci_dev
1540  *   Pointer to PCI device structure to match PCI address.
1541  * @param[in] nl_rdma
1542  *   Netlink RDMA group socket handle.
1543  *
1544  * @return
1545  *   Negative value if no bonding device is found, otherwise the
1546  *   non-negative index of the slave PF in the bonding.
1547  */
1548 static int
1549 mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
1550 			   const struct rte_pci_device *pci_dev,
1551 			   int nl_rdma)
1552 {
1553 	char ifname[IF_NAMESIZE + 1];
1554 	unsigned int ifindex;
1555 	unsigned int np, i;
1556 	FILE *file = NULL;
1557 	int pf = -1;
1558 
1559 	/*
1560 	 * Try to get the master device name. If something goes
1561 	 * wrong, assume a lack of kernel support and no
1562 	 * bonding devices.
1563 	 */
1564 	if (nl_rdma < 0)
1565 		return -1;
1566 	if (!strstr(ibv_dev->name, "bond"))
1567 		return -1;
1568 	np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
1569 	if (!np)
1570 		return -1;
1571 	/*
1572 	 * The master device might not be on the predefined
1573 	 * port (not on port index 1; this is not guaranteed),
1574 	 * so we have to scan all Infiniband device ports and
1575 	 * find the master.
1576 	 */
1577 	for (i = 1; i <= np; ++i) {
1578 		/* Check whether Infiniband port is populated. */
1579 		ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
1580 		if (!ifindex)
1581 			continue;
1582 		if (!if_indextoname(ifindex, ifname))
1583 			continue;
1584 		/* Try to read bonding slave names from sysfs. */
1585 		MKSTR(slaves,
1586 		      "/sys/class/net/%s/master/bonding/slaves", ifname);
1587 		file = fopen(slaves, "r");
1588 		if (file)
1589 			break;
1590 	}
1591 	if (!file)
1592 		return -1;
1593 	/* Use safe format to check maximal buffer length. */
1594 	MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
1595 	while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
1596 		char tmp_str[IF_NAMESIZE + 32];
1597 		struct rte_pci_addr pci_addr;
1598 		struct mlx5_switch_info	info;
1599 
1600 		/* Process slave interface names in the loop. */
1601 		snprintf(tmp_str, sizeof(tmp_str),
1602 			 "/sys/class/net/%s", ifname);
1603 		if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
1604 			DRV_LOG(WARNING, "can not get PCI address"
1605 					 " for netdev \"%s\"", ifname);
1606 			continue;
1607 		}
1608 		if (pci_dev->addr.domain != pci_addr.domain ||
1609 		    pci_dev->addr.bus != pci_addr.bus ||
1610 		    pci_dev->addr.devid != pci_addr.devid ||
1611 		    pci_dev->addr.function != pci_addr.function)
1612 			continue;
1613 		/* Slave interface PCI address match found. */
1614 		fclose(file);
1615 		snprintf(tmp_str, sizeof(tmp_str),
1616 			 "/sys/class/net/%s/phys_port_name", ifname);
1617 		file = fopen(tmp_str, "rb");
1618 		if (!file)
1619 			break;
1620 		info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
1621 		if (fscanf(file, "%32s", tmp_str) == 1)
1622 			mlx5_translate_port_name(tmp_str, &info);
1623 		if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY ||
1624 		    info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
1625 			pf = info.port_name;
1626 		break;
1627 	}
1628 	if (file)
1629 		fclose(file);
1630 	return pf;
1631 }
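
/*
 * For clarity, the sysfs files consulted above are (paths are as read by the
 * code; the contents shown are assumed examples only):
 *
 *	/sys/class/net/<ifname>/master/bonding/slaves
 *		- space-separated slave interface names
 *	/sys/class/net/<slave>/phys_port_name
 *		- kernel port name attribute parsed by mlx5_translate_port_name()
 */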
1632 
1633 /**
1634  * DPDK callback to register a PCI device.
1635  *
1636  * This function spawns Ethernet devices out of a given PCI device.
1637  *
1638  * @param[in] pci_drv
1639  *   PCI driver structure (mlx5_driver).
1640  * @param[in] pci_dev
1641  *   PCI device information.
1642  *
1643  * @return
1644  *   0 on success, a negative errno value otherwise and rte_errno is set.
1645  */
1646 int
1647 mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1648 		  struct rte_pci_device *pci_dev)
1649 {
1650 	struct ibv_device **ibv_list;
1651 	/*
1652 	 * Number of found IB devices matching the requested PCI BDF.
1653 	 * nd != 1 means there are multiple IB devices over the same
1654 	 * PCI device and we have representors and a master.
1655 	 */
1656 	unsigned int nd = 0;
1657 	/*
1658 	 * Number of found IB device ports. nd = 1 and np = 1..n means
1659 	 * we have a single multiport IB device, and there may be
1660 	 * representors attached to some of the found ports.
1661 	 */
1662 	unsigned int np = 0;
1663 	/*
1664 	 * Number of DPDK Ethernet devices to spawn - either over
1665 	 * multiple IB devices or multiple ports of a single IB device.
1666 	 * In effect, this is the number of spawn iterations.
1667 	 */
1668 	unsigned int ns = 0;
1669 	/*
1670 	 * Bonding device
1671 	 *   < 0 - no bonding device (single one)
1672 	 *  >= 0 - bonding device (value is slave PF index)
1673 	 */
1674 	int bd = -1;
1675 	struct mlx5_dev_spawn_data *list = NULL;
1676 	struct mlx5_dev_config dev_config;
1677 	unsigned int dev_config_vf;
1678 	int ret;
1679 
1680 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1681 		mlx5_pmd_socket_init();
1682 	ret = mlx5_init_once();
1683 	if (ret) {
1684 		DRV_LOG(ERR, "unable to init PMD global data: %s",
1685 			strerror(rte_errno));
1686 		return -rte_errno;
1687 	}
1688 	errno = 0;
1689 	ibv_list = mlx5_glue->get_device_list(&ret);
1690 	if (!ibv_list) {
1691 		rte_errno = errno ? errno : ENOSYS;
1692 		DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
1693 		return -rte_errno;
1694 	}
1695 	/*
1696 	 * First scan the list of all Infiniband devices to find
1697 	 * matching ones, gathering into the list.
1698 	 */
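	/*
	 * Here 'ret' holds the number of devices returned by
	 * get_device_list(); reserve one extra slot in the match
	 * array for the terminating NULL entry.
	 */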
1699 	struct ibv_device *ibv_match[ret + 1];
1700 	int nl_route = mlx5_nl_init(NETLINK_ROUTE);
1701 	int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
1702 	unsigned int i;
1703 
1704 	while (ret-- > 0) {
1705 		struct rte_pci_addr pci_addr;
1706 
1707 		DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
1708 		bd = mlx5_device_bond_pci_match
1709 				(ibv_list[ret], pci_dev, nl_rdma);
1710 		if (bd >= 0) {
1711 			/*
1712 			 * Bonding device detected. Only one match is allowed:
1713 			 * bonding is supported over a multi-port IB device,
1714 			 * so there should be no matches on representor PCI
1715 			 * functions or non-VF-LAG bonding devices with the
1716 			 * specified address.
1717 			 */
1718 			if (nd) {
1719 				DRV_LOG(ERR,
1720 					"multiple PCI match on bonding device"
1721 					" \"%s\" found", ibv_list[ret]->name);
1722 				rte_errno = ENOENT;
1723 				ret = -rte_errno;
1724 				goto exit;
1725 			}
1726 			DRV_LOG(INFO, "PCI information matches for"
1727 				      " slave %d bonding device \"%s\"",
1728 				      bd, ibv_list[ret]->name);
1729 			ibv_match[nd++] = ibv_list[ret];
1730 			break;
1731 		}
1732 		if (mlx5_dev_to_pci_addr
1733 			(ibv_list[ret]->ibdev_path, &pci_addr))
1734 			continue;
1735 		if (pci_dev->addr.domain != pci_addr.domain ||
1736 		    pci_dev->addr.bus != pci_addr.bus ||
1737 		    pci_dev->addr.devid != pci_addr.devid ||
1738 		    pci_dev->addr.function != pci_addr.function)
1739 			continue;
1740 		DRV_LOG(INFO, "PCI information matches for device \"%s\"",
1741 			ibv_list[ret]->name);
1742 		ibv_match[nd++] = ibv_list[ret];
1743 	}
1744 	ibv_match[nd] = NULL;
1745 	if (!nd) {
1746 		/* No device matches, just complain and bail out. */
1747 		DRV_LOG(WARNING,
1748 			"no Verbs device matches PCI device " PCI_PRI_FMT ","
1749 			" are kernel drivers loaded?",
1750 			pci_dev->addr.domain, pci_dev->addr.bus,
1751 			pci_dev->addr.devid, pci_dev->addr.function);
1752 		rte_errno = ENOENT;
1753 		ret = -rte_errno;
1754 		goto exit;
1755 	}
1756 	if (nd == 1) {
1757 		/*
1758 		 * The single matching device found may have multiple ports.
1759 		 * Each port may be a representor, so we have to query the
1760 		 * number of ports and check for representors.
1761 		 */
1762 		if (nl_rdma >= 0)
1763 			np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
1764 		if (!np)
1765 			DRV_LOG(WARNING, "can not get IB device \"%s\""
1766 					 " ports number", ibv_match[0]->name);
1767 		if (bd >= 0 && !np) {
1768 			DRV_LOG(ERR, "can not get ports"
1769 				     " for bonding device");
1770 			rte_errno = ENOENT;
1771 			ret = -rte_errno;
1772 			goto exit;
1773 		}
1774 	}
1775 #ifndef HAVE_MLX5DV_DR_DEVX_PORT
1776 	if (bd >= 0) {
1777 		/*
1778 		 * This may happen if there is VF LAG kernel support but the
1779 		 * application is compiled with an older rdma-core library.
1780 		 */
1781 		DRV_LOG(ERR,
1782 			"No kernel/verbs support for VF LAG bonding found.");
1783 		rte_errno = ENOTSUP;
1784 		ret = -rte_errno;
1785 		goto exit;
1786 	}
1787 #endif
1788 	/*
1789 	 * Now we can determine the maximal
1790 	 * number of devices to be spawned.
1791 	 */
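	/*
	 * One spawn entry is needed per IB port for a multiport device
	 * (np > 0), otherwise one entry per matched IB device.
	 */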
1792 	list = mlx5_malloc(MLX5_MEM_ZERO,
1793 			   sizeof(struct mlx5_dev_spawn_data) *
1794 			   (np ? np : nd),
1795 			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1796 	if (!list) {
1797 		DRV_LOG(ERR, "spawn data array allocation failure");
1798 		rte_errno = ENOMEM;
1799 		ret = -rte_errno;
1800 		goto exit;
1801 	}
1802 	if (bd >= 0 || np > 1) {
1803 		/*
1804 		 * A single IB device with multiple ports was found,
1805 		 * it may be an E-Switch master device with representors.
1806 		 * We have to identify them by iterating over the ports.
1807 		 */
1808 		MLX5_ASSERT(nl_rdma >= 0);
1809 		MLX5_ASSERT(ns == 0);
1810 		MLX5_ASSERT(nd == 1);
1811 		MLX5_ASSERT(np);
1812 		for (i = 1; i <= np; ++i) {
1813 			list[ns].max_port = np;
1814 			list[ns].phys_port = i;
1815 			list[ns].phys_dev = ibv_match[0];
1816 			list[ns].eth_dev = NULL;
1817 			list[ns].pci_dev = pci_dev;
1818 			list[ns].pf_bond = bd;
1819 			list[ns].ifindex = mlx5_nl_ifindex
1820 				(nl_rdma,
1821 				mlx5_os_get_dev_device_name
1822 						(list[ns].phys_dev), i);
1823 			if (!list[ns].ifindex) {
1824 				/*
1825 				 * No network interface index found for the
1826 				 * specified port, it means there is no
1827 				 * representor on this port. It's OK,
1828 				 * there can be disabled ports, for example
1829 				 * if sriov_numvfs < sriov_totalvfs.
1830 				 */
1831 				continue;
1832 			}
1833 			ret = -1;
1834 			if (nl_route >= 0)
1835 				ret = mlx5_nl_switch_info
1836 					       (nl_route,
1837 						list[ns].ifindex,
1838 						&list[ns].info);
1839 			if (ret || (!list[ns].info.representor &&
1840 				    !list[ns].info.master)) {
1841 				/*
1842 				 * We failed to recognize representors with
1843 				 * Netlink, let's try to perform the task
1844 				 * with sysfs.
1845 				 */
1846 				ret = mlx5_sysfs_switch_info
1847 						(list[ns].ifindex,
1848 						 &list[ns].info);
1849 			}
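			/*
			 * For a bonding device spawn only the ports that
			 * belong to the matched slave PF index (bd).
			 */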
1850 			if (!ret && bd >= 0) {
1851 				switch (list[ns].info.name_type) {
1852 				case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
1853 					if (list[ns].info.port_name == bd)
1854 						ns++;
1855 					break;
1856 				case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
1857 					/* Fallthrough */
1858 				case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
1859 					if (list[ns].info.pf_num == bd)
1860 						ns++;
1861 					break;
1862 				default:
1863 					break;
1864 				}
1865 				continue;
1866 			}
1867 			if (!ret && (list[ns].info.representor ^
1868 				     list[ns].info.master))
1869 				ns++;
1870 		}
1871 		if (!ns) {
1872 			DRV_LOG(ERR,
1873 				"unable to recognize master/representors"
1874 				" on the IB device with multiple ports");
1875 			rte_errno = ENOENT;
1876 			ret = -rte_errno;
1877 			goto exit;
1878 		}
1879 	} else {
1880 		/*
1881 		 * The existence of several matching entries (nd > 1) means
1882 		 * port representors have been instantiated. Neither existing
1883 		 * Verbs calls nor sysfs entries can tell them apart; this can
1884 		 * only be done through Netlink calls, assuming kernel drivers are
1885 		 * recent enough to support them.
1886 		 *
1887 		 * In the event of identification failure through Netlink,
1888 		 * try again through sysfs, then:
1889 		 *
1890 		 * 1. A single IB device matches (nd == 1) with a single
1891 		 *    port (np=0/1) and is not a representor, assume
1892 		 *    no switch support.
1893 		 *
1894 		 * 2. Otherwise no safe assumptions can be made;
1895 		 *    complain louder and bail out.
1896 		 */
1897 		for (i = 0; i != nd; ++i) {
1898 			memset(&list[ns].info, 0, sizeof(list[ns].info));
1899 			list[ns].max_port = 1;
1900 			list[ns].phys_port = 1;
1901 			list[ns].phys_dev = ibv_match[i];
1902 			list[ns].eth_dev = NULL;
1903 			list[ns].pci_dev = pci_dev;
1904 			list[ns].pf_bond = -1;
1905 			list[ns].ifindex = 0;
1906 			if (nl_rdma >= 0)
1907 				list[ns].ifindex = mlx5_nl_ifindex
1908 				(nl_rdma,
1909 				mlx5_os_get_dev_device_name
1910 						(list[ns].phys_dev), 1);
1911 			if (!list[ns].ifindex) {
1912 				char ifname[IF_NAMESIZE];
1913 
1914 				/*
1915 				 * Netlink failed, this may happen with an old
1916 				 * ib_core kernel driver (before 4.16).
1917 				 * We can assume the driver is old because
1918 				 * here we are processing single-port IB
1919 				 * devices. Let's try sysfs to retrieve
1920 				 * the ifindex. This method works for the
1921 				 * master device only.
1922 				 */
1923 				if (nd > 1) {
1924 					/*
1925 					 * Multiple devices found, assume
1926 					 * they are representors; we can not
1927 					 * distinguish master/representor nor
1928 					 * retrieve the ifindex via sysfs.
1929 					 */
1930 					continue;
1931 				}
1932 				ret = mlx5_get_ifname_sysfs
1933 					(ibv_match[i]->ibdev_path, ifname);
1934 				if (!ret)
1935 					list[ns].ifindex =
1936 						if_nametoindex(ifname);
1937 				if (!list[ns].ifindex) {
1938 					/*
1939 					 * No network interface index found
1940 					 * for the specified device, it means
1941 					 * it is neither a representor
1942 					 * nor a master.
1943 					 */
1944 					continue;
1945 				}
1946 			}
1947 			ret = -1;
1948 			if (nl_route >= 0)
1949 				ret = mlx5_nl_switch_info
1950 					       (nl_route,
1951 						list[ns].ifindex,
1952 						&list[ns].info);
1953 			if (ret || (!list[ns].info.representor &&
1954 				    !list[ns].info.master)) {
1955 				/*
1956 				 * We failed to recognize representors with
1957 				 * Netlink, let's try to perform the task
1958 				 * with sysfs.
1959 				 */
1960 				ret = mlx5_sysfs_switch_info
1961 						(list[ns].ifindex,
1962 						 &list[ns].info);
1963 			}
1964 			if (!ret && (list[ns].info.representor ^
1965 				     list[ns].info.master)) {
1966 				ns++;
1967 			} else if ((nd == 1) &&
1968 				   !list[ns].info.representor &&
1969 				   !list[ns].info.master) {
1970 				/*
1971 				 * A single IB device with
1972 				 * one physical port and an
1973 				 * attached network device.
1974 				 * Maybe SR-IOV is not enabled
1975 				 * or there are no representors.
1976 				 */
1977 				DRV_LOG(INFO, "no E-Switch support detected");
1978 				ns++;
1979 				break;
1980 			}
1981 		}
1982 		if (!ns) {
1983 			DRV_LOG(ERR,
1984 				"unable to recognize master/representors"
1985 				" on the multiple IB devices");
1986 			rte_errno = ENOENT;
1987 			ret = -rte_errno;
1988 			goto exit;
1989 		}
1990 	}
1991 	MLX5_ASSERT(ns);
1992 	/*
1993 	 * Sort the list to probe devices in natural order for user convenience
1994 	 * (i.e. master first, then representors from lowest to highest ID).
1995 	 */
1996 	qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
1997 	/* Device specific configuration. */
1998 	switch (pci_dev->id.device_id) {
1999 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
2000 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
2001 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
2002 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
2003 	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
2004 	case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
2005 	case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF:
2006 		dev_config_vf = 1;
2007 		break;
2008 	default:
2009 		dev_config_vf = 0;
2010 		break;
2011 	}
2012 	for (i = 0; i != ns; ++i) {
2013 		uint32_t restore;
2014 
2015 		/* Default configuration. */
2016 		memset(&dev_config, 0, sizeof(struct mlx5_dev_config));
2017 		dev_config.vf = dev_config_vf;
2018 		dev_config.mps = MLX5_ARG_UNSET;
2019 		dev_config.dbnc = MLX5_ARG_UNSET;
2020 		dev_config.rx_vec_en = 1;
2021 		dev_config.txq_inline_max = MLX5_ARG_UNSET;
2022 		dev_config.txq_inline_min = MLX5_ARG_UNSET;
2023 		dev_config.txq_inline_mpw = MLX5_ARG_UNSET;
2024 		dev_config.txqs_inline = MLX5_ARG_UNSET;
2025 		dev_config.vf_nl_en = 1;
2026 		dev_config.mr_ext_memseg_en = 1;
2027 		dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
2028 		dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
2029 		dev_config.dv_esw_en = 1;
2030 		dev_config.dv_flow_en = 1;
2031 		dev_config.decap_en = 1;
2032 		dev_config.log_hp_size = MLX5_ARG_UNSET;
2033 		list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
2034 						 &list[i],
2035 						 &dev_config);
2036 		if (!list[i].eth_dev) {
2037 			if (rte_errno != EBUSY && rte_errno != EEXIST)
2038 				break;
2039 			/* Device is disabled or already spawned. Ignore it. */
2040 			continue;
2041 		}
2042 		restore = list[i].eth_dev->data->dev_flags;
2043 		rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
2044 		/* Restore non-PCI flags cleared by the above call. */
2045 		list[i].eth_dev->data->dev_flags |= restore;
2046 		rte_eth_dev_probing_finish(list[i].eth_dev);
2047 	}
2048 	if (i != ns) {
2049 		DRV_LOG(ERR,
2050 			"probe of PCI device " PCI_PRI_FMT " aborted after"
2051 			" encountering an error: %s",
2052 			pci_dev->addr.domain, pci_dev->addr.bus,
2053 			pci_dev->addr.devid, pci_dev->addr.function,
2054 			strerror(rte_errno));
2055 		ret = -rte_errno;
2056 		/* Roll back. */
2057 		while (i--) {
2058 			if (!list[i].eth_dev)
2059 				continue;
2060 			mlx5_dev_close(list[i].eth_dev);
2061 			/* mac_addrs must not be freed; it is part of dev_private. */
2062 			list[i].eth_dev->data->mac_addrs = NULL;
2063 			claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
2064 		}
2065 		/* Restore original error. */
2066 		rte_errno = -ret;
2067 	} else {
2068 		ret = 0;
2069 	}
2070 exit:
2071 	/*
2072 	 * Do the routine cleanup:
2073 	 * - close opened Netlink sockets
2074 	 * - free allocated spawn data array
2075 	 * - free the Infiniband device list
2076 	 */
2077 	if (nl_rdma >= 0)
2078 		close(nl_rdma);
2079 	if (nl_route >= 0)
2080 		close(nl_route);
2081 	if (list)
2082 		mlx5_free(list);
2083 	MLX5_ASSERT(ibv_list);
2084 	mlx5_glue->free_device_list(ibv_list);
2085 	return ret;
2086 }
2087 
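/*
 * Configure the MLX5_SHUT_UP_BF environment variable (checked by rdma-core
 * when the device is opened) according to the requested Tx doorbell mapping
 * policy and return the previous setting so that it can be restored later.
 */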
2088 static int
2089 mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
2090 {
2091 	char *env;
2092 	int value;
2093 
2094 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
2095 	/* Read the current environment variable value to restore it later. */
2096 	env = getenv(MLX5_SHUT_UP_BF);
2097 	value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
2098 	if (config->dbnc == MLX5_ARG_UNSET)
2099 		setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);
2100 	else
2101 		setenv(MLX5_SHUT_UP_BF,
2102 		       config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
2103 	return value;
2104 }
2105 
2106 static void
2107 mlx5_restore_doorbell_mapping_env(int value)
2108 {
2109 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
2110 	/* Restore the original environment variable state. */
2111 	if (value == MLX5_ARG_UNSET)
2112 		unsetenv(MLX5_SHUT_UP_BF);
2113 	else
2114 		setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1);
2115 }
2116 
2117 /**
2118  * Extract pdn of PD object using DV API.
2119  *
2120  * @param[in] pd
2121  *   Pointer to the verbs PD object.
2122  * @param[out] pdn
2123  *   Pointer to the PD object number variable.
2124  *
2125  * @return
2126  *   0 on success, error value otherwise.
2127  */
2128 int
2129 mlx5_os_get_pdn(void *pd, uint32_t *pdn)
2130 {
2131 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2132 	struct mlx5dv_obj obj;
2133 	struct mlx5dv_pd pd_info;
2134 	int ret = 0;
2135 
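	/*
	 * Use the Direct Verbs object initializer to expose the
	 * mlx5-specific PD attributes and read back the PD number (pdn).
	 */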
2136 	obj.pd.in = pd;
2137 	obj.pd.out = &pd_info;
2138 	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
2139 	if (ret) {
2140 		DRV_LOG(DEBUG, "Failed to get PD object info");
2141 		return ret;
2142 	}
2143 	*pdn = pd_info.pdn;
2144 	return 0;
2145 #else
2146 	(void)pd;
2147 	(void)pdn;
2148 	return -ENOTSUP;
2149 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
2150 }
2151 
2152 /**
2153  * Function API to open IB device.
2154  *
2155  * This function calls the Linux glue APIs to open a device.
2156  *
2157  * @param[in] spawn
2158  *   Pointer to the IB device attributes (name, port, etc).
2159  * @param[out] config
2160  *   Pointer to device configuration structure.
2161  * @param[out] sh
2162  *   Pointer to shared context structure.
2163  *
2164  * @return
2165  *   0 on success, a positive error value otherwise.
2166  */
2167 int
2168 mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
2169 		     const struct mlx5_dev_config *config,
2170 		     struct mlx5_dev_ctx_shared *sh)
2171 {
2172 	int dbmap_env;
2173 	int err = 0;
2174 
2175 	sh->numa_node = spawn->pci_dev->device.numa_node;
2176 	pthread_mutex_init(&sh->txpp.mutex, NULL);
2177 	/*
2178 	 * Configure environment variable "MLX5_SHUT_UP_BF"
2179 	 * before the device creation. The rdma-core library
2180 	 * checks the variable at device creation and
2181 	 * stores the result internally.
2182 	 */
2183 	dbmap_env = mlx5_config_doorbell_mapping_env(config);
2184 	/* Try to open IB device with DV first, then usual Verbs. */
2185 	errno = 0;
2186 	sh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev);
2187 	if (sh->ctx) {
2188 		sh->devx = 1;
2189 		DRV_LOG(DEBUG, "DevX is supported");
2190 		/* The device is created, no need for environment. */
2191 		mlx5_restore_doorbell_mapping_env(dbmap_env);
2192 	} else {
2193 		/* The environment variable is still configured. */
2194 		sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
2195 		err = errno ? errno : ENODEV;
2196 		/*
2197 		 * The environment variable is not needed anymore,
2198 		 * all device creation attempts are completed.
2199 		 */
2200 		mlx5_restore_doorbell_mapping_env(dbmap_env);
2201 		if (!sh->ctx)
2202 			return err;
2203 		DRV_LOG(DEBUG, "DevX is NOT supported");
2204 		err = 0;
2205 	}
2206 	return err;
2207 }
2208 
2209 /**
2210  * Install shared asynchronous device events handler.
2211  * This function is implemented to support event sharing
2212  * between multiple ports of a single IB device.
2213  *
2214  * @param sh
2215  *   Pointer to mlx5_dev_ctx_shared object.
2216  */
2217 void
2218 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
2219 {
2220 	int ret;
2221 	int flags;
2222 
2223 	sh->intr_handle.fd = -1;
2224 	flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL);
2225 	ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd,
2226 		    F_SETFL, flags | O_NONBLOCK);
2227 	if (ret) {
2228 		DRV_LOG(INFO, "failed to set the async event queue file"
2229 			" descriptor to non-blocking");
2230 	} else {
2231 		sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd;
2232 		sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
2233 		if (rte_intr_callback_register(&sh->intr_handle,
2234 					mlx5_dev_interrupt_handler, sh)) {
2235 			DRV_LOG(INFO, "Failed to install the shared interrupt handler.");
2236 			sh->intr_handle.fd = -1;
2237 		}
2238 	}
2239 	if (sh->devx) {
2240 #ifdef HAVE_IBV_DEVX_ASYNC
2241 		sh->intr_handle_devx.fd = -1;
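		/*
		 * Create the DevX command completion object; its file
		 * descriptor delivers asynchronous DevX command completions
		 * (e.g. batched flow counter queries).
		 */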
2242 		sh->devx_comp =
2243 			(void *)mlx5_glue->devx_create_cmd_comp(sh->ctx);
2244 		struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
2245 		if (!devx_comp) {
2246 			DRV_LOG(INFO, "failed to allocate devx_comp.");
2247 			return;
2248 		}
2249 		flags = fcntl(devx_comp->fd, F_GETFL);
2250 		ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK);
2251 		if (ret) {
2252 			DRV_LOG(INFO, "failed to set the DevX completion file"
2253 				" descriptor to non-blocking");
2254 			return;
2255 		}
2256 		sh->intr_handle_devx.fd = devx_comp->fd;
2257 		sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
2258 		if (rte_intr_callback_register(&sh->intr_handle_devx,
2259 					mlx5_dev_interrupt_handler_devx, sh)) {
2260 			DRV_LOG(INFO, "Failed to install the DevX shared"
2261 				" interrupt handler.");
2262 			sh->intr_handle_devx.fd = -1;
2263 		}
2264 #endif /* HAVE_IBV_DEVX_ASYNC */
2265 	}
2266 }
2267 
2268 /**
2269  * Uninstall shared asynchronous device events handler.
2270  * This function is implemented to support event sharing
2271  * between multiple ports of a single IB device.
2272  *
2273  * @param sh
2274  *   Pointer to mlx5_dev_ctx_shared object.
2275  */
2276 void
2277 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
2278 {
2279 	if (sh->intr_handle.fd >= 0)
2280 		mlx5_intr_callback_unregister(&sh->intr_handle,
2281 					      mlx5_dev_interrupt_handler, sh);
2282 #ifdef HAVE_IBV_DEVX_ASYNC
2283 	if (sh->intr_handle_devx.fd >= 0)
2284 		rte_intr_callback_unregister(&sh->intr_handle_devx,
2285 				  mlx5_dev_interrupt_handler_devx, sh);
2286 	if (sh->devx_comp)
2287 		mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
2288 #endif
2289 }
2290 
2291 /**
2292  * Read statistics by a named counter.
2293  *
2294  * @param[in] priv
2295  *   Pointer to the private device data structure.
2296  * @param[in] ctr_name
2297  *   Pointer to the name of the statistic counter to read
2298  * @param[out] stat
2299  *   Pointer to read statistic value.
2300  * @return
2301  *   0 on success and stat is valid, 1 if the value could not be read,
2302  *   in which case rte_errno is set.
2303  *
2304  */
2305 int
2306 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
2307 		      uint64_t *stat)
2308 {
2309 	int fd;
2310 
2311 	if (priv->sh) {
2312 		MKSTR(path, "%s/ports/%d/hw_counters/%s",
2313 		      priv->sh->ibdev_path,
2314 		      priv->dev_port,
2315 		      ctr_name);
2316 		fd = open(path, O_RDONLY);
2317 		/*
2318 		 * in switchdev the file location is not per port
2319 		 * but rather in <ibdev_path>/hw_counters/<file_name>.
2320 		 */
2321 		if (fd == -1) {
2322 			MKSTR(path1, "%s/hw_counters/%s",
2323 			      priv->sh->ibdev_path,
2324 			      ctr_name);
2325 			fd = open(path1, O_RDONLY);
2326 		}
2327 		if (fd != -1) {
2328 			char buf[21] = {'\0'};
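			/* 21 bytes: up to 20 decimal digits of a 64-bit counter plus NUL. */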
2329 			ssize_t n = read(fd, buf, sizeof(buf));
2330 
2331 			close(fd);
2332 			if (n != -1) {
2333 				*stat = strtoull(buf, NULL, 10);
2334 				return 0;
2335 			}
2336 		}
2337 	}
2338 	*stat = 0;
2339 	return 1;
2340 }
2341 
2342 /**
2343  * Set the reg_mr and dereg_mr callbacks.
2344  *
2345  * @param[out] reg_mr_cb
2346  *   Pointer to the reg_mr function.
2347  * @param[out] dereg_mr_cb
2348  *   Pointer to the dereg_mr function.
2349  *
2350  */
2351 void
2352 mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
2353 		      mlx5_dereg_mr_t *dereg_mr_cb)
2354 {
2355 	*reg_mr_cb = mlx5_verbs_ops.reg_mr;
2356 	*dereg_mr_cb = mlx5_verbs_ops.dereg_mr;
2357 }
2358 
2359 /**
2360  * Remove a MAC address from the device.
2361  *
2362  * @param dev
2363  *   Pointer to Ethernet device structure.
2364  * @param index
2365  *   MAC address index.
2366  */
2367 void
2368 mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
2369 {
2370 	struct mlx5_priv *priv = dev->data->dev_private;
2371 	const int vf = priv->config.vf;
2372 
2373 	if (vf)
2374 		mlx5_nl_mac_addr_remove(priv->nl_socket_route,
2375 					mlx5_ifindex(dev), priv->mac_own,
2376 					&dev->data->mac_addrs[index], index);
2377 }
2378 
2379 /**
2380  * Add a MAC address to the device.
2381  *
2382  * @param dev
2383  *   Pointer to Ethernet device structure.
2384  * @param mac_addr
2385  *   MAC address to register.
2386  * @param index
2387  *   MAC address index.
2388  *
2389  * @return
2390  *   0 on success, a negative errno value otherwise
2391  */
2392 int
2393 mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
2394 		     uint32_t index)
2395 {
2396 	struct mlx5_priv *priv = dev->data->dev_private;
2397 	const int vf = priv->config.vf;
2398 	int ret = 0;
2399 
2400 	if (vf)
2401 		ret = mlx5_nl_mac_addr_add(priv->nl_socket_route,
2402 					   mlx5_ifindex(dev), priv->mac_own,
2403 					   mac, index);
2404 	return ret;
2405 }
2406 
2407 /**
2408  * Modify a VF MAC address
2409  *
2410  * @param priv
2411  *   Pointer to device private data.
2412  * @param mac_addr
2413  *   MAC address to modify into.
2414  * @param iface_idx
2415  *   Net device interface index
2416  * @param vf_index
2417  *   VF index
2418  *
2419  * @return
2420  *   0 on success, a negative errno value otherwise
2421  */
2422 int
2423 mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
2424 			   unsigned int iface_idx,
2425 			   struct rte_ether_addr *mac_addr,
2426 			   int vf_index)
2427 {
2428 	return mlx5_nl_vf_mac_addr_modify
2429 		(priv->nl_socket_route, iface_idx, mac_addr, vf_index);
2430 }
2431 
2432 /**
2433  * Set device promiscuous mode
2434  *
2435  * @param dev
2436  *   Pointer to Ethernet device structure.
2437  * @param enable
2438  *   0 - promiscuous is disabled, otherwise - enabled
2439  *
2440  * @return
2441  *   0 on success, a negative error value otherwise
2442  */
2443 int
2444 mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
2445 {
2446 	struct mlx5_priv *priv = dev->data->dev_private;
2447 
2448 	return mlx5_nl_promisc(priv->nl_socket_route,
2449 			       mlx5_ifindex(dev), !!enable);
2450 }
2451 
2452 /**
2453  * Set device allmulticast mode
2454  *
2455  * @param dev
2456  *   Pointer to Ethernet device structure.
2457  * @param enable
2458  *   0 - all multicast is disabled, otherwise - enabled
2459  *
2460  * @return
2461  *   0 on success, a negative error value otherwise
2462  */
2463 int
2464 mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
2465 {
2466 	struct mlx5_priv *priv = dev->data->dev_private;
2467 
2468 	return mlx5_nl_allmulti(priv->nl_socket_route,
2469 				mlx5_ifindex(dev), !!enable);
2470 }
2471 
2472 /**
2473  * Flush device MAC addresses
2474  *
2475  * @param dev
2476  *   Pointer to Ethernet device structure.
2477  *
2478  */
2479 void
2480 mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
2481 {
2482 	struct mlx5_priv *priv = dev->data->dev_private;
2483 
2484 	mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
2485 			       dev->data->mac_addrs,
2486 			       MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
2487 }
2488 
2489 const struct eth_dev_ops mlx5_os_dev_ops = {
2490 	.dev_configure = mlx5_dev_configure,
2491 	.dev_start = mlx5_dev_start,
2492 	.dev_stop = mlx5_dev_stop,
2493 	.dev_set_link_down = mlx5_set_link_down,
2494 	.dev_set_link_up = mlx5_set_link_up,
2495 	.dev_close = mlx5_dev_close,
2496 	.promiscuous_enable = mlx5_promiscuous_enable,
2497 	.promiscuous_disable = mlx5_promiscuous_disable,
2498 	.allmulticast_enable = mlx5_allmulticast_enable,
2499 	.allmulticast_disable = mlx5_allmulticast_disable,
2500 	.link_update = mlx5_link_update,
2501 	.stats_get = mlx5_stats_get,
2502 	.stats_reset = mlx5_stats_reset,
2503 	.xstats_get = mlx5_xstats_get,
2504 	.xstats_reset = mlx5_xstats_reset,
2505 	.xstats_get_names = mlx5_xstats_get_names,
2506 	.fw_version_get = mlx5_fw_version_get,
2507 	.dev_infos_get = mlx5_dev_infos_get,
2508 	.read_clock = mlx5_txpp_read_clock,
2509 	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
2510 	.vlan_filter_set = mlx5_vlan_filter_set,
2511 	.rx_queue_setup = mlx5_rx_queue_setup,
2512 	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
2513 	.tx_queue_setup = mlx5_tx_queue_setup,
2514 	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
2515 	.rx_queue_release = mlx5_rx_queue_release,
2516 	.tx_queue_release = mlx5_tx_queue_release,
2517 	.rx_queue_start = mlx5_rx_queue_start,
2518 	.rx_queue_stop = mlx5_rx_queue_stop,
2519 	.tx_queue_start = mlx5_tx_queue_start,
2520 	.tx_queue_stop = mlx5_tx_queue_stop,
2521 	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
2522 	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
2523 	.mac_addr_remove = mlx5_mac_addr_remove,
2524 	.mac_addr_add = mlx5_mac_addr_add,
2525 	.mac_addr_set = mlx5_mac_addr_set,
2526 	.set_mc_addr_list = mlx5_set_mc_addr_list,
2527 	.mtu_set = mlx5_dev_set_mtu,
2528 	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
2529 	.vlan_offload_set = mlx5_vlan_offload_set,
2530 	.reta_update = mlx5_dev_rss_reta_update,
2531 	.reta_query = mlx5_dev_rss_reta_query,
2532 	.rss_hash_update = mlx5_rss_hash_update,
2533 	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
2534 	.filter_ctrl = mlx5_dev_filter_ctrl,
2535 	.rxq_info_get = mlx5_rxq_info_get,
2536 	.txq_info_get = mlx5_txq_info_get,
2537 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2538 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2539 	.rx_queue_intr_enable = mlx5_rx_intr_enable,
2540 	.rx_queue_intr_disable = mlx5_rx_intr_disable,
2541 	.is_removed = mlx5_is_removed,
2542 	.udp_tunnel_port_add  = mlx5_udp_tunnel_port_add,
2543 	.get_module_info = mlx5_get_module_info,
2544 	.get_module_eeprom = mlx5_get_module_eeprom,
2545 	.hairpin_cap_get = mlx5_hairpin_cap_get,
2546 	.mtr_ops_get = mlx5_flow_meter_ops_get,
2547 };
2548 
2549 /* Available operations from secondary process. */
2550 const struct eth_dev_ops mlx5_os_dev_sec_ops = {
2551 	.stats_get = mlx5_stats_get,
2552 	.stats_reset = mlx5_stats_reset,
2553 	.xstats_get = mlx5_xstats_get,
2554 	.xstats_reset = mlx5_xstats_reset,
2555 	.xstats_get_names = mlx5_xstats_get_names,
2556 	.fw_version_get = mlx5_fw_version_get,
2557 	.dev_infos_get = mlx5_dev_infos_get,
2558 	.read_clock = mlx5_txpp_read_clock,
2559 	.rx_queue_start = mlx5_rx_queue_start,
2560 	.rx_queue_stop = mlx5_rx_queue_stop,
2561 	.tx_queue_start = mlx5_tx_queue_start,
2562 	.tx_queue_stop = mlx5_tx_queue_stop,
2563 	.rxq_info_get = mlx5_rxq_info_get,
2564 	.txq_info_get = mlx5_txq_info_get,
2565 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2566 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2567 	.get_module_info = mlx5_get_module_info,
2568 	.get_module_eeprom = mlx5_get_module_eeprom,
2569 };
2570 
2571 /* Available operations in flow isolated mode. */
2572 const struct eth_dev_ops mlx5_os_dev_ops_isolate = {
2573 	.dev_configure = mlx5_dev_configure,
2574 	.dev_start = mlx5_dev_start,
2575 	.dev_stop = mlx5_dev_stop,
2576 	.dev_set_link_down = mlx5_set_link_down,
2577 	.dev_set_link_up = mlx5_set_link_up,
2578 	.dev_close = mlx5_dev_close,
2579 	.promiscuous_enable = mlx5_promiscuous_enable,
2580 	.promiscuous_disable = mlx5_promiscuous_disable,
2581 	.allmulticast_enable = mlx5_allmulticast_enable,
2582 	.allmulticast_disable = mlx5_allmulticast_disable,
2583 	.link_update = mlx5_link_update,
2584 	.stats_get = mlx5_stats_get,
2585 	.stats_reset = mlx5_stats_reset,
2586 	.xstats_get = mlx5_xstats_get,
2587 	.xstats_reset = mlx5_xstats_reset,
2588 	.xstats_get_names = mlx5_xstats_get_names,
2589 	.fw_version_get = mlx5_fw_version_get,
2590 	.dev_infos_get = mlx5_dev_infos_get,
2591 	.read_clock = mlx5_txpp_read_clock,
2592 	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
2593 	.vlan_filter_set = mlx5_vlan_filter_set,
2594 	.rx_queue_setup = mlx5_rx_queue_setup,
2595 	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
2596 	.tx_queue_setup = mlx5_tx_queue_setup,
2597 	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
2598 	.rx_queue_release = mlx5_rx_queue_release,
2599 	.tx_queue_release = mlx5_tx_queue_release,
2600 	.rx_queue_start = mlx5_rx_queue_start,
2601 	.rx_queue_stop = mlx5_rx_queue_stop,
2602 	.tx_queue_start = mlx5_tx_queue_start,
2603 	.tx_queue_stop = mlx5_tx_queue_stop,
2604 	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
2605 	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
2606 	.mac_addr_remove = mlx5_mac_addr_remove,
2607 	.mac_addr_add = mlx5_mac_addr_add,
2608 	.mac_addr_set = mlx5_mac_addr_set,
2609 	.set_mc_addr_list = mlx5_set_mc_addr_list,
2610 	.mtu_set = mlx5_dev_set_mtu,
2611 	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
2612 	.vlan_offload_set = mlx5_vlan_offload_set,
2613 	.filter_ctrl = mlx5_dev_filter_ctrl,
2614 	.rxq_info_get = mlx5_rxq_info_get,
2615 	.txq_info_get = mlx5_txq_info_get,
2616 	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
2617 	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
2618 	.rx_queue_intr_enable = mlx5_rx_intr_enable,
2619 	.rx_queue_intr_disable = mlx5_rx_intr_disable,
2620 	.is_removed = mlx5_is_removed,
2621 	.get_module_info = mlx5_get_module_info,
2622 	.get_module_eeprom = mlx5_get_module_eeprom,
2623 	.hairpin_cap_get = mlx5_hairpin_cap_get,
2624 	.mtr_ops_get = mlx5_flow_meter_ops_get,
2625 };
2626