/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_kvargs.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"

/*
 * Device parameter to configure the minimal number of Tx queues at which
 * inline send is enabled.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/* Device parameter to include 2 dsegs in the title WQEBB. */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/* Device parameter to limit the maximum packet size for inlining. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/* Device parameter to enable hardware TSO offload. */
#define MLX5_TSO "tso"

/* Device parameter to enable vectorized Tx. */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable vectorized Rx. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Default PMD specific parameter value. */
#define MLX5_ARG_UNSET (-1)

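/*
 * Illustrative example (not part of the original source): the keys above
 * are consumed as DPDK device arguments appended to the PCI address on the
 * EAL command line, e.g. (hypothetical address and values):
 *
 *   testpmd -w 0000:03:00.0,txq_inline=128,txqs_min_inline=4,txq_mpw_en=1
 *
 * Each key=value pair is parsed by mlx5_args()/mlx5_args_check() below
 * into a struct mlx5_args.
 */
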
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

struct mlx5_args {
	int cqe_comp;
	int txq_inline;
	int txqs_inline;
	int mps;
	int mpw_hdr_dseg;
	int inline_max_packet_sz;
	int tso;
	int tx_vec_en;
	int rx_vec_en;
};

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}

/**
 * Verbs callback to allocate memory. The space must be allocated from
 * within a huge page and must respect the alignment required by libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   A pointer to the allocated space, NULL on failure.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);

	assert(data != NULL);
	assert(!mlx5_is_secondary());
	ret = rte_malloc_socket(__func__, size, alignment,
				priv->dev->device->numa_node);
	DEBUG("Extern alloc size: %zu, align: %zu: %p", size, alignment, ret);
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	assert(!mlx5_is_secondary());
	DEBUG("Extern free request: %p", ptr);
	rte_free(ptr);
}
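
/*
 * Note: the two callbacks above are handed to libmlx5 later in
 * mlx5_pci_probe(), through a struct mlx5dv_ctx_allocators passed to
 * mlx5dv_set_context_attr() with MLX5DV_CTX_ATTR_BUF_ALLOCATORS, so that
 * data plane resources are served from DPDK huge-page memory.
 */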

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = mlx5_get_priv(dev);
	unsigned int i;
	int ret;

	priv_lock(priv);
	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	priv_dev_interrupt_handler_uninstall(priv, dev);
	priv_dev_traffic_disable(priv, dev);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_priv_rxq_release(priv, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_priv_txq_release(priv, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(ibv_dealloc_pd(priv->pd));
		claim_zero(ibv_close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	priv_socket_uninit(priv);
	ret = mlx5_priv_hrxq_ibv_verify(priv);
	if (ret)
		WARN("%p: some hash Rx queues still remain", (void *)priv);
	ret = mlx5_priv_ind_table_ibv_verify(priv);
	if (ret)
		WARN("%p: some indirection tables still remain", (void *)priv);
	ret = mlx5_priv_rxq_ibv_verify(priv);
	if (ret)
		WARN("%p: some Verbs Rx queues still remain", (void *)priv);
	ret = mlx5_priv_rxq_verify(priv);
	if (ret)
		WARN("%p: some Rx queues still remain", (void *)priv);
	ret = mlx5_priv_txq_ibv_verify(priv);
	if (ret)
		WARN("%p: some Verbs Tx queues still remain", (void *)priv);
	ret = mlx5_priv_txq_verify(priv);
	if (ret)
		WARN("%p: some Tx queues still remain", (void *)priv);
	ret = priv_flow_verify(priv);
	if (ret)
		WARN("%p: some flows still remain", (void *)priv);
	ret = priv_mr_verify(priv);
	if (ret)
		WARN("%p: some memory regions still remain", (void *)priv);
	priv_unlock(priv);
	memset(priv, 0, sizeof(*priv));
}

const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
};

/* Available operations in the secondary process. */
static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
};

static struct {
	struct rte_pci_addr pci_addr; /* Associated PCI address. */
	uint32_t ports; /* Physical ports bitfield. */
} mlx5_dev[32];

/**
 * Get device index in mlx5_dev[] from PCI bus address.
 *
 * @param[in] pci_addr
 *   PCI bus address to look for.
 *
 * @return
 *   Index of the matching entry in mlx5_dev[], otherwise the first unused
 *   slot, or -1 when the table is full.
 */
static int
mlx5_dev_idx(struct rte_pci_addr *pci_addr)
{
	unsigned int i;
	int ret = -1;

	assert(pci_addr != NULL);
	for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
		if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
		    (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
		    (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
		    (mlx5_dev[i].pci_addr.function == pci_addr->function))
			return i;
		if ((mlx5_dev[i].ports == 0) && (ret == -1))
			ret = i;
	}
	return ret;
}

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a positive errno value otherwise.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_args *args = opaque;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		WARN("%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		args->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		args->txq_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		args->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		args->mps = !!tmp;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		args->mpw_hdr_dseg = !!tmp;
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		args->inline_max_packet_sz = tmp;
	} else if (strcmp(MLX5_TSO, key) == 0) {
		args->tso = !!tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		args->tx_vec_en = !!tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		args->rx_vec_en = !!tmp;
	} else {
		WARN("%s: unknown parameter", key);
		/* Positive errno, consistent with the strtoul() failure
		 * above and the strerror() reporting in mlx5_pci_probe(). */
		return EINVAL;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param[out] args
 *   Pointer to the arguments structure to fill.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
{
	/* Following UGLY cast (compound literal) is done to pass checkpatch. */
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_TXQ_INLINE,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TSO,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL)
		return 0;
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, args);
			if (ret != 0) {
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}
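
/*
 * Illustrative sketch (not part of the original code): given the devargs
 * string "txq_mpw_en=1,txq_inline=128", rte_kvargs_parse() splits it into
 * the pairs {"txq_mpw_en", "1"} and {"txq_inline", "128"}, and each
 * rte_kvargs_process() call above invokes mlx5_args_check() once per
 * matching pair, e.g. mlx5_args_check("txq_inline", "128", args) sets
 * args->txq_inline = 128. Fields never mentioned keep MLX5_ARG_UNSET,
 * since mlx5_pci_probe() initializes the structure that way.
 */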

static struct rte_pci_driver mlx5_driver;

/**
 * Assign parameters from args into priv; only non-default values are
 * considered.
 *
 * @param[out] priv
 *   Pointer to private structure.
 * @param[in] args
 *   Pointer to args values.
 */
static void
mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
{
	if (args->cqe_comp != MLX5_ARG_UNSET)
		priv->cqe_comp = args->cqe_comp;
	if (args->txq_inline != MLX5_ARG_UNSET)
		priv->txq_inline = args->txq_inline;
	if (args->txqs_inline != MLX5_ARG_UNSET)
		priv->txqs_inline = args->txqs_inline;
	if (args->mps != MLX5_ARG_UNSET)
		/* MPW can only be disabled here; enabling it still requires
		 * hardware support detected at probe time. */
		priv->mps = args->mps ? priv->mps : 0;
	if (args->mpw_hdr_dseg != MLX5_ARG_UNSET)
		priv->mpw_hdr_dseg = args->mpw_hdr_dseg;
	if (args->inline_max_packet_sz != MLX5_ARG_UNSET)
		priv->inline_max_packet_sz = args->inline_max_packet_sz;
	if (args->tso != MLX5_ARG_UNSET)
		priv->tso = args->tso;
	if (args->tx_vec_en != MLX5_ARG_UNSET)
		priv->tx_vec_en = args->tx_vec_en;
	if (args->rx_vec_en != MLX5_ARG_UNSET)
		priv->rx_vec_en = args->rx_vec_en;
}
522 
523 /**
524  * DPDK callback to register a PCI device.
525  *
526  * This function creates an Ethernet device for each port of a given
527  * PCI device.
528  *
529  * @param[in] pci_drv
530  *   PCI driver structure (mlx5_driver).
531  * @param[in] pci_dev
532  *   PCI device information.
533  *
534  * @return
535  *   0 on success, negative errno value on failure.
536  */
537 static int
538 mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
539 {
540 	struct ibv_device **list;
541 	struct ibv_device *ibv_dev;
542 	int err = 0;
543 	struct ibv_context *attr_ctx = NULL;
544 	struct ibv_device_attr_ex device_attr;
545 	unsigned int sriov;
546 	unsigned int mps;
547 	unsigned int cqe_comp;
548 	unsigned int tunnel_en = 0;
549 	int idx;
550 	int i;
551 	struct mlx5dv_context attrs_out;
552 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
553 	struct ibv_counter_set_description cs_desc;
554 #endif
555 
	(void)pci_drv;
	assert(pci_drv == &mlx5_driver);
	/* Get mlx5_dev[] index. */
	idx = mlx5_dev_idx(&pci_dev->addr);
	if (idx == -1) {
		ERROR("this driver cannot support any more adapters");
		return -ENOMEM;
	}
	DEBUG("using driver device index %d", idx);

	/* Save PCI address. */
	mlx5_dev[idx].pci_addr = pci_dev->addr;
	list = ibv_get_device_list(&i);
	if (list == NULL) {
		assert(errno);
		if (errno == ENOSYS)
			ERROR("cannot list devices, is ib_uverbs loaded?");
		return -errno;
	}
	assert(i >= 0);
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		sriov = ((pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
		      (pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) ||
		      (pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
		      (pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
		switch (pci_dev->id.device_id) {
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
			tunnel_en = 1;
			break;
		default:
			break;
		}
		INFO("PCI information matches, using device \"%s\""
		     " (SR-IOV: %s)",
		     list[i]->name,
		     sriov ? "true" : "false");
		attr_ctx = ibv_open_device(list[i]);
		err = errno;
		break;
	}
	if (attr_ctx == NULL) {
		ibv_free_device_list(list);
		switch (err) {
		case 0:
			ERROR("cannot access device, is mlx5_ib loaded?");
			return -ENODEV;
		case EINVAL:
			ERROR("cannot use device, are drivers up to date?");
			return -EINVAL;
		}
		assert(err > 0);
		return -err;
	}
	ibv_dev = list[i];

	DEBUG("device opened");
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
	mlx5dv_query_device(attr_ctx, &attrs_out);
	if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DEBUG("Enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DEBUG("MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DEBUG("MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
	if (ibv_query_device_ex(attr_ctx, NULL, &device_attr))
		goto error;
	INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);

	for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
		uint32_t port = i + 1; /* ports are indexed from one */
		uint32_t test = (1 << i);
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct priv *priv = NULL;
		struct rte_eth_dev *eth_dev;
		struct ibv_device_attr_ex device_attr_ex;
		struct ether_addr mac;
		uint16_t num_vfs = 0;
		/* Shadows the function-scope device_attr on purpose. */
		struct ibv_device_attr_ex device_attr;
		struct mlx5_args args = {
			.cqe_comp = MLX5_ARG_UNSET,
			.txq_inline = MLX5_ARG_UNSET,
			.txqs_inline = MLX5_ARG_UNSET,
			.mps = MLX5_ARG_UNSET,
			.mpw_hdr_dseg = MLX5_ARG_UNSET,
			.inline_max_packet_sz = MLX5_ARG_UNSET,
			.tso = MLX5_ARG_UNSET,
			.tx_vec_en = MLX5_ARG_UNSET,
			.rx_vec_en = MLX5_ARG_UNSET,
		};

		mlx5_dev[idx].ports |= test;

		if (mlx5_is_secondary()) {
			/* from rte_ethdev.c */
			char name[RTE_ETH_NAME_MAX_LEN];

			snprintf(name, sizeof(name), "%s port %u",
				 ibv_get_device_name(ibv_dev), port);
			eth_dev = rte_eth_dev_attach_secondary(name);
			if (eth_dev == NULL) {
				ERROR("can not attach rte ethdev");
				err = ENOMEM;
				goto error;
			}
			eth_dev->device = &pci_dev->device;
			eth_dev->dev_ops = &mlx5_dev_sec_ops;
			priv = eth_dev->data->dev_private;
			/* Receive command fd from primary process. */
			err = priv_socket_connect(priv);
			if (err < 0) {
				err = -err;
				goto error;
			}
			/* Remap UAR for Tx queues; on success err holds the
			 * fd received from the primary process. */
			err = priv_tx_uar_remap(priv, err);
			if (err < 0) {
				err = -err;
				goto error;
			}
			priv_dev_select_rx_function(priv, eth_dev);
			priv_dev_select_tx_function(priv, eth_dev);
			continue;
		}

		DEBUG("using port %u (%08" PRIx32 ")", port, test);

		ctx = ibv_open_device(ibv_dev);
		if (ctx == NULL) {
			err = ENODEV;
			goto port_error;
		}

		ibv_query_device_ex(ctx, NULL, &device_attr);
		/* Check port status. */
		err = ibv_query_port(ctx, port, &port_attr);
		if (err) {
			ERROR("port query failed: %s", strerror(err));
			goto port_error;
		}

		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			ERROR("port %d is not configured in Ethernet mode",
			      port);
			err = EINVAL;
			goto port_error;
		}

		if (port_attr.state != IBV_PORT_ACTIVE)
			DEBUG("port %d is not active: \"%s\" (%d)",
			      port, ibv_port_state_str(port_attr.state),
			      port_attr.state);

		/* Allocate protection domain. */
		pd = ibv_alloc_pd(ctx);
		if (pd == NULL) {
			ERROR("PD allocation failure");
			err = ENOMEM;
			goto port_error;
		}

		mlx5_dev[idx].ports |= test;

		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			ERROR("priv allocation failure");
			err = ENOMEM;
			goto port_error;
		}

		priv->ctx = ctx;
		strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
			sizeof(priv->ibdev_path));
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		priv->mps = mps; /* Enable MPW by default if supported. */
		priv->cqe_comp = cqe_comp;
		priv->tunnel_en = tunnel_en;
		/* Enable vector by default if supported. */
		priv->tx_vec_en = 1;
		priv->rx_vec_en = 1;
		err = mlx5_args(&args, pci_dev->device.devargs);
		if (err) {
			ERROR("failed to process device arguments: %s",
			      strerror(err));
			goto port_error;
		}
		mlx5_args_assign(priv, &args);
		if (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {
			ERROR("ibv_query_device_ex() failed");
			goto port_error;
		}

		priv->hw_csum =
			!!(device_attr_ex.device_cap_flags_ex &
			   IBV_DEVICE_RAW_IP_CSUM);
		DEBUG("checksum offloading is %ssupported",
		      (priv->hw_csum ? "" : "not "));

#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
		priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
					 IBV_DEVICE_VXLAN_SUPPORT);
#endif
		DEBUG("L2 tunnel checksum offloads are %ssupported",
		      (priv->hw_csum_l2tun ? "" : "not "));

#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
		priv->counter_set_supported = !!(device_attr.max_counter_sets);
		ibv_describe_counter_set(ctx, 0, &cs_desc);
		DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
		      cs_desc.counter_type, cs_desc.num_of_cs,
		      cs_desc.attributes);
#endif
		priv->ind_table_max_size =
			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
		/* Remove this check once DPDK supports larger/variable
		 * indirection tables. */
		if (priv->ind_table_max_size >
				(unsigned int)ETH_RSS_RETA_SIZE_512)
			priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
		DEBUG("maximum RX indirection table size is %u",
		      priv->ind_table_max_size);
		priv->hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
					 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
		DEBUG("VLAN stripping is %ssupported",
		      (priv->hw_vlan_strip ? "" : "not "));

		/* Check the raw packet capability, not a WQ creation flag. */
		priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
					IBV_RAW_PACKET_CAP_SCATTER_FCS);
		DEBUG("FCS stripping configuration is %ssupported",
		      (priv->hw_fcs_strip ? "" : "not "));

#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
		priv->hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
		DEBUG("hardware RX end alignment padding is %ssupported",
		      (priv->hw_padding ? "" : "not "));

		priv_get_num_vfs(priv, &num_vfs);
		priv->sriov = (num_vfs || sriov);
		priv->tso = ((priv->tso) &&
			    (device_attr_ex.tso_caps.max_tso > 0) &&
			    (device_attr_ex.tso_caps.supported_qpts &
			    (1 << IBV_QPT_RAW_PACKET)));
		if (priv->tso)
			priv->max_tso_payload_sz =
				device_attr_ex.tso_caps.max_tso;
		if (priv->mps && !mps) {
			ERROR("multi-packet send not supported on this device"
			      " (" MLX5_TXQ_MPW_EN ")");
			err = ENOTSUP;
			goto port_error;
		} else if (priv->mps && priv->tso) {
			WARN("multi-packet send not supported in conjunction "
			      "with TSO. MPS disabled");
			priv->mps = 0;
		}
		INFO("%sMPS is %s",
		     priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
		     priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
		/* Set default values for Enhanced MPW, a.k.a. MPWv2. */
		if (priv->mps == MLX5_MPW_ENHANCED) {
			if (args.txqs_inline == MLX5_ARG_UNSET)
				priv->txqs_inline = MLX5_EMPW_MIN_TXQS;
			if (args.inline_max_packet_sz == MLX5_ARG_UNSET)
				priv->inline_max_packet_sz =
					MLX5_EMPW_MAX_INLINE_LEN;
			if (args.txq_inline == MLX5_ARG_UNSET)
				priv->txq_inline = MLX5_WQE_SIZE_MAX -
						   MLX5_WQE_SIZE;
		}
		if (priv->cqe_comp && !cqe_comp) {
			WARN("Rx CQE compression isn't supported");
			priv->cqe_comp = 0;
		}
		/* Configure the first MAC address by default. */
		if (priv_get_mac(priv, &mac.addr_bytes)) {
			ERROR("cannot get MAC address, is mlx5_en loaded?"
			      " (errno: %s)", strerror(errno));
			err = ENODEV;
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     priv->port,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
		{
			char ifname[IF_NAMESIZE];

			if (priv_get_ifname(priv, &ifname) == 0)
				DEBUG("port %u ifname is \"%s\"",
				      priv->port, ifname);
			else
				DEBUG("port %u ifname is unknown", priv->port);
		}
#endif
		/* Get actual MTU if possible. */
		priv_get_mtu(priv, &priv->mtu);
		DEBUG("port %u MTU is %u", priv->port, priv->mtu);

		/* from rte_ethdev.c */
		{
			char name[RTE_ETH_NAME_MAX_LEN];

			snprintf(name, sizeof(name), "%s port %u",
				 ibv_get_device_name(ibv_dev), port);
			eth_dev = rte_eth_dev_allocate(name);
		}
		if (eth_dev == NULL) {
			ERROR("can not allocate rte ethdev");
			err = ENOMEM;
			goto port_error;
		}
		eth_dev->data->dev_private = priv;
		eth_dev->data->mac_addrs = priv->mac;
		eth_dev->device = &pci_dev->device;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		eth_dev->device->driver = &mlx5_driver.driver;
		priv->dev = eth_dev;
		eth_dev->dev_ops = &mlx5_dev_ops;
		/* Register MAC address. */
		claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
		TAILQ_INIT(&priv->flows);
		TAILQ_INIT(&priv->ctrl_flows);

		/* Hint libmlx5 to use PMD allocator for data plane resources. */
		struct mlx5dv_ctx_allocators alctr = {
			.alloc = &mlx5_alloc_verbs_buf,
			.free = &mlx5_free_verbs_buf,
			.data = priv,
		};
		mlx5dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
					(void *)((uintptr_t)&alctr));

		/* Bring Ethernet device up. */
		DEBUG("forcing Ethernet interface up");
		priv_set_flags(priv, ~IFF_UP, IFF_UP);
		mlx5_link_update(priv->dev, 1);
		continue;

port_error:
		if (priv)
			rte_free(priv);
		if (pd)
			claim_zero(ibv_dealloc_pd(pd));
		if (ctx)
			claim_zero(ibv_close_device(ctx));
		break;
	}

	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, DPDK ethdev), but nothing can be done about it
	 * as long as DPDK does not provide a way to deallocate an ethdev, nor
	 * a way to enumerate the registered ethdevs to free the previous ones.
	 */

	/* No port found, complain. */
	if (!mlx5_dev[idx].ports) {
		err = ENODEV;
		goto error;
	}

error:
	if (attr_ctx)
		claim_zero(ibv_close_device(attr_ctx));
	if (list)
		ibv_free_device_list(list);
	assert(err >= 0);
	return -err;
}

static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init);
static void
rte_mlx5_pmd_init(void)
{
	/* Build the static table for ptype conversion. */
	mlx5_set_ptype_table();
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	/* Match the size of Rx completion entries to the size of a cache line. */
	if (RTE_CACHE_LINE_SIZE == 128)
		setenv("MLX5_CQE_SIZE", "128", 0);
	ibv_fork_init();
	rte_pci_register(&mlx5_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");