/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <unistd.h>
#include <string.h>
#include <stdio.h>

#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_class.h>
#include <rte_malloc.h>
#include <rte_eal_paging.h>

#include "mlx5_common.h"
#include "mlx5_common_os.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_log.h"
#include "mlx5_common_defs.h"
#include "mlx5_common_private.h"

uint8_t haswell_broadwell_cpu;

/* On x86_64 Intel processors, check whether relaxed ordering
 * should be used.
 */
#ifdef RTE_ARCH_X86_64
/**
 * This function returns processor identification and feature information
 * into the registers.
 *
 * @param level
 *		The main category of information returned.
 * @param eax, ebx, ecx, edx
 *		Pointers to the registers that will hold CPU information.
 */
static inline void mlx5_cpu_id(unsigned int level,
				unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid\n\t"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (level));
}
#endif

RTE_LOG_REGISTER_DEFAULT(mlx5_common_logtype, NOTICE)

/* Head of list of drivers. */
static TAILQ_HEAD(mlx5_drivers, mlx5_class_driver) drivers_list =
				TAILQ_HEAD_INITIALIZER(drivers_list);

/* Head of devices. */
static TAILQ_HEAD(mlx5_devices, mlx5_common_device) devices_list =
				TAILQ_HEAD_INITIALIZER(devices_list);
static pthread_mutex_t devices_list_lock;

static const struct {
	const char *name;
	unsigned int drv_class;
} mlx5_classes[] = {
	{ .name = "vdpa", .drv_class = MLX5_CLASS_VDPA },
	{ .name = "eth", .drv_class = MLX5_CLASS_ETH },
	/* Keep class "net" for backward compatibility. */
	{ .name = "net", .drv_class = MLX5_CLASS_ETH },
	{ .name = "regex", .drv_class = MLX5_CLASS_REGEX },
	{ .name = "compress", .drv_class = MLX5_CLASS_COMPRESS },
	{ .name = "crypto", .drv_class = MLX5_CLASS_CRYPTO },
};

static int
class_name_to_value(const char *class_name)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(mlx5_classes); i++) {
		if (strcmp(class_name, mlx5_classes[i].name) == 0)
			return mlx5_classes[i].drv_class;
	}
	return -EINVAL;
}

static struct mlx5_class_driver *
driver_get(uint32_t class)
{
	struct mlx5_class_driver *driver;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((uint32_t)driver->drv_class == class)
			return driver;
	}
	return NULL;
}

/**
 * Verify and store value for devargs.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_common_dev_config *config = opaque;
	signed long tmp;

	errno = 0;
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
		return -rte_errno;
	}
	if (strcmp(key, "tx_db_nc") == 0) {
		if (tmp != MLX5_TXDB_CACHED &&
		    tmp != MLX5_TXDB_NCACHED &&
		    tmp != MLX5_TXDB_HEURISTIC) {
			DRV_LOG(ERR, "Invalid Tx doorbell mapping parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dbnc = tmp;
	} else if (strcmp(key, "mr_ext_memseg_en") == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else if (strcmp(key, "mr_mempool_reg_en") == 0) {
		config->mr_mempool_reg_en = !!tmp;
	} else if (strcmp(key, "sys_mem_en") == 0) {
		config->sys_mem_en = !!tmp;
	}
	return 0;
}

/**
 * Parse common device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 * @param config
 *   Pointer to device configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_config_get(struct rte_devargs *devargs,
		       struct mlx5_common_dev_config *config)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	/* Set defaults. */
	config->mr_ext_memseg_en = 1;
	config->mr_mempool_reg_en = 1;
	config->sys_mem_en = 0;
	config->dbnc = MLX5_ARG_UNSET;
	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = rte_kvargs_process(kvlist, NULL, mlx5_common_args_check_handler,
				 config);
	if (ret)
		ret = -rte_errno;
	rte_kvargs_free(kvlist);
	DRV_LOG(DEBUG, "mr_ext_memseg_en is %u.", config->mr_ext_memseg_en);
	DRV_LOG(DEBUG, "mr_mempool_reg_en is %u.", config->mr_mempool_reg_en);
	DRV_LOG(DEBUG, "sys_mem_en is %u.", config->sys_mem_en);
	DRV_LOG(DEBUG, "Tx doorbell mapping parameter is %d.", config->dbnc);
	return ret;
}
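
/*
 * Illustrative example (comment added, not in the original sources): the
 * parameters handled above arrive through EAL device arguments, e.g.:
 *
 *     dpdk-testpmd -a 0000:08:00.0,tx_db_nc=1,mr_ext_memseg_en=0
 *
 * The PCI address is hypothetical; each key=value pair is passed to
 * mlx5_common_args_check_handler() by rte_kvargs_process().
 */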

static int
devargs_class_handler(__rte_unused const char *key,
		      const char *class_names, void *opaque)
{
	int *ret = opaque;
	int class_val;
	char *scratch;
	char *found;
	char *refstr = NULL;

	*ret = 0;
	scratch = strdup(class_names);
	if (scratch == NULL) {
		*ret = -ENOMEM;
		return *ret;
	}
	found = strtok_r(scratch, ":", &refstr);
	if (found == NULL)
		/* Empty string. */
		goto err;
	do {
		/* Extract each individual class name. Multiple
		 * classes can be supplied as class=net:regex:foo:bar.
		 */
		class_val = class_name_to_value(found);
		/* Check if it's a valid class. */
		if (class_val < 0) {
			*ret = -EINVAL;
			goto err;
		}
		*ret |= class_val;
		found = strtok_r(NULL, ":", &refstr);
	} while (found != NULL);
err:
	free(scratch);
	if (*ret < 0)
		DRV_LOG(ERR, "Invalid mlx5 class options: %s.", class_names);
	return *ret;
}

static int
parse_class_options(const struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	if (devargs == NULL)
		return 0;
	if (devargs->cls != NULL && devargs->cls->name != NULL)
		/* Global syntax, only one class type. */
		return class_name_to_value(devargs->cls->name);
	/* Legacy devargs support multiple classes. */
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;
	rte_kvargs_process(kvlist, RTE_DEVARGS_KEY_CLASS,
			   devargs_class_handler, &ret);
	rte_kvargs_free(kvlist);
	return ret;
}
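
/*
 * Illustrative example (comment added, not in the original sources): the two
 * devargs syntaxes accepted above, shown with a hypothetical PCI address.
 * Global syntax, one class:      -a bus=pci,addr=08:00.0/class=eth
 * Legacy syntax, many classes:   -a 0000:08:00.0,class=vdpa:regex
 */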

static const unsigned int mlx5_class_invalid_combinations[] = {
	MLX5_CLASS_ETH | MLX5_CLASS_VDPA,
	/* New class combination should be added here. */
};

static int
is_valid_class_combination(uint32_t user_classes)
{
	unsigned int i;

	/* Verify if the user specified an unsupported combination. */
	for (i = 0; i < RTE_DIM(mlx5_class_invalid_combinations); i++) {
		if ((mlx5_class_invalid_combinations[i] & user_classes) ==
		    mlx5_class_invalid_combinations[i])
			return -EINVAL;
	}
	/* No invalid class combination found. */
	return 0;
}
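
/*
 * Example (comment added for clarity): with the table above,
 * MLX5_CLASS_ETH | MLX5_CLASS_VDPA is rejected as invalid, while e.g.
 * MLX5_CLASS_ETH | MLX5_CLASS_REGEX is accepted.
 */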

static bool
mlx5_bus_match(const struct mlx5_class_driver *drv,
	       const struct rte_device *dev)
{
	if (mlx5_dev_is_pci(dev))
		return mlx5_dev_pci_match(drv, dev);
	return true;
}

static struct mlx5_common_device *
to_mlx5_device(const struct rte_device *rte_dev)
{
	struct mlx5_common_device *cdev;

	TAILQ_FOREACH(cdev, &devices_list, next) {
		if (rte_dev == cdev->dev)
			return cdev;
	}
	return NULL;
}

int
mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
{
	struct rte_pci_addr pci_addr = { 0 };
	int ret;

	if (mlx5_dev_is_pci(dev)) {
		/* Input might be <BDF>, format PCI address to <DBDF>. */
		ret = rte_pci_addr_parse(dev->name, &pci_addr);
		if (ret != 0)
			return -ENODEV;
		rte_pci_device_name(&pci_addr, addr, size);
		return 0;
	}
#ifdef RTE_EXEC_ENV_LINUX
	return mlx5_auxiliary_get_pci_str(RTE_DEV_TO_AUXILIARY_CONST(dev),
			addr, size);
#else
	rte_errno = ENODEV;
	return -rte_errno;
#endif
}
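
/*
 * Example (comment added for clarity): for a PCI device named "08:00.0"
 * (BDF form), the function above returns the full DBDF representation
 * "0000:08:00.0" as produced by rte_pci_device_name().
 */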

/**
 * Register the mempool for the protection domain.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param mp
 *   Mempool being registered.
 * @param is_extmem
 *   Whether the mempool is for external memory.
 *
 * @return
 *   0 on success, (-1) on failure and rte_errno is set.
 */
static int
mlx5_dev_mempool_register(struct mlx5_common_device *cdev,
			  struct rte_mempool *mp, bool is_extmem)
{
	return mlx5_mr_mempool_register(cdev, mp, is_extmem);
}

/**
 * Unregister the mempool from the protection domain.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param mp
 *   Mempool being unregistered.
 */
void
mlx5_dev_mempool_unregister(struct mlx5_common_device *cdev,
			    struct rte_mempool *mp)
{
	if (mlx5_mr_mempool_unregister(cdev, mp) < 0)
		DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
			mp->name, cdev->pd, rte_strerror(rte_errno));
}

/**
 * rte_mempool_walk() callback to register mempools for the protection domain.
 *
 * @param mp
 *   The mempool being walked.
 * @param arg
 *   Pointer to the device shared context.
 */
static void
mlx5_dev_mempool_register_cb(struct rte_mempool *mp, void *arg)
{
	struct mlx5_common_device *cdev = arg;
	int ret;

	ret = mlx5_dev_mempool_register(cdev, mp, false);
	if (ret < 0 && rte_errno != EEXIST)
		DRV_LOG(ERR,
			"Failed to register existing mempool %s for PD %p: %s",
			mp->name, cdev->pd, rte_strerror(rte_errno));
}

/**
 * rte_mempool_walk() callback to unregister mempools
 * from the protection domain.
 *
 * @param mp
 *   The mempool being walked.
 * @param arg
 *   Pointer to the device shared context.
 */
static void
mlx5_dev_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
{
	mlx5_dev_mempool_unregister((struct mlx5_common_device *)arg, mp);
}

/**
 * Mempool life cycle callback for mlx5 common devices.
 *
 * @param event
 *   Mempool life cycle event.
 * @param mp
 *   Associated mempool.
 * @param arg
 *   Pointer to a device shared context.
 */
static void
mlx5_dev_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
			  void *arg)
{
	struct mlx5_common_device *cdev = arg;

	switch (event) {
	case RTE_MEMPOOL_EVENT_READY:
		if (mlx5_dev_mempool_register(cdev, mp, false) < 0)
			DRV_LOG(ERR,
				"Failed to register new mempool %s for PD %p: %s",
				mp->name, cdev->pd, rte_strerror(rte_errno));
		break;
	case RTE_MEMPOOL_EVENT_DESTROY:
		mlx5_dev_mempool_unregister(cdev, mp);
		break;
	}
}

int
mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev)
{
	int ret = 0;

	if (!cdev->config.mr_mempool_reg_en)
		return 0;
	rte_rwlock_write_lock(&cdev->mr_scache.mprwlock);
	if (cdev->mr_scache.mp_cb_registered)
		goto exit;
	/* Callback for this device may be already registered. */
	ret = rte_mempool_event_callback_register(mlx5_dev_mempool_event_cb,
						  cdev);
	if (ret != 0 && rte_errno != EEXIST)
		goto exit;
	/* Register mempools only once for this device. */
	if (ret == 0)
		rte_mempool_walk(mlx5_dev_mempool_register_cb, cdev);
	ret = 0;
	cdev->mr_scache.mp_cb_registered = 1;
exit:
	rte_rwlock_write_unlock(&cdev->mr_scache.mprwlock);
	return ret;
}

static void
mlx5_dev_mempool_unsubscribe(struct mlx5_common_device *cdev)
{
	int ret;

	if (!cdev->mr_scache.mp_cb_registered ||
	    !cdev->config.mr_mempool_reg_en)
		return;
	/* Stop watching for mempool events and unregister all mempools. */
	ret = rte_mempool_event_callback_unregister(mlx5_dev_mempool_event_cb,
						    cdev);
	if (ret == 0)
		rte_mempool_walk(mlx5_dev_mempool_unregister_cb, cdev);
}

/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
static void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_common_device *cdev;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		pthread_mutex_lock(&devices_list_lock);
		/* Iterate all the existing mlx5 devices. */
		TAILQ_FOREACH(cdev, &devices_list, next)
			mlx5_free_mr_by_addr(&cdev->mr_scache,
					     mlx5_os_get_ctx_device_name
								    (cdev->ctx),
					     addr, len);
		pthread_mutex_unlock(&devices_list_lock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

/**
 * Release all HW global resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 */
static void
mlx5_dev_hw_global_release(struct mlx5_common_device *cdev)
{
	if (cdev->pd != NULL) {
		claim_zero(mlx5_os_dealloc_pd(cdev->pd));
		cdev->pd = NULL;
	}
	if (cdev->ctx != NULL) {
		claim_zero(mlx5_glue->close_device(cdev->ctx));
		cdev->ctx = NULL;
	}
}

/**
 * Initialize all HW global resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 * @param classes
 *   Chosen classes coming from the user device arguments.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
{
	int ret;

	/* Create the device context. */
	ret = mlx5_os_open_device(cdev, classes);
	if (ret < 0)
		return ret;
	/* Allocate Protection Domain object and extract its pdn. */
	ret = mlx5_os_pd_create(cdev);
	if (ret)
		goto error;
	/* All actions taken below are relevant only when DevX is supported. */
	if (cdev->config.devx == 0)
		return 0;
	/* Query HCA attributes. */
	ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &cdev->config.hca_attr);
	if (ret) {
		DRV_LOG(ERR, "Unable to read HCA capabilities.");
		rte_errno = ENOTSUP;
		goto error;
	}
	return 0;
error:
	mlx5_dev_hw_global_release(cdev);
	return ret;
}

static void
mlx5_common_dev_release(struct mlx5_common_device *cdev)
{
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_REMOVE(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (TAILQ_EMPTY(&devices_list))
			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
							  NULL);
		mlx5_dev_mempool_unsubscribe(cdev);
		mlx5_mr_release_cache(&cdev->mr_scache);
		mlx5_dev_hw_global_release(cdev);
	}
	rte_free(cdev);
}

static struct mlx5_common_device *
mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = rte_zmalloc("mlx5_common_device", sizeof(*cdev), 0);
	if (!cdev) {
		DRV_LOG(ERR, "Device allocation failure.");
		rte_errno = ENOMEM;
		return NULL;
	}
	cdev->dev = eal_dev;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto exit;
	/* Parse device parameters. */
	ret = mlx5_common_config_get(eal_dev->devargs, &cdev->config);
	if (ret < 0) {
		DRV_LOG(ERR, "Failed to process device arguments: %s",
			strerror(rte_errno));
		rte_free(cdev);
		return NULL;
	}
	mlx5_malloc_mem_select(cdev->config.sys_mem_en);
	/* Initialize all HW global resources of the device context. */
	ret = mlx5_dev_hw_global_prepare(cdev, classes);
	if (ret) {
		DRV_LOG(ERR, "Failed to initialize device context.");
		rte_free(cdev);
		return NULL;
	}
	/* Initialize global MR cache resources and update its functions. */
	ret = mlx5_mr_create_cache(&cdev->mr_scache, eal_dev->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Failed to initialize global MR shared cache.");
		mlx5_dev_hw_global_release(cdev);
		rte_free(cdev);
		return NULL;
	}
	/* Register callback function for global shared MR cache management. */
	if (TAILQ_EMPTY(&devices_list))
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
exit:
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_INSERT_HEAD(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	return cdev;
}

static int
drivers_remove(struct mlx5_common_device *cdev, uint32_t enabled_classes)
{
	struct mlx5_class_driver *driver;
	int local_ret = -ENODEV;
	unsigned int i = 0;
	int ret = 0;

	while (enabled_classes) {
		driver = driver_get(RTE_BIT64(i));
		if (driver != NULL) {
			local_ret = driver->remove(cdev);
			if (local_ret == 0)
				cdev->classes_loaded &= ~RTE_BIT64(i);
			else if (ret == 0)
				ret = local_ret;
		}
		enabled_classes &= ~RTE_BIT64(i);
		i++;
	}
	if (local_ret != 0 && ret == 0)
		ret = local_ret;
	return ret;
}

static int
drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes)
{
	struct mlx5_class_driver *driver;
	uint32_t enabled_classes = 0;
	bool already_loaded;
	int ret;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((driver->drv_class & user_classes) == 0)
			continue;
		if (!mlx5_bus_match(driver, cdev->dev))
			continue;
		already_loaded = cdev->classes_loaded & driver->drv_class;
		if (already_loaded && driver->probe_again == 0) {
			DRV_LOG(ERR, "Device %s is already probed",
				cdev->dev->name);
			ret = -EEXIST;
			goto probe_err;
		}
		ret = driver->probe(cdev);
		if (ret < 0) {
			DRV_LOG(ERR, "Failed to load driver %s",
				driver->name);
			goto probe_err;
		}
		enabled_classes |= driver->drv_class;
	}
	cdev->classes_loaded |= enabled_classes;
	return 0;
probe_err:
	/*
	 * Need to remove only drivers which were not probed before this probe
	 * instance, but have already been probed before this failure.
	 */
	enabled_classes &= ~cdev->classes_loaded;
	drivers_remove(cdev, enabled_classes);
	return ret;
}

int
mlx5_common_dev_probe(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	uint32_t classes = 0;
	bool new_device = false;
	int ret;

	DRV_LOG(INFO, "probe device \"%s\".", eal_dev->name);
	ret = parse_class_options(eal_dev->devargs);
	if (ret < 0) {
		DRV_LOG(ERR, "Unsupported mlx5 class type: %s",
			eal_dev->devargs->args);
		return ret;
	}
	classes = ret;
	if (classes == 0)
		/* Default to net class. */
		classes = MLX5_CLASS_ETH;
	cdev = to_mlx5_device(eal_dev);
	if (!cdev) {
		cdev = mlx5_common_dev_create(eal_dev, classes);
		if (!cdev)
			return -ENOMEM;
		new_device = true;
	}
	/*
	 * Validate the combination here.
	 * For a new device, the classes_loaded field is 0, so only
	 * the classes given as user device arguments are checked.
	 */
	ret = is_valid_class_combination(classes | cdev->classes_loaded);
	if (ret != 0) {
		DRV_LOG(ERR, "Unsupported mlx5 classes combination.");
		goto class_err;
	}
	ret = drivers_probe(cdev, classes);
	if (ret)
		goto class_err;
	return 0;
class_err:
	if (new_device)
		mlx5_common_dev_release(cdev);
	return ret;
}

int
mlx5_common_dev_remove(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = to_mlx5_device(eal_dev);
	if (!cdev)
		return -ENODEV;
	/* Matching device found, cleanup and unload drivers. */
	ret = drivers_remove(cdev, cdev->classes_loaded);
	if (ret == 0)
		mlx5_common_dev_release(cdev);
	return ret;
}

/**
 * Callback to DMA map external memory to a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr,
			uint64_t iova __rte_unused, size_t len)
{
	struct mlx5_common_device *dev;
	struct mlx5_mr *mr;

	dev = to_mlx5_device(rte_dev);
	if (!dev) {
		DRV_LOG(WARNING,
			"Unable to find matching mlx5 device to device %s",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	mr = mlx5_create_mr_ext(dev->pd, (uintptr_t)addr, len,
				SOCKET_ID_ANY, dev->mr_scache.reg_mr_cb);
	if (!mr) {
		DRV_LOG(WARNING, "Device %s unable to DMA map", rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&dev->mr_scache.rwlock);
	LIST_INSERT_HEAD(&dev->mr_scache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&dev->mr_scache, mr);
	rte_rwlock_write_unlock(&dev->mr_scache.rwlock);
	return 0;
}
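
/*
 * Illustrative sketch (comment added, not in the original sources): the
 * callback above is reached through the generic EAL API, e.g. for a
 * hypothetical externally allocated buffer:
 *
 *     ret = rte_dev_dma_map(dev, ext_buf, iova, len);
 *     if (ret != 0)
 *         printf("DMA map failed: %s\n", rte_strerror(rte_errno));
 *
 * EAL dispatches the request to the bus driver, which invokes
 * mlx5_common_dev_dma_map().
 */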

/**
 * Callback to DMA unmap external memory from a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_unmap(struct rte_device *rte_dev, void *addr,
			  uint64_t iova __rte_unused, size_t len __rte_unused)
{
	struct mlx5_common_device *dev;
	struct mr_cache_entry entry;
	struct mlx5_mr *mr;

	dev = to_mlx5_device(rte_dev);
	if (!dev) {
		DRV_LOG(WARNING,
			"Unable to find matching mlx5 device to device %s.",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	rte_rwlock_read_lock(&dev->mr_scache.rwlock);
	mr = mlx5_mr_lookup_list(&dev->mr_scache, &entry, (uintptr_t)addr);
	if (!mr) {
		rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
		DRV_LOG(WARNING,
			"Address 0x%" PRIxPTR " wasn't registered to device %s",
			(uintptr_t)addr, rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
	LIST_REMOVE(mr, mr);
	DRV_LOG(DEBUG, "MR(%p) is removed from list.", (void *)mr);
	mlx5_mr_free(mr, dev->mr_scache.dereg_mr_cb);
	mlx5_mr_rebuild_cache(&dev->mr_scache);
	/*
	 * No explicit wmb is needed after updating dev_gen because
	 * the store-release ordering in the unlock provides the
	 * implicit barrier at the software visible level.
	 */
	++dev->mr_scache.dev_gen;
	DRV_LOG(DEBUG, "Broadcasting local cache flush, gen=%d.",
		dev->mr_scache.dev_gen);
	rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
	return 0;
}

void
mlx5_class_driver_register(struct mlx5_class_driver *driver)
{
	mlx5_common_driver_on_register_pci(driver);
	TAILQ_INSERT_TAIL(&drivers_list, driver, next);
}

static void mlx5_common_driver_init(void)
{
	mlx5_common_pci_init();
#ifdef RTE_EXEC_ENV_LINUX
	mlx5_common_auxiliary_init();
#endif
}

static bool mlx5_common_initialized;

/**
 * One-time initialization routine resolving the run-time dependency on the
 * glue library for multiple PMDs. Each mlx5 PMD that depends on the
 * mlx5_common module must invoke it in its constructor.
 */
void
mlx5_common_init(void)
{
	if (mlx5_common_initialized)
		return;

	pthread_mutex_init(&devices_list_lock, NULL);
	mlx5_glue_constructor();
	mlx5_common_driver_init();
	mlx5_common_initialized = true;
}
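
/*
 * Illustrative sketch (comment added, not in the original sources): a
 * dependent PMD calls the routine above from its own constructor, e.g.:
 *
 *     RTE_INIT(hypothetical_mlx5_pmd_init)
 *     {
 *         mlx5_common_init();
 *         mlx5_class_driver_register(&hypothetical_driver);
 *     }
 */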

/**
 * This function is responsible for initializing the variable
 * haswell_broadwell_cpu by checking if the CPU is Intel
 * and reading the data returned from mlx5_cpu_id().
 * Since Haswell and Broadwell CPUs don't show improved performance
 * when using relaxed ordering, we want to check the CPU type before
 * deciding whether to enable RO or not.
 * If the CPU is Haswell or Broadwell the variable will be set to 1,
 * otherwise it will be 0.
 */
RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
{
#ifdef RTE_ARCH_X86_64
	unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
	unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
	unsigned int i, model, family, brand_id, vendor;
	unsigned int signature_intel_ebx = 0x756e6547;
	unsigned int extended_model;
	unsigned int eax = 0;
	unsigned int ebx = 0;
	unsigned int ecx = 0;
	unsigned int edx = 0;
	int max_level;

	mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
	vendor = ebx;
	max_level = eax;
	if (max_level < 1) {
		haswell_broadwell_cpu = 0;
		return;
	}
	mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
	model = (eax >> 4) & 0x0f;
	family = (eax >> 8) & 0x0f;
	brand_id = ebx & 0xff;
	extended_model = (eax >> 12) & 0xf0;
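	/*
	 * Worked example (comment added for clarity): CPUID leaf 1 returning
	 * eax == 0x000306d4 yields model == 0xd, family == 0x6 and
	 * extended_model == 0x30, so the adjustment below gives
	 * model == 0x3d, which matches broadwell_models[0].
	 */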
	/* Check if the processor is Haswell or Broadwell */
	if (vendor == signature_intel_ebx) {
		if (family == 0x06)
			model += extended_model;
		if (brand_id == 0 && family == 0x6) {
			for (i = 0; i < RTE_DIM(broadwell_models); i++)
				if (model == broadwell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
			for (i = 0; i < RTE_DIM(haswell_models); i++)
				if (model == haswell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
		}
	}
#endif
	haswell_broadwell_cpu = 0;
}

/**
 * Allocate the User Access Region with DevX on specified device.
 * This routine handles the following UAR allocation issues:
 *
 *  - Try to allocate the UAR with the most appropriate memory mapping
 *    type from the ones supported by the host.
 *
 *  - Try to allocate the UAR with a non-NULL base address. OFED 5.0.x and
 *    upstream rdma_core before v29 returned NULL as the UAR base address
 *    if the UAR was not the first object in the UAR page.
 *    It caused a PMD failure, so we should try to get another UAR until
 *    we get the first one with a non-NULL base address returned.
 *
 * @param [in] cdev
 *   Pointer to mlx5 device structure to perform allocation on its context.
 *
 * @return
 *   UAR object pointer on success, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_devx_alloc_uar(struct mlx5_common_device *cdev)
{
	void *uar;
	uint32_t retry, uar_mapping;
	void *base_addr;

	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		/* Control the mapping type according to the settings. */
		uar_mapping = (cdev->config.dbnc == MLX5_TXDB_NCACHED) ?
			    MLX5DV_UAR_ALLOC_TYPE_NC : MLX5DV_UAR_ALLOC_TYPE_BF;
#else
		/*
		 * It seems we have no way to control the memory mapping type
		 * for the UAR, the default "Write-Combining" type is assumed.
		 */
		uar_mapping = 0;
#endif
		uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		if (!uar && uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
			/*
			 * In some environments, such as virtual machines,
			 * the Write-Combining mapping might not be supported
			 * and UAR allocation fails. Try the "Non-Cached"
			 * mapping in that case.
			 */
			DRV_LOG(DEBUG, "Failed to allocate DevX UAR (BF)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
			uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
		} else if (!uar && uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
			/*
			 * If Verbs/kernel does not support "Non-Cached",
			 * try the "Write-Combining" mapping.
			 */
			DRV_LOG(DEBUG, "Failed to allocate DevX UAR (NC)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
			uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
		}
#endif
		if (!uar) {
			DRV_LOG(ERR, "Failed to allocate DevX UAR (BF/NC)");
			rte_errno = ENOMEM;
			goto exit;
		}
		base_addr = mlx5_os_get_devx_uar_base_addr(uar);
		if (base_addr)
			break;
		/*
		 * The UARs are allocated by rdma_core within the
		 * IB device context, on context closure all UARs
		 * will be freed, so there should be no memory/object leakage.
		 */
		DRV_LOG(DEBUG, "Retrying to allocate DevX UAR");
		uar = NULL;
	}
	/* Check whether we finally succeeded with valid UAR allocation. */
	if (!uar) {
		DRV_LOG(ERR, "Failed to allocate DevX UAR (NULL base)");
		rte_errno = ENOMEM;
	}
	/*
	 * Returning void * instead of struct mlx5dv_devx_uar *
	 * keeps compatibility with older rdma-core library headers.
	 */
exit:
	return uar;
}

void
mlx5_devx_uar_release(struct mlx5_uar *uar)
{
	if (uar->obj != NULL)
		mlx5_glue->devx_free_uar(uar->obj);
	memset(uar, 0, sizeof(*uar));
}

int
mlx5_devx_uar_prepare(struct mlx5_common_device *cdev, struct mlx5_uar *uar)
{
	off_t uar_mmap_offset;
	const size_t page_size = rte_mem_page_size();
	void *base_addr;
	void *uar_obj;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return -1;
	}
	uar_obj = mlx5_devx_alloc_uar(cdev);
	if (uar_obj == NULL || mlx5_os_get_devx_uar_reg_addr(uar_obj) == NULL) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate UAR.");
		return -1;
	}
	uar->obj = uar_obj;
	uar_mmap_offset = mlx5_os_get_devx_uar_mmap_offset(uar_obj);
	base_addr = mlx5_os_get_devx_uar_base_addr(uar_obj);
	uar->dbnc = mlx5_db_map_type_get(uar_mmap_offset, page_size);
	uar->bf_db.db = mlx5_os_get_devx_uar_reg_addr(uar_obj);
	uar->cq_db.db = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
#ifndef RTE_ARCH_64
	rte_spinlock_init(&uar->bf_sl);
	rte_spinlock_init(&uar->cq_sl);
	uar->bf_db.sl_p = &uar->bf_sl;
	uar->cq_db.sl_p = &uar->cq_sl;
#endif /* RTE_ARCH_64 */
	return 0;
}

RTE_PMD_EXPORT_NAME(mlx5_common_driver, __COUNTER__);