/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <unistd.h>
#include <string.h>
#include <stdio.h>

#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_class.h>
#include <rte_malloc.h>

#include "mlx5_common.h"
#include "mlx5_common_os.h"
#include "mlx5_common_log.h"
#include "mlx5_common_defs.h"
#include "mlx5_common_private.h"

uint8_t haswell_broadwell_cpu;

/* Check whether this is an x86_64 Intel processor, to decide
 * if relaxed ordering should be used.
 */
#ifdef RTE_ARCH_X86_64
/**
 * This function returns processor identification and feature information
 * into the registers.
 *
 * @param level
 *		The main category of information returned.
 * @param eax, ebx, ecx, edx
 *		Pointers to the registers that will hold the CPU information.
 */
static inline void mlx5_cpu_id(unsigned int level,
				unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid\n\t"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (level));
}
#endif
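
/*
 * A minimal usage sketch (illustrative only, not part of the driver):
 * CPUID leaf 0 returns the maximum supported leaf in EAX and the vendor
 * signature split across EBX/EDX/ECX, which is how the Intel check at
 * the bottom of this file works.
 *
 *	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *
 *	mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
 *	if (ebx == 0x756e6547)
 *		;  // "Genu" - an Intel CPU, see signature_intel_ebx below.
 */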

RTE_LOG_REGISTER_DEFAULT(mlx5_common_logtype, NOTICE)

/* Head of list of drivers. */
static TAILQ_HEAD(mlx5_drivers, mlx5_class_driver) drivers_list =
				TAILQ_HEAD_INITIALIZER(drivers_list);

/* Head of devices. */
static TAILQ_HEAD(mlx5_devices, mlx5_common_device) devices_list =
				TAILQ_HEAD_INITIALIZER(devices_list);
static pthread_mutex_t devices_list_lock;

static const struct {
	const char *name;
	unsigned int drv_class;
} mlx5_classes[] = {
	{ .name = "vdpa", .drv_class = MLX5_CLASS_VDPA },
	{ .name = "eth", .drv_class = MLX5_CLASS_ETH },
	/* Keep class "net" for backward compatibility. */
	{ .name = "net", .drv_class = MLX5_CLASS_ETH },
	{ .name = "regex", .drv_class = MLX5_CLASS_REGEX },
	{ .name = "compress", .drv_class = MLX5_CLASS_COMPRESS },
	{ .name = "crypto", .drv_class = MLX5_CLASS_CRYPTO },
};

static int
class_name_to_value(const char *class_name)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(mlx5_classes); i++) {
		if (strcmp(class_name, mlx5_classes[i].name) == 0)
			return mlx5_classes[i].drv_class;
	}
	return -EINVAL;
}
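
/*
 * Illustrative devargs (assumed EAL command line, not part of this file):
 * any of the class names above may be given, and several of them can be
 * combined with ':' in the legacy syntax, e.g.:
 *
 *	dpdk-app -a 0000:03:00.0,class=eth:regex
 *
 * Each token is resolved through class_name_to_value() above.
 */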

static struct mlx5_class_driver *
driver_get(uint32_t class)
{
	struct mlx5_class_driver *driver;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((uint32_t)driver->drv_class == class)
			return driver;
	}
	return NULL;
}

/**
 * Verify and store value for devargs.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_common_dev_config *config = opaque;
	signed long tmp;

	errno = 0;
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
		return -rte_errno;
	}
	if (strcmp(key, "tx_db_nc") == 0) {
		if (tmp != MLX5_TXDB_CACHED &&
		    tmp != MLX5_TXDB_NCACHED &&
		    tmp != MLX5_TXDB_HEURISTIC) {
			DRV_LOG(ERR, "Invalid Tx doorbell mapping parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dbnc = tmp;
	} else if (strcmp(key, "mr_ext_memseg_en") == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else if (strcmp(key, "mr_mempool_reg_en") == 0) {
		config->mr_mempool_reg_en = !!tmp;
	} else if (strcmp(key, "sys_mem_en") == 0) {
		config->sys_mem_en = !!tmp;
	}
	return 0;
}
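
/*
 * Illustrative devargs (assumed command line, not part of this file)
 * exercising the keys handled above; the boolean keys accept any integer
 * and are normalized with !!, while tx_db_nc must be one of
 * MLX5_TXDB_CACHED, MLX5_TXDB_NCACHED or MLX5_TXDB_HEURISTIC:
 *
 *	dpdk-app -a 0000:03:00.0,tx_db_nc=1,mr_ext_memseg_en=0,sys_mem_en=1
 */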

/**
 * Parse common device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 * @param config
 *   Pointer to device configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_config_get(struct rte_devargs *devargs,
		       struct mlx5_common_dev_config *config)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	/* Set defaults. */
	config->mr_ext_memseg_en = 1;
	config->mr_mempool_reg_en = 1;
	config->sys_mem_en = 0;
	config->dbnc = MLX5_ARG_UNSET;
	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = rte_kvargs_process(kvlist, NULL, mlx5_common_args_check_handler,
				 config);
	if (ret)
		ret = -rte_errno;
	rte_kvargs_free(kvlist);
	DRV_LOG(DEBUG, "mr_ext_memseg_en is %u.", config->mr_ext_memseg_en);
	DRV_LOG(DEBUG, "mr_mempool_reg_en is %u.", config->mr_mempool_reg_en);
	DRV_LOG(DEBUG, "sys_mem_en is %u.", config->sys_mem_en);
	DRV_LOG(DEBUG, "Tx doorbell mapping parameter is %d.", config->dbnc);
	return ret;
}

static int
devargs_class_handler(__rte_unused const char *key,
		      const char *class_names, void *opaque)
{
	int *ret = opaque;
	int class_val;
	char *scratch;
	char *found;
	char *refstr = NULL;

	*ret = 0;
	scratch = strdup(class_names);
	if (scratch == NULL) {
		*ret = -ENOMEM;
		return *ret;
	}
	found = strtok_r(scratch, ":", &refstr);
	if (found == NULL)
		/* Empty string. */
		goto err;
	do {
		/* Extract each individual class name. Multiple
		 * classes can be supplied as class=net:regex:foo:bar.
		 */
		class_val = class_name_to_value(found);
		/* Check if it's a valid class. */
		if (class_val < 0) {
			*ret = -EINVAL;
			goto err;
		}
		*ret |= class_val;
		found = strtok_r(NULL, ":", &refstr);
	} while (found != NULL);
err:
	free(scratch);
	if (*ret < 0)
		DRV_LOG(ERR, "Invalid mlx5 class options: %s.", class_names);
	return *ret;
}

static int
parse_class_options(const struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	if (devargs == NULL)
		return 0;
	if (devargs->cls != NULL && devargs->cls->name != NULL)
		/* Global syntax, only one class type. */
		return class_name_to_value(devargs->cls->name);
	/* Legacy devargs support multiple classes. */
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;
	rte_kvargs_process(kvlist, RTE_DEVARGS_KEY_CLASS,
			   devargs_class_handler, &ret);
	rte_kvargs_free(kvlist);
	return ret;
}
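
/*
 * A sketch of the two accepted forms (assumed EAL syntax, not part of
 * this file): the global devargs syntax carries a single class in the
 * parsed rte_devargs "cls" field, while the legacy key=value syntax may
 * carry several:
 *
 *	dpdk-app -a bus=pci,addr=03:00.0/class=eth	(global, one class)
 *	dpdk-app -a 0000:03:00.0,class=eth:regex	(legacy, multiple)
 */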

static const unsigned int mlx5_class_invalid_combinations[] = {
	MLX5_CLASS_ETH | MLX5_CLASS_VDPA,
	/* New class combination should be added here. */
};

static int
is_valid_class_combination(uint32_t user_classes)
{
	unsigned int i;

	/* Verify if user specified unsupported combination. */
	for (i = 0; i < RTE_DIM(mlx5_class_invalid_combinations); i++) {
		if ((mlx5_class_invalid_combinations[i] & user_classes) ==
		    mlx5_class_invalid_combinations[i])
			return -EINVAL;
	}
	/* No invalid class combination found. */
	return 0;
}
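
/*
 * For instance (illustrative): probing with class=eth:vdpa yields
 * user_classes == (MLX5_CLASS_ETH | MLX5_CLASS_VDPA), which matches the
 * first entry of mlx5_class_invalid_combinations[] above and is rejected
 * with -EINVAL.
 */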

static bool
mlx5_bus_match(const struct mlx5_class_driver *drv,
	       const struct rte_device *dev)
{
	if (mlx5_dev_is_pci(dev))
		return mlx5_dev_pci_match(drv, dev);
	return true;
}

static struct mlx5_common_device *
to_mlx5_device(const struct rte_device *rte_dev)
{
	struct mlx5_common_device *cdev;

	TAILQ_FOREACH(cdev, &devices_list, next) {
		if (rte_dev == cdev->dev)
			return cdev;
	}
	return NULL;
}

int
mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
{
	struct rte_pci_addr pci_addr = { 0 };
	int ret;

	if (mlx5_dev_is_pci(dev)) {
		/* Input might be <BDF>, format PCI address to <DBDF>. */
		ret = rte_pci_addr_parse(dev->name, &pci_addr);
		if (ret != 0)
			return -ENODEV;
		rte_pci_device_name(&pci_addr, addr, size);
		return 0;
	}
#ifdef RTE_EXEC_ENV_LINUX
	return mlx5_auxiliary_get_pci_str(RTE_DEV_TO_AUXILIARY_CONST(dev),
			addr, size);
#else
	rte_errno = ENODEV;
	return -rte_errno;
#endif
}

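/*
 * A caller-side sketch (assumed usage, not part of this file): format
 * the underlying PCI address of a probed device, whichever bus it sits
 * on. PCI_PRI_STR_SIZE is the rte_pci.h buffer size for a DBDF string.
 *
 *	char addr[PCI_PRI_STR_SIZE];
 *
 *	if (mlx5_dev_to_pci_str(dev, addr, sizeof(addr)) == 0)
 *		DRV_LOG(DEBUG, "Device is on PCI %s.", addr);
 */
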
/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
static void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_common_device *cdev;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		pthread_mutex_lock(&devices_list_lock);
		/* Iterate all the existing mlx5 devices. */
		TAILQ_FOREACH(cdev, &devices_list, next)
			mlx5_free_mr_by_addr(&cdev->mr_scache,
					     mlx5_os_get_ctx_device_name
								    (cdev->ctx),
					     addr, len);
		pthread_mutex_unlock(&devices_list_lock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

/**
 * Uninitialize all HW global resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 */
static void
mlx5_dev_hw_global_release(struct mlx5_common_device *cdev)
{
	if (cdev->pd != NULL) {
		claim_zero(mlx5_os_dealloc_pd(cdev->pd));
		cdev->pd = NULL;
	}
	if (cdev->ctx != NULL) {
		claim_zero(mlx5_glue->close_device(cdev->ctx));
		cdev->ctx = NULL;
	}
}

/**
 * Initialize all HW global resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 * @param classes
 *   Chosen classes coming from user device arguments.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
{
	int ret;

	/* Create the device context. */
	ret = mlx5_os_open_device(cdev, classes);
	if (ret < 0)
		return ret;
	/* Allocate Protection Domain object and extract its pdn. */
	ret = mlx5_os_pd_create(cdev);
	if (ret)
		goto error;
	/* All actions taken below are relevant only when DevX is supported. */
	if (cdev->config.devx == 0)
		return 0;
	/* Query HCA attributes. */
	ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &cdev->config.hca_attr);
	if (ret) {
		DRV_LOG(ERR, "Unable to read HCA capabilities.");
		rte_errno = ENOTSUP;
		goto error;
	}
	return 0;
error:
	mlx5_dev_hw_global_release(cdev);
	return ret;
}

static void
mlx5_common_dev_release(struct mlx5_common_device *cdev)
{
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_REMOVE(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (TAILQ_EMPTY(&devices_list))
			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
							  NULL);
		mlx5_mr_release_cache(&cdev->mr_scache);
		mlx5_dev_hw_global_release(cdev);
	}
	rte_free(cdev);
}

static struct mlx5_common_device *
mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = rte_zmalloc("mlx5_common_device", sizeof(*cdev), 0);
	if (!cdev) {
		DRV_LOG(ERR, "Device allocation failure.");
		rte_errno = ENOMEM;
		return NULL;
	}
	cdev->dev = eal_dev;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto exit;
	/* Parse device parameters. */
	ret = mlx5_common_config_get(eal_dev->devargs, &cdev->config);
	if (ret < 0) {
		DRV_LOG(ERR, "Failed to process device arguments: %s",
			strerror(rte_errno));
		rte_free(cdev);
		return NULL;
	}
	mlx5_malloc_mem_select(cdev->config.sys_mem_en);
	/* Initialize all HW global resources of the device context. */
	ret = mlx5_dev_hw_global_prepare(cdev, classes);
	if (ret) {
		DRV_LOG(ERR, "Failed to initialize device context.");
		rte_free(cdev);
		return NULL;
	}
	/* Initialize global MR cache resources and update its functions. */
	ret = mlx5_mr_create_cache(&cdev->mr_scache, eal_dev->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Failed to initialize global MR share cache.");
		mlx5_dev_hw_global_release(cdev);
		rte_free(cdev);
		return NULL;
	}
	/* Register callback function for global shared MR cache management. */
	if (TAILQ_EMPTY(&devices_list))
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
exit:
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_INSERT_HEAD(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	return cdev;
}

static int
drivers_remove(struct mlx5_common_device *cdev, uint32_t enabled_classes)
{
	struct mlx5_class_driver *driver;
	int local_ret = -ENODEV;
	unsigned int i = 0;
	int ret = 0;

	enabled_classes &= cdev->classes_loaded;
	while (enabled_classes) {
		driver = driver_get(RTE_BIT64(i));
		if (driver != NULL) {
			local_ret = driver->remove(cdev);
			if (local_ret == 0)
				cdev->classes_loaded &= ~RTE_BIT64(i);
			else if (ret == 0)
				ret = local_ret;
		}
		enabled_classes &= ~RTE_BIT64(i);
		i++;
	}
	if (local_ret != 0 && ret == 0)
		ret = local_ret;
	return ret;
}

static int
drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes)
{
	struct mlx5_class_driver *driver;
	uint32_t enabled_classes = 0;
	bool already_loaded;
	int ret;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((driver->drv_class & user_classes) == 0)
			continue;
		if (!mlx5_bus_match(driver, cdev->dev))
			continue;
		already_loaded = cdev->classes_loaded & driver->drv_class;
		if (already_loaded && driver->probe_again == 0) {
			DRV_LOG(ERR, "Device %s is already probed",
				cdev->dev->name);
			ret = -EEXIST;
			goto probe_err;
		}
		ret = driver->probe(cdev);
		if (ret < 0) {
			DRV_LOG(ERR, "Failed to load driver %s",
				driver->name);
			goto probe_err;
		}
		enabled_classes |= driver->drv_class;
	}
	cdev->classes_loaded |= enabled_classes;
	return 0;
probe_err:
	/* Only unload drivers which were enabled in this probe instance. */
	drivers_remove(cdev, enabled_classes);
	return ret;
}

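/*
 * A minimal sketch (hypothetical driver, not part of this file) of how a
 * class driver plugs into the probe flow above: it fills a struct
 * mlx5_class_driver and registers it from a constructor, after which
 * drivers_probe() invokes its probe() callback for matching devices.
 *
 *	static struct mlx5_class_driver my_driver = {
 *		.drv_class = MLX5_CLASS_REGEX,
 *		.name = "my_regex",
 *		.probe = my_probe,	// int (*)(struct mlx5_common_device *)
 *		.remove = my_remove,
 *	};
 *
 *	RTE_INIT(my_driver_init)
 *	{
 *		mlx5_class_driver_register(&my_driver);
 *	}
 */
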
int
mlx5_common_dev_probe(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	uint32_t classes = 0;
	bool new_device = false;
	int ret;

	DRV_LOG(INFO, "probe device \"%s\".", eal_dev->name);
	ret = parse_class_options(eal_dev->devargs);
	if (ret < 0) {
		DRV_LOG(ERR, "Unsupported mlx5 class type: %s",
			eal_dev->devargs->args);
		return ret;
	}
	classes = ret;
	if (classes == 0)
		/* Default to net class. */
		classes = MLX5_CLASS_ETH;
	cdev = to_mlx5_device(eal_dev);
	if (!cdev) {
		cdev = mlx5_common_dev_create(eal_dev, classes);
		if (!cdev)
			return -ENOMEM;
		new_device = true;
	}
	/*
	 * Validate the combination here.
	 * For a new device, the classes_loaded field is 0, so only the
	 * classes given as user device arguments are checked.
	 */
	ret = is_valid_class_combination(classes | cdev->classes_loaded);
	if (ret != 0) {
		DRV_LOG(ERR, "Unsupported mlx5 classes combination.");
		goto class_err;
	}
	ret = drivers_probe(cdev, classes);
	if (ret)
		goto class_err;
	return 0;
class_err:
	if (new_device)
		mlx5_common_dev_release(cdev);
	return ret;
}

int
mlx5_common_dev_remove(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = to_mlx5_device(eal_dev);
	if (!cdev)
		return -ENODEV;
	/* Matching device found, cleanup and unload drivers. */
	ret = drivers_remove(cdev, cdev->classes_loaded);
	if (ret == 0)
		mlx5_common_dev_release(cdev);
	return ret;
}

/**
 * Callback to DMA map external memory to a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr,
			uint64_t iova __rte_unused, size_t len)
{
	struct mlx5_common_device *dev;
	struct mlx5_mr *mr;

	dev = to_mlx5_device(rte_dev);
	if (!dev) {
		DRV_LOG(WARNING,
			"Unable to find matching mlx5 device to device %s",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	mr = mlx5_create_mr_ext(dev->pd, (uintptr_t)addr, len,
				SOCKET_ID_ANY, dev->mr_scache.reg_mr_cb);
	if (!mr) {
		DRV_LOG(WARNING, "Device %s unable to DMA map", rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&dev->mr_scache.rwlock);
	LIST_INSERT_HEAD(&dev->mr_scache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&dev->mr_scache, mr);
	rte_rwlock_write_unlock(&dev->mr_scache.rwlock);
	return 0;
}

/**
 * Callback to DMA unmap external memory from a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_unmap(struct rte_device *rte_dev, void *addr,
			  uint64_t iova __rte_unused, size_t len __rte_unused)
{
	struct mlx5_common_device *dev;
	struct mr_cache_entry entry;
	struct mlx5_mr *mr;

	dev = to_mlx5_device(rte_dev);
	if (!dev) {
		DRV_LOG(WARNING,
			"Unable to find matching mlx5 device to device %s.",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	rte_rwlock_read_lock(&dev->mr_scache.rwlock);
	mr = mlx5_mr_lookup_list(&dev->mr_scache, &entry, (uintptr_t)addr);
	if (!mr) {
		rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
		DRV_LOG(WARNING,
			"Address 0x%" PRIxPTR " wasn't registered to device %s",
			(uintptr_t)addr, rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
	LIST_REMOVE(mr, mr);
	DRV_LOG(DEBUG, "MR(%p) is removed from list.", (void *)mr);
	mlx5_mr_free(mr, dev->mr_scache.dereg_mr_cb);
	mlx5_mr_rebuild_cache(&dev->mr_scache);
	/*
	 * No explicit wmb is needed after updating dev_gen due to
	 * store-release ordering in unlock that provides the
	 * implicit barrier at the software visible level.
	 */
	++dev->mr_scache.dev_gen;
	DRV_LOG(DEBUG, "Broadcasting local cache flush, gen=%d.",
		dev->mr_scache.dev_gen);
	rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
	return 0;
}

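/*
 * Illustrative application-side flow (assumed usage, not part of this
 * file): the two callbacks above are reached through the generic
 * rte_dev_dma_map()/rte_dev_dma_unmap() API once external memory has
 * been registered with the EAL.
 *
 *	rte_extmem_register(buf, len, NULL, 0, pg_sz);
 *	rte_dev_dma_map(eal_dev, buf, iova, len);
 *	// ... use buf for I/O on the device ...
 *	rte_dev_dma_unmap(eal_dev, buf, iova, len);
 *	rte_extmem_unregister(buf, len);
 */
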
void
mlx5_class_driver_register(struct mlx5_class_driver *driver)
{
	mlx5_common_driver_on_register_pci(driver);
	TAILQ_INSERT_TAIL(&drivers_list, driver, next);
}

static void mlx5_common_driver_init(void)
{
	mlx5_common_pci_init();
#ifdef RTE_EXEC_ENV_LINUX
	mlx5_common_auxiliary_init();
#endif
}

static bool mlx5_common_initialized;

/**
 * One-time initialization routine for run-time dependency on the glue
 * library, shared by multiple PMDs. Each mlx5 PMD that depends on the
 * mlx5_common module must invoke it in its constructor.
 */
void
mlx5_common_init(void)
{
	if (mlx5_common_initialized)
		return;

	pthread_mutex_init(&devices_list_lock, NULL);
	mlx5_glue_constructor();
	mlx5_common_driver_init();
	mlx5_common_initialized = true;
}

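/*
 * A minimal sketch (hypothetical PMD code, not part of this file) of the
 * constructor contract described above:
 *
 *	RTE_INIT(my_pmd_init)
 *	{
 *		mlx5_common_init();
 *		// ... register the class driver, devargs, etc.
 *	}
 */
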
/**
 * This function is responsible for initializing the variable
 * haswell_broadwell_cpu by checking if the CPU is Intel and reading
 * the data returned from mlx5_cpu_id(). Since Haswell and Broadwell
 * CPUs don't show improved performance when using relaxed ordering,
 * we want to check the CPU type before deciding whether to enable
 * RO or not. If the CPU is Haswell or Broadwell the variable is set
 * to 1, otherwise it is set to 0.
 */
RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
{
#ifdef RTE_ARCH_X86_64
	unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
	unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
	unsigned int i, model, family, brand_id, vendor;
	unsigned int signature_intel_ebx = 0x756e6547;
	unsigned int extended_model;
	unsigned int eax = 0;
	unsigned int ebx = 0;
	unsigned int ecx = 0;
	unsigned int edx = 0;
	int max_level;

	mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
	vendor = ebx;
	max_level = eax;
	if (max_level < 1) {
		haswell_broadwell_cpu = 0;
		return;
	}
	mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
	model = (eax >> 4) & 0x0f;
	family = (eax >> 8) & 0x0f;
	brand_id = ebx & 0xff;
	extended_model = (eax >> 12) & 0xf0;
	/* Check if the processor is Haswell or Broadwell. */
	if (vendor == signature_intel_ebx) {
		if (family == 0x06)
			model += extended_model;
		if (brand_id == 0 && family == 0x6) {
			for (i = 0; i < RTE_DIM(broadwell_models); i++)
				if (model == broadwell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
			for (i = 0; i < RTE_DIM(haswell_models); i++)
				if (model == haswell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
		}
	}
#endif
	haswell_broadwell_cpu = 0;
}

/**
 * Allocate the User Access Region with DevX on specified device.
 *
 * @param [in] ctx
 *   Infiniband device context to perform allocation on.
 * @param [in] mapping
 *   MLX5DV_UAR_ALLOC_TYPE_BF - allocate as cached memory with write-combining
 *				attributes (if supported by the host), the
 *				writes to the UAR registers must be followed
 *				by write memory barrier.
 *   MLX5DV_UAR_ALLOC_TYPE_NC - allocate as non-cached memory, all writes are
 *				promoted to the registers immediately, no
 *				memory barriers needed.
 *   mapping < 0 - the first attempt is performed with MLX5DV_UAR_ALLOC_TYPE_BF,
 *		   if this fails the next attempt with MLX5DV_UAR_ALLOC_TYPE_NC
 *		   is performed. The drivers specifying negative values should
 *		   always provide the write memory barrier operation after UAR
 *		   register writings.
 * If there are no definitions for the MLX5DV_UAR_ALLOC_TYPE_xx (older rdma
 * library headers), the caller can specify 0.
 *
 * @return
 *   UAR object pointer on success, NULL otherwise and rte_errno is set.
 */
void *
mlx5_devx_alloc_uar(void *ctx, int mapping)
{
	void *uar;
	uint32_t retry, uar_mapping;
	void *base_addr;

	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		/* Control the mapping type according to the settings. */
		uar_mapping = (mapping < 0) ?
			      MLX5DV_UAR_ALLOC_TYPE_NC : mapping;
#else
		/*
		 * It seems we have no way to control the memory mapping type
		 * for the UAR, the default "Write-Combining" type is supposed.
		 */
		uar_mapping = 0;
		RTE_SET_USED(mapping);
#endif
		uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		if (!uar &&
		    mapping < 0 &&
		    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
			/*
			 * In some environments like virtual machines the
			 * Write-Combining mapping might not be supported
			 * and UAR allocation fails. Try the "Non-Cached"
			 * mapping for this case.
			 */
			DRV_LOG(WARNING, "Failed to allocate DevX UAR (BF)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
			uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
		} else if (!uar &&
			   mapping < 0 &&
			   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
			/*
			 * If Verbs/kernel does not support "Non-Cached",
			 * try the "Write-Combining".
			 */
			DRV_LOG(WARNING, "Failed to allocate DevX UAR (NC)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
			uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
		}
#endif
		if (!uar) {
			DRV_LOG(ERR, "Failed to allocate DevX UAR (BF/NC)");
			rte_errno = ENOMEM;
			goto exit;
		}
		base_addr = mlx5_os_get_devx_uar_base_addr(uar);
		if (base_addr)
			break;
		/*
		 * The UARs are allocated by rdma_core within the
		 * IB device context, on context closure all UARs
		 * will be freed, should be no memory/object leakage.
		 */
		DRV_LOG(WARNING, "Retrying to allocate DevX UAR");
		uar = NULL;
	}
	/* Check whether we finally succeeded with valid UAR allocation. */
	if (!uar) {
		DRV_LOG(ERR, "Failed to allocate DevX UAR (NULL base)");
		rte_errno = ENOMEM;
	}
	/*
	 * Return void * instead of struct mlx5dv_devx_uar *
	 * is for compatibility with older rdma-core library headers.
	 */
exit:
	return uar;
}

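/*
 * A caller-side sketch (assumed usage, not part of this file): a
 * negative mapping requests the BF->NC fallback described above, so
 * the caller must issue a write memory barrier after writing the UAR
 * registers. The UAR is released by rdma-core when the device context
 * is closed.
 *
 *	void *uar = mlx5_devx_alloc_uar(ctx, -1);
 *
 *	if (uar == NULL)
 *		return -rte_errno;
 *	// ... program doorbells via mlx5_os_get_devx_uar_base_addr(uar) ...
 */
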
RTE_PMD_EXPORT_NAME(mlx5_common_driver, __COUNTER__);