/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <unistd.h>
#include <string.h>
#include <stdio.h>

#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>

#include "mlx5_common.h"
#include "mlx5_common_os.h"
#include "mlx5_common_utils.h"

int mlx5_common_logtype;

uint8_t haswell_broadwell_cpu;

static int
mlx5_class_check_handler(__rte_unused const char *key, const char *value,
			 void *opaque)
{
	enum mlx5_class *ret = opaque;

	if (strcmp(value, "vdpa") == 0) {
		*ret = MLX5_CLASS_VDPA;
	} else if (strcmp(value, "net") == 0) {
		*ret = MLX5_CLASS_NET;
	} else {
		DRV_LOG(ERR, "Invalid mlx5 class %s. Maybe a typo in the"
			" device class argument?", value);
		*ret = MLX5_CLASS_INVALID;
	}
	return 0;
}

enum mlx5_class
mlx5_class_get(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *key = MLX5_CLASS_ARG_NAME;
	enum mlx5_class ret = MLX5_CLASS_NET;

	if (devargs == NULL)
		return ret;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return ret;
	if (rte_kvargs_count(kvlist, key))
		rte_kvargs_process(kvlist, key, mlx5_class_check_handler, &ret);
	rte_kvargs_free(kvlist);
	return ret;
}
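
/*
 * Usage sketch (illustrative only, not part of the driver): devargs such as
 * "class=vdpa" select the vdpa driver, while a missing "class" key keeps the
 * default MLX5_CLASS_NET. The rte_devargs instance below is hypothetical.
 *
 *	struct rte_devargs da = { .args = "class=vdpa" };
 *
 *	assert(mlx5_class_get(&da) == MLX5_CLASS_VDPA);
 *	assert(mlx5_class_get(NULL) == MLX5_CLASS_NET);
 */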


/* On x86_64, detect whether this is an Intel processor in order to
 * decide if relaxed ordering should be used.
 */
#ifdef RTE_ARCH_X86_64
/**
 * Return processor identification and feature information in the
 * provided registers.
 *
 * @param level
 *		The main category of information returned (the CPUID leaf).
 * @param eax, ebx, ecx, edx
 *		Pointers to the registers that will hold the CPU information.
 */
static inline void mlx5_cpu_id(unsigned int level,
				unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid\n\t"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (level));
}
#endif

RTE_INIT_PRIO(mlx5_log_init, LOG)
{
	mlx5_common_logtype = rte_log_register("pmd.common.mlx5");
	if (mlx5_common_logtype >= 0)
		rte_log_set_level(mlx5_common_logtype, RTE_LOG_NOTICE);
}

/**
 * Initialization routine for run-time dependency on glue library.
 */
RTE_INIT_PRIO(mlx5_glue_init, CLASS)
{
	mlx5_glue_constructor();
}

/**
 * Initialize the haswell_broadwell_cpu variable by checking whether the
 * CPU is an Intel one and decoding the data returned by mlx5_cpu_id().
 * Since Haswell and Broadwell CPUs do not gain performance from relaxed
 * ordering, the CPU type must be checked before deciding whether to
 * enable RO or not.
 * If the CPU is Haswell or Broadwell the variable is set to 1,
 * otherwise it is 0.
 */
RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
{
#ifdef RTE_ARCH_X86_64
	unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
	unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
	unsigned int i, model, family, brand_id, vendor;
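	/* Leaf-0 EBX value for "Genu", the start of "GenuineIntel". */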
	unsigned int signature_intel_ebx = 0x756e6547;
	unsigned int extended_model;
	unsigned int eax = 0;
	unsigned int ebx = 0;
	unsigned int ecx = 0;
	unsigned int edx = 0;
	int max_level;

	mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
	vendor = ebx;
	max_level = eax;
	if (max_level < 1) {
		haswell_broadwell_cpu = 0;
		return;
	}
	mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
	model = (eax >> 4) & 0x0f;
	family = (eax >> 8) & 0x0f;
	brand_id = ebx & 0xff;
	extended_model = (eax >> 12) & 0xf0;
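	/*
	 * Illustrative decoding, assuming a Broadwell-U part that reports
	 * leaf-1 EAX = 0x306d4: the model nibble is 0xd, the family is 0x6
	 * and the extended model bits give 0x30, so the combined model
	 * below becomes 0x3d and matches broadwell_models[0].
	 */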
	/* Check if the processor is Haswell or Broadwell */
	if (vendor == signature_intel_ebx) {
		if (family == 0x06)
			model += extended_model;
		if (brand_id == 0 && family == 0x6) {
			for (i = 0; i < RTE_DIM(broadwell_models); i++)
				if (model == broadwell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
			for (i = 0; i < RTE_DIM(haswell_models); i++)
				if (model == haswell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
		}
	}
#endif
	haswell_broadwell_cpu = 0;
}

/**
 * Allocate a page of door-bell records and register it using the DevX API.
 *
 * @param [in] ctx
 *   Pointer to the device context.
 *
 * @return
 *   Pointer to the new page on success, NULL otherwise.
 */
static struct mlx5_devx_dbr_page *
mlx5_alloc_dbr_page(void *ctx)
{
	struct mlx5_devx_dbr_page *page;

	/* Allocate space for door-bell page and management data. */
	page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page),
				 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!page) {
		DRV_LOG(ERR, "cannot allocate dbr page");
		return NULL;
	}
	/* Register allocated memory. */
	page->umem = mlx5_glue->devx_umem_reg(ctx, page->dbrs,
					      MLX5_DBR_PAGE_SIZE, 0);
	if (!page->umem) {
		DRV_LOG(ERR, "cannot umem reg dbr page");
		rte_free(page);
		return NULL;
	}
	return page;
}

/**
 * Find the next available door-bell record; allocate a new page if needed.
 *
 * @param [in] ctx
 *   Pointer to the device context.
 * @param [in] head
 *   Pointer to the head of the dbr pages list.
 * @param [out] dbr_page
 *   Pointer used to return the door-bell page holding the allocated record.
 *
 * @return
 *   Door-bell address offset on success, a negative error value otherwise.
 */
int64_t
mlx5_get_dbr(void *ctx, struct mlx5_dbr_page_list *head,
	     struct mlx5_devx_dbr_page **dbr_page)
{
	struct mlx5_devx_dbr_page *page = NULL;
	uint32_t i, j;

	LIST_FOREACH(page, head, next)
		if (page->dbr_count < MLX5_DBR_PER_PAGE)
			break;
	if (!page) { /* No page with free door-bell exists. */
		page = mlx5_alloc_dbr_page(ctx);
		if (!page) /* Failed to allocate new page. */
			return (-1);
		LIST_INSERT_HEAD(head, page, next);
	}
	/* Loop to find bitmap part with clear bit. */
	for (i = 0;
	     i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX;
	     i++)
		; /* Empty. */
	/* Find the first clear bit. */
	MLX5_ASSERT(i < MLX5_DBR_BITMAP_SIZE);
	j = rte_bsf64(~page->dbr_bitmap[i]);
	page->dbr_bitmap[i] |= (UINT64_C(1) << j);
	page->dbr_count++;
	*dbr_page = page;
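	/*
	 * Each door-bell record occupies sizeof(uint64_t) bytes, so bit
	 * (i * 64 + j) of the bitmap maps to that byte offset within the
	 * page.
	 */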
	return (((i * 64) + j) * sizeof(uint64_t));
}

/**
 * Release a door-bell record.
 *
 * @param [in] head
 *   Pointer to the head of the dbr pages list.
 * @param [in] umem_id
 *   UMEM ID of the page containing the door-bell record to release.
 * @param [in] offset
 *   Offset of the door-bell record within the page.
 *
 * @return
 *   0 on success, a negative error value otherwise.
 */
int32_t
mlx5_release_dbr(struct mlx5_dbr_page_list *head, uint32_t umem_id,
		 uint64_t offset)
{
	struct mlx5_devx_dbr_page *page = NULL;
	int ret = 0;

	LIST_FOREACH(page, head, next)
		/* Find the page this address belongs to. */
		if (mlx5_os_get_umem_id(page->umem) == umem_id)
			break;
	if (!page)
		return -EINVAL;
	page->dbr_count--;
	if (!page->dbr_count) {
		/* Page not used, free it and remove from list. */
		LIST_REMOVE(page, next);
		if (page->umem)
			ret = -mlx5_glue->devx_umem_dereg(page->umem);
		rte_free(page);
	} else {
		/* Mark in bitmap that this door-bell is not in use. */
		offset /= MLX5_DBR_SIZE;
		int i = offset / 64;
		int j = offset % 64;

		page->dbr_bitmap[i] &= ~(UINT64_C(1) << j);
	}
	return ret;
}
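
/*
 * Usage sketch (illustrative only; the list, context and queue names are
 * hypothetical): a queue takes one door-bell record from the shared pages
 * and returns it on teardown using the page UMEM ID and the same offset.
 *
 *	struct mlx5_devx_dbr_page *dbr_page;
 *	int64_t dbr_offset = mlx5_get_dbr(ctx, &dbr_list, &dbr_page);
 *
 *	if (dbr_offset < 0)
 *		return -1;
 *	queue->db_rec = (uint32_t *)((uintptr_t)dbr_page->dbrs +
 *				     (uintptr_t)dbr_offset);
 *	...
 *	mlx5_release_dbr(&dbr_list, mlx5_os_get_umem_id(dbr_page->umem),
 *			 (uint64_t)dbr_offset);
 */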