xref: /dpdk/drivers/common/mlx5/mlx5_common.c (revision cf9b3c36e5a297200c169dbbf9d6e655d8096948)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <unistd.h>
#include <string.h>
#include <stdio.h>

#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>

#include "mlx5_common.h"
#include "mlx5_common_os.h"
#include "mlx5_common_utils.h"

int mlx5_common_logtype;

#ifdef MLX5_GLUE
const struct mlx5_glue *mlx5_glue;
#endif

uint8_t haswell_broadwell_cpu;

static int
mlx5_class_check_handler(__rte_unused const char *key, const char *value,
			 void *opaque)
{
	enum mlx5_class *ret = opaque;

	if (strcmp(value, "vdpa") == 0) {
		*ret = MLX5_CLASS_VDPA;
	} else if (strcmp(value, "net") == 0) {
		*ret = MLX5_CLASS_NET;
	} else {
		DRV_LOG(ERR, "Invalid mlx5 class %s. Is there a typo in the"
			" device class argument?", value);
		*ret = MLX5_CLASS_INVALID;
	}
	return 0;
}

enum mlx5_class
mlx5_class_get(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *key = MLX5_CLASS_ARG_NAME;
	enum mlx5_class ret = MLX5_CLASS_NET;

	if (devargs == NULL)
		return ret;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return ret;
	if (rte_kvargs_count(kvlist, key))
		rte_kvargs_process(kvlist, key, mlx5_class_check_handler, &ret);
	rte_kvargs_free(kvlist);
	return ret;
}
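
/*
 * Illustrative sketch (not compiled): how a class driver is expected to use
 * mlx5_class_get() during probe. The probe callback and the PCI plumbing
 * below are hypothetical; only mlx5_class_get() and the mlx5_class values
 * come from this file.
 */
#if 0
static int
example_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		       struct rte_pci_device *pci_dev)
{
	/* Skip devices whose "class" devarg selects another driver class. */
	if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_VDPA) {
		DRV_LOG(DEBUG, "Skip probing, device requested another mlx5"
			" class driver.");
		return 1;
	}
	/* ... continue with vDPA specific probing here ... */
	return 0;
}
#endif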

/* On x86_64 Intel processors, check whether relaxed ordering
 * should be used.
 */
#ifdef RTE_ARCH_X86_64
/**
 * Execute the CPUID instruction and return processor identification and
 * feature information in the given registers.
 *
 * @param level
 *   The main category (CPUID leaf) of information to query.
 * @param eax, ebx, ecx, edx
 *   Pointers to the variables that will hold the returned register values.
 */
static inline void mlx5_cpu_id(unsigned int level,
				unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid\n\t"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (level));
}
#endif

RTE_INIT_PRIO(mlx5_log_init, LOG)
{
	mlx5_common_logtype = rte_log_register("pmd.common.mlx5");
	if (mlx5_common_logtype >= 0)
		rte_log_set_level(mlx5_common_logtype, RTE_LOG_NOTICE);
}
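
/*
 * Usage note (an assumption about standard DPDK dynamic logging, not
 * something defined in this file): the "pmd.common.mlx5" log type registered
 * above can be raised at run time from the application command line, e.g.
 * with the EAL option --log-level=pmd.common.mlx5:debug.
 */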

/**
 * Initialization routine for run-time dependency on glue library.
 */
RTE_INIT_PRIO(mlx5_glue_init, CLASS)
{
	mlx5_glue_constructor();
}

/**
 * Initialize the haswell_broadwell_cpu variable by checking whether the CPU
 * is an Intel processor and decoding the data returned by mlx5_cpu_id().
 * Haswell and Broadwell CPUs do not gain performance from relaxed ordering,
 * so the CPU type is checked before deciding whether to enable RO.
 * The variable is set to 1 on Haswell or Broadwell CPUs, 0 otherwise.
 */
RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
{
#ifdef RTE_ARCH_X86_64
	unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
	unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
	unsigned int i, model, family, brand_id, vendor;
	unsigned int signature_intel_ebx = 0x756e6547;
	unsigned int extended_model;
	unsigned int eax = 0;
	unsigned int ebx = 0;
	unsigned int ecx = 0;
	unsigned int edx = 0;
	int max_level;

	/* Leaf 0: EAX holds the maximum supported leaf, EBX the vendor ID. */
	mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
	vendor = ebx;
	max_level = eax;
	if (max_level < 1) {
		haswell_broadwell_cpu = 0;
		return;
	}
	/* Leaf 1: decode family/model fields from EAX, brand index from EBX. */
	mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
	model = (eax >> 4) & 0x0f;	/* Model: EAX bits [7:4]. */
	family = (eax >> 8) & 0x0f;	/* Family: EAX bits [11:8]. */
	brand_id = ebx & 0xff;
	extended_model = (eax >> 12) & 0xf0; /* Ext. model bits [19:16] << 4. */
	/* Check if the processor is Haswell or Broadwell. */
	if (vendor == signature_intel_ebx) {
		if (family == 0x06)
			model += extended_model;
		if (brand_id == 0 && family == 0x6) {
			for (i = 0; i < RTE_DIM(broadwell_models); i++)
				if (model == broadwell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
			for (i = 0; i < RTE_DIM(haswell_models); i++)
				if (model == haswell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
		}
	}
#endif
	haswell_broadwell_cpu = 0;
}

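/*
 * Illustrative sketch (not compiled): how a caller might consult
 * haswell_broadwell_cpu when deciding whether to enable relaxed ordering.
 * The helper below is hypothetical; only the haswell_broadwell_cpu variable
 * comes from this file.
 */
#if 0
static int
example_use_relaxed_ordering(void)
{
	/*
	 * Relaxed ordering brings no gain on Haswell/Broadwell, so request it
	 * only on other CPUs (subject to device capabilities as well).
	 */
	return !haswell_broadwell_cpu;
}
#endif
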
/**
 * Allocate a page of door-bell records and register it using the DevX API.
 *
 * @param [in] ctx
 *   Pointer to the device context.
 *
 * @return
 *   Pointer to the new page on success, NULL otherwise.
 */
static struct mlx5_devx_dbr_page *
mlx5_alloc_dbr_page(void *ctx)
{
	struct mlx5_devx_dbr_page *page;

	/* Allocate space for door-bell page and management data. */
	page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page),
				 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!page) {
		DRV_LOG(ERR, "Cannot allocate dbr page.");
		return NULL;
	}
	/* Register the allocated memory. */
	page->umem = mlx5_glue->devx_umem_reg(ctx, page->dbrs,
					      MLX5_DBR_PAGE_SIZE, 0);
	if (!page->umem) {
		DRV_LOG(ERR, "Cannot register umem for dbr page.");
		rte_free(page);
		return NULL;
	}
	return page;
}

/**
 * Find the next available door-bell record, allocate a new page if needed.
 *
 * @param [in] ctx
 *   Pointer to the device context.
 * @param [in] head
 *   Pointer to the head of the dbr pages list.
 * @param [out] dbr_page
 *   Pointer used to return the door-bell page that holds the allocated
 *   record.
 *
 * @return
 *   Offset of the door-bell record within the page on success, a negative
 *   error value otherwise.
 */
int64_t
mlx5_get_dbr(void *ctx, struct mlx5_dbr_page_list *head,
	     struct mlx5_devx_dbr_page **dbr_page)
{
	struct mlx5_devx_dbr_page *page = NULL;
	uint32_t i, j;

	LIST_FOREACH(page, head, next)
		if (page->dbr_count < MLX5_DBR_PER_PAGE)
			break;
	if (!page) { /* No page with free door-bell exists. */
		page = mlx5_alloc_dbr_page(ctx);
		if (!page) /* Failed to allocate new page. */
			return (-1);
		LIST_INSERT_HEAD(head, page, next);
	}
	/* Loop to find bitmap part with clear bit. */
	for (i = 0;
	     i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX;
	     i++)
		; /* Empty. */
	/* Find the first clear bit. */
	MLX5_ASSERT(i < MLX5_DBR_BITMAP_SIZE);
	j = rte_bsf64(~page->dbr_bitmap[i]);
	page->dbr_bitmap[i] |= (UINT64_C(1) << j);
	page->dbr_count++;
	*dbr_page = page;
	return (((i * 64) + j) * sizeof(uint64_t));
}

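/*
 * Illustrative sketch (not compiled): how callers typically consume the
 * offset returned by mlx5_get_dbr(). The queue-attribute assignments are
 * shown only as comments and their field names are hypothetical; the dbr
 * page layout and mlx5_os_get_umem_id() come from the mlx5 common code.
 */
#if 0
static int
example_setup_queue_dbr(void *ctx, struct mlx5_dbr_page_list *head)
{
	struct mlx5_devx_dbr_page *dbr_page = NULL;
	volatile uint32_t *dbr;
	int64_t dbr_offset;

	dbr_offset = mlx5_get_dbr(ctx, head, &dbr_page);
	if (dbr_offset < 0)
		return -1;
	/* Door-bell CPU address: base of the registered page plus offset. */
	dbr = (volatile uint32_t *)((uintptr_t)dbr_page->dbrs + dbr_offset);
	/* DevX queue creation then takes the umem id and the offset, e.g.: */
	/* attr.dbr_umem_id = mlx5_os_get_umem_id(dbr_page->umem); */
	/* attr.dbr_address = dbr_offset; */
	(void)dbr;
	return 0;
}
#endif
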
/**
 * Release a door-bell record.
 *
 * @param [in] head
 *   Pointer to the head of the dbr pages list.
 * @param [in] umem_id
 *   UMEM ID of the page containing the door-bell record to release.
 * @param [in] offset
 *   Offset of the door-bell record in the page.
 *
 * @return
 *   0 on success, a negative error value otherwise.
 */
int32_t
mlx5_release_dbr(struct mlx5_dbr_page_list *head, uint32_t umem_id,
		 uint64_t offset)
{
	struct mlx5_devx_dbr_page *page = NULL;
	int ret = 0;

	LIST_FOREACH(page, head, next)
		/* Find the page this address belongs to. */
		if (mlx5_os_get_umem_id(page->umem) == umem_id)
			break;
	if (!page)
		return -EINVAL;
	page->dbr_count--;
	if (!page->dbr_count) {
		/* Page not used, free it and remove from list. */
		LIST_REMOVE(page, next);
		if (page->umem)
			ret = -mlx5_glue->devx_umem_dereg(page->umem);
		rte_free(page);
	} else {
		/* Mark in bitmap that this door-bell is not in use. */
		offset /= MLX5_DBR_SIZE;
		int i = offset / 64;
		int j = offset % 64;

		page->dbr_bitmap[i] &= ~(UINT64_C(1) << j);
	}
	return ret;
}
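
/*
 * Illustrative sketch (not compiled): typical pairing of mlx5_get_dbr() and
 * mlx5_release_dbr(). The error handling and the ownership of the list head
 * are assumptions made for the example; only the two APIs and
 * mlx5_os_get_umem_id() come from this file.
 */
#if 0
static int
example_dbr_lifecycle(void *ctx, struct mlx5_dbr_page_list *head)
{
	struct mlx5_devx_dbr_page *dbr_page = NULL;
	int64_t offset;

	offset = mlx5_get_dbr(ctx, head, &dbr_page);
	if (offset < 0)
		return -1;
	/* ... use the door-bell record at dbr_page->dbrs + offset ... */
	return mlx5_release_dbr(head, mlx5_os_get_umem_id(dbr_page->umem),
				(uint64_t)offset);
}
#endif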