/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include <accel-config/libaccel_config.h>

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/likely.h"

#include "spdk/log.h"
#include "spdk_internal/idxd.h"

#include "idxd.h"

#define MAX_DSA_DEVICE_ID  16

struct device_config g_kernel_dev_cfg = {};

struct spdk_wq_context {
	struct accfg_wq *wq;
	unsigned int    max_batch_size;
	unsigned int    max_xfer_size;
	unsigned int    max_xfer_bits;

	int fd;
	int wq_idx;
	void *wq_reg;
	int wq_size;
	int dedicated;
	int bof;

	unsigned int wq_max_batch_size;
	unsigned long wq_max_xfer_size;
};

struct spdk_kernel_idxd_device {
	struct spdk_idxd_device idxd;
	struct accfg_ctx        *ctx;
	struct spdk_wq_context  *wq_ctx;
	uint32_t                wq_active_num;
};

#define __kernel_idxd(idxd) SPDK_CONTAINEROF(idxd, struct spdk_kernel_idxd_device, idxd)

/* Bit scan reverse: zero-based index of the most significant set bit; val is expected to be non-zero. */
static uint32_t
bsr(uint32_t val)
{
	uint32_t msb;

	msb = (val == 0) ? 0 : 32 - __builtin_clz(val);
	return msb - 1;
}

static void init_idxd_impl(struct spdk_idxd_device *idxd);

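/*
 * Open the character device node for the work queue, mmap its portal for
 * descriptor submission, and capture the queue limits reported by
 * libaccel-config.
 */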
static int
dsa_setup_single_wq(struct spdk_kernel_idxd_device *kernel_idxd, struct accfg_wq *wq, int shared)
{
	struct accfg_device *dev;
	int major, minor;
	int rc;
	char path[1024];
	struct spdk_wq_context *wq_ctx = &kernel_idxd->wq_ctx[kernel_idxd->wq_active_num];

	dev = accfg_wq_get_device(wq);
	major = accfg_device_get_cdev_major(dev);
	if (major < 0) {
		return -ENODEV;
	}
	minor = accfg_wq_get_cdev_minor(wq);
	if (minor < 0) {
		return -ENODEV;
	}

	snprintf(path, sizeof(path), "/dev/char/%u:%u", major, minor);
	wq_ctx->fd = open(path, O_RDWR);
	if (wq_ctx->fd < 0) {
		/* Save errno before it can be clobbered by the logging call. */
		rc = -errno;
		SPDK_ERRLOG("Cannot open the work queue file descriptor on path=%s\n",
			    path);
		return rc;
	}
	wq_ctx->wq_reg = mmap(NULL, 0x1000, PROT_WRITE,
			      MAP_SHARED | MAP_POPULATE, wq_ctx->fd, 0);
	if (wq_ctx->wq_reg == MAP_FAILED) {
		rc = -errno;
		SPDK_ERRLOG("mmap of the work queue portal failed on path=%s\n", path);
		/* Close the fd so the failed work queue does not leak it. */
		close(wq_ctx->fd);
		return rc;
	}

	wq_ctx->dedicated = !shared;
	wq_ctx->wq_size = accfg_wq_get_size(wq);
	wq_ctx->wq_idx = accfg_wq_get_id(wq);
	wq_ctx->bof = accfg_wq_get_block_on_fault(wq);
	wq_ctx->wq_max_batch_size = accfg_wq_get_max_batch_size(wq);
	wq_ctx->wq_max_xfer_size = accfg_wq_get_max_transfer_size(wq);

	wq_ctx->max_batch_size = accfg_device_get_max_batch_size(dev);
	wq_ctx->max_xfer_size = accfg_device_get_max_transfer_size(dev);
	wq_ctx->max_xfer_bits = bsr(wq_ctx->max_xfer_size);

	SPDK_NOTICELOG("alloc wq %d shared %d size %d addr %p batch sz %#x xfer sz %#x\n",
		       wq_ctx->wq_idx, shared, wq_ctx->wq_size, wq_ctx->wq_reg,
		       wq_ctx->max_batch_size, wq_ctx->max_xfer_size);

	wq_ctx->wq = wq;

	/* Update the count of active work queues on the kernel device */
	kernel_idxd->wq_active_num++;
	kernel_idxd->idxd.total_wq_size += wq_ctx->wq_size;

	return 0;
}

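/*
 * Walk all enabled devices and their enabled, user-type work queues via
 * libaccel-config, matching the requested device id and mode, until
 * g_kernel_dev_cfg.total_wqs queues have been set up.
 */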
static int
config_wqs(struct spdk_kernel_idxd_device *kernel_idxd,
	   int dev_id, int shared)
{
	struct accfg_device *device;
	struct accfg_wq *wq;
	int rc;

	accfg_device_foreach(kernel_idxd->ctx, device) {
		enum accfg_device_state dstate;

		/* Make sure that the device is enabled */
		dstate = accfg_device_get_state(device);
		if (dstate != ACCFG_DEVICE_ENABLED) {
			continue;
		}

		/* Match the device to the id requested */
		if (accfg_device_get_id(device) != dev_id &&
		    dev_id != -1) {
			continue;
		}

		accfg_wq_foreach(device, wq) {
			enum accfg_wq_state wstate;
			enum accfg_wq_mode mode;
			enum accfg_wq_type type;

			/* Get a work queue that's enabled */
			wstate = accfg_wq_get_state(wq);
			if (wstate != ACCFG_WQ_ENABLED) {
				continue;
			}

			/* The wq type should be user */
			type = accfg_wq_get_type(wq);
			if (type != ACCFG_WQT_USER) {
				continue;
			}

			/* Make sure the mode is correct */
			mode = accfg_wq_get_mode(wq);
			if ((mode == ACCFG_WQ_SHARED && !shared)
			    || (mode == ACCFG_WQ_DEDICATED && shared)) {
				continue;
			}

			/* We have already configured enough work queues */
			if (kernel_idxd->wq_active_num == g_kernel_dev_cfg.total_wqs) {
				break;
			}

			rc = dsa_setup_single_wq(kernel_idxd, wq, shared);
			if (rc < 0) {
				return -1;
			}
		}
	}

	if ((kernel_idxd->wq_active_num != 0) &&
	    (kernel_idxd->wq_active_num != g_kernel_dev_cfg.total_wqs)) {
		SPDK_ERRLOG("Failed to configure the expected number of wqs=%d, only configured %d\n",
			    g_kernel_dev_cfg.total_wqs, kernel_idxd->wq_active_num);
		return -1;
	}

	return 0;
}

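/* Unmap the work queue portals, close their descriptors, and free the device. */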
static void
kernel_idxd_device_destruct(struct spdk_idxd_device *idxd)
{
	uint32_t i;
	struct spdk_kernel_idxd_device *kernel_idxd = __kernel_idxd(idxd);

	if (kernel_idxd->wq_ctx) {
		for (i = 0; i < kernel_idxd->wq_active_num; i++) {
			if (munmap(kernel_idxd->wq_ctx[i].wq_reg, 0x1000)) {
				SPDK_ERRLOG("munmap failed %d on kernel_device=%p on dsa_context with wq_reg=%p\n",
					    errno, kernel_idxd, kernel_idxd->wq_ctx[i].wq_reg);
			}
			close(kernel_idxd->wq_ctx[i].fd);
		}
		free(kernel_idxd->wq_ctx);
	}

	accfg_unref(kernel_idxd->ctx);
	free(idxd);
}

/*
 * Build the work queue (WQ) config by combining information queried from the
 * device with the defined configuration. In the kernel path the driver has
 * already configured the device; this only populates SPDK's internal structures.
 */
static int
kernel_idxd_wq_config(struct spdk_kernel_idxd_device *kernel_idxd)
{
	uint32_t i;
	struct idxd_wq *queue;
	struct spdk_idxd_device *idxd = &kernel_idxd->idxd;

	/* Initialize the groups */
	idxd->groups = calloc(g_kernel_dev_cfg.num_groups, sizeof(struct idxd_group));
	if (idxd->groups == NULL) {
		SPDK_ERRLOG("Failed to allocate group memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < g_kernel_dev_cfg.num_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;
	}

	idxd->queues = calloc(g_kernel_dev_cfg.total_wqs, sizeof(struct idxd_wq));
	if (idxd->queues == NULL) {
		SPDK_ERRLOG("Failed to allocate queue memory\n");
		free(idxd->groups);
		idxd->groups = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < g_kernel_dev_cfg.total_wqs; i++) {
		queue = &idxd->queues[i];
		queue->wqcfg.wq_size = kernel_idxd->wq_ctx[i].wq_size;
		queue->wqcfg.mode = WQ_MODE_DEDICATED;
		queue->wqcfg.max_batch_shift = LOG2_WQ_MAX_BATCH;
		queue->wqcfg.max_xfer_shift = LOG2_WQ_MAX_XFER;
		queue->wqcfg.wq_state = WQ_ENABLED;
		queue->wqcfg.priority = WQ_PRIORITY_1;

		/* Not part of the config struct */
		queue->idxd = idxd;
		queue->group = &idxd->groups[i % g_kernel_dev_cfg.num_groups];
	}

	return 0;
}

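/*
 * Allocate a kernel idxd device, open an accel-config library context, and
 * configure the requested work queues for the given device id. On success
 * the device is handed to the caller via attach_cb.
 */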
static int
_kernel_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb, int dev_id)
{
	int rc;
	struct spdk_kernel_idxd_device *kernel_idxd;
	struct accfg_ctx *ctx;

	kernel_idxd = calloc(1, sizeof(struct spdk_kernel_idxd_device));
	if (kernel_idxd == NULL) {
		SPDK_ERRLOG("Failed to allocate memory for kernel_idxd device.\n");
		return -ENOMEM;
	}

	kernel_idxd->wq_ctx = calloc(g_kernel_dev_cfg.total_wqs, sizeof(struct spdk_wq_context));
	if (kernel_idxd->wq_ctx == NULL) {
		rc = -ENOMEM;
		SPDK_ERRLOG("Failed to allocate memory for the work queue contexts on kernel_idxd=%p.\n",
			    kernel_idxd);
		goto end;
	}

	rc = accfg_new(&ctx);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to allocate accfg context when probing kernel_idxd=%p\n", kernel_idxd);
		goto end;
	}

	init_idxd_impl(&kernel_idxd->idxd);
	kernel_idxd->ctx = ctx;

	/* Supporting non-shared mode first.
	 * Todo: Add the shared mode support later.
	 */
	rc = config_wqs(kernel_idxd, dev_id, 0);
	if (rc) {
		SPDK_ERRLOG("Failed to probe requested wqs on kernel device context=%p\n", ctx);
		rc = -ENODEV;
		goto end;
	}

	/* No active work queues */
	if (kernel_idxd->wq_active_num == 0) {
		goto end;
	}

	rc = kernel_idxd_wq_config(kernel_idxd);
	if (rc < 0) {
		goto end;
	}

	attach_cb(cb_ctx, &kernel_idxd->idxd);

	SPDK_NOTICELOG("Successfully attached kernel device=%p\n", kernel_idxd);
	return 0;

end:
	kernel_idxd_device_destruct(&kernel_idxd->idxd);
	return rc;
}

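/*
 * Probe each possible device id; ids that are not present simply fail to
 * attach and are skipped.
 */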
static int
kernel_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb)
{
	int i;

	for (i = 0; i < MAX_DSA_DEVICE_ID; i++) {
		_kernel_idxd_probe(cb_ctx, attach_cb, i);
	}

	return 0;
}

static void
kernel_idxd_dump_sw_error(struct spdk_idxd_device *idxd, void *portal)
{
	/* Needs to be enhanced later; no software error dump for the kernel path yet. */
}

static void
kernel_idxd_set_config(struct device_config *dev_cfg, uint32_t config_num)
{
	g_kernel_dev_cfg = *dev_cfg;
}

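/* Return the mmap'd portal address for the work queue selected by idxd->wq_id. */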
static char *
kernel_idxd_portal_get_addr(struct spdk_idxd_device *idxd)
{
	struct spdk_kernel_idxd_device *kernel_idxd = __kernel_idxd(idxd);

	assert(idxd->wq_id < g_kernel_dev_cfg.total_wqs);
	return (char *)kernel_idxd->wq_ctx[idxd->wq_id].wq_reg;
}

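/*
 * Ops table for the kernel-mode idxd implementation, registered with the
 * common idxd layer below so it can be selected by name.
 */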
static struct spdk_idxd_impl g_kernel_idxd_impl = {
	.name			= "kernel",
	.set_config		= kernel_idxd_set_config,
	.probe			= kernel_idxd_probe,
	.destruct		= kernel_idxd_device_destruct,
	.dump_sw_error		= kernel_idxd_dump_sw_error,
	.portal_get_addr	= kernel_idxd_portal_get_addr,
};

static void
init_idxd_impl(struct spdk_idxd_device *idxd)
{
	idxd->impl = &g_kernel_idxd_impl;
}

SPDK_IDXD_IMPL_REGISTER(kernel, &g_kernel_idxd_impl);