/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <rte_string_fns.h>
#include <rte_reorder.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"

/** Update the scheduler PMD's capabilities with an attaching device's
 *  capabilities.
 *  For each device to be attached, the scheduler's capabilities become
 *  the common capability set of all workers.
 **/
static uint32_t
sync_caps(struct rte_cryptodev_capabilities *caps,
		uint32_t nb_caps,
		const struct rte_cryptodev_capabilities *worker_caps)
{
	uint32_t sync_nb_caps = nb_caps, nb_worker_caps = 0;
	uint32_t i;

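	/* a capability array is terminated by an entry whose op is
	 * RTE_CRYPTO_OP_TYPE_UNDEFINED; count the worker's entries
	 */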
	while (worker_caps[nb_worker_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
		nb_worker_caps++;

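	/* first worker to sync: adopt its capability list wholesale */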
	if (nb_caps == 0) {
		rte_memcpy(caps, worker_caps, sizeof(*caps) * nb_worker_caps);
		return nb_worker_caps;
	}

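	/* otherwise intersect: keep only the capabilities this worker also
	 * supports, narrowing value ranges where needed
	 */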
	for (i = 0; i < sync_nb_caps; i++) {
		struct rte_cryptodev_capabilities *cap = &caps[i];
		uint32_t j;

		for (j = 0; j < nb_worker_caps; j++) {
			const struct rte_cryptodev_capabilities *s_cap =
					&worker_caps[j];

			if (s_cap->op != cap->op || s_cap->sym.xform_type !=
					cap->sym.xform_type)
				continue;

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_AUTH) {
				if (s_cap->sym.auth.algo !=
						cap->sym.auth.algo)
					continue;

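				/* advertise the smaller of the two digest
				 * size bounds
				 */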
				cap->sym.auth.digest_size.min =
					s_cap->sym.auth.digest_size.min <
					cap->sym.auth.digest_size.min ?
					s_cap->sym.auth.digest_size.min :
					cap->sym.auth.digest_size.min;
				cap->sym.auth.digest_size.max =
					s_cap->sym.auth.digest_size.max <
					cap->sym.auth.digest_size.max ?
					s_cap->sym.auth.digest_size.max :
					cap->sym.auth.digest_size.max;

			}

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_CIPHER)
				if (s_cap->sym.cipher.algo !=
						cap->sym.cipher.algo)
					continue;

			/* common capability found */
			break;
		}

		if (j < nb_worker_caps)
			continue;

		/* remove an uncommon cap from the array */
		for (j = i; j < sync_nb_caps - 1; j++)
			rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));

		memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
		sync_nb_caps--;
		/* re-check the entry shifted into this slot; the unsigned
		 * wrap-around when i is 0 is undone by the loop increment
		 */
		i--;
	}

	return sync_nb_caps;
}

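/** Rebuild the scheduler's advertised capability array as the common
 *  capability set of all attached workers.
 **/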
static int
update_scheduler_capability(struct scheduler_ctx *sched_ctx)
{
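	/* scratch array for the common set; assumes it never exceeds
	 * 256 entries
	 */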
	struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
	uint32_t nb_caps = 0, i;

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);

		nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
		if (nb_caps == 0)
			return -1;
	}

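	/* allocate nb_caps + 1 entries; the extra zeroed entry terminates
	 * the capability list
	 */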
	sched_ctx->capabilities = rte_zmalloc_socket(NULL,
			sizeof(struct rte_cryptodev_capabilities) *
			(nb_caps + 1), 0, SOCKET_ID_ANY);
	if (!sched_ctx->capabilities)
		return -ENOMEM;

	rte_memcpy(sched_ctx->capabilities, tmp_caps,
			sizeof(struct rte_cryptodev_capabilities) * nb_caps);

	return 0;
}

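/** Recompute the scheduler's feature flags as the union of all attached
 *  workers' feature flags.
 **/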
static void
update_scheduler_feature_flag(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	dev->feature_flags = 0;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);

		dev->feature_flags |= dev_info.feature_flags;
	}
}

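/** Set the scheduler's queue pair limit to the smallest limit among all
 *  attached workers.
 **/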
static void
update_max_nb_qp(struct scheduler_ctx *sched_ctx)
{
	uint32_t i;
	uint32_t max_nb_qp;

	if (!sched_ctx->nb_workers)
		return;

	max_nb_qp = UINT32_MAX;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
		max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
				dev_info.max_nb_queue_pairs : max_nb_qp;
	}

	sched_ctx->max_nb_queue_pairs = max_nb_qp;
}

/** Attach a device to the scheduler. */
int
rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	struct scheduler_worker *worker;
	struct rte_cryptodev_info dev_info;
	uint32_t i;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;
	if (sched_ctx->nb_workers >=
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS) {
		CR_SCHED_LOG(ERR, "Too many workers attached");
		return -ENOMEM;
	}

	for (i = 0; i < sched_ctx->nb_workers; i++)
		if (sched_ctx->workers[i].dev_id == worker_id) {
			CR_SCHED_LOG(ERR, "Worker already added");
			return -ENOTSUP;
		}

	worker = &sched_ctx->workers[sched_ctx->nb_workers];

	rte_cryptodev_info_get(worker_id, &dev_info);

	worker->dev_id = worker_id;
	worker->driver_id = dev_info.driver_id;
	sched_ctx->nb_workers++;

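	/* roll back the attach if no common capability set remains */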
	if (update_scheduler_capability(sched_ctx) < 0) {
		worker->dev_id = 0;
		worker->driver_id = 0;
		sched_ctx->nb_workers--;

		CR_SCHED_LOG(ERR, "capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}

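/*
 * Illustrative use (a sketch, not part of this file): attach two worker
 * devices to a stopped scheduler. scheduler_id, worker_id_0/worker_id_1
 * and handle_error() are hypothetical placeholders.
 *
 *	if (rte_cryptodev_scheduler_worker_attach(scheduler_id,
 *			worker_id_0) < 0 ||
 *			rte_cryptodev_scheduler_worker_attach(scheduler_id,
 *			worker_id_1) < 0)
 *		handle_error();
 */

/** Detach a device from the scheduler. */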
int
rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t i, worker_pos;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	for (worker_pos = 0; worker_pos < sched_ctx->nb_workers; worker_pos++)
		if (sched_ctx->workers[worker_pos].dev_id == worker_id)
			break;
	if (worker_pos == sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "Cannot find worker");
		return -ENOTSUP;
	}

	if (sched_ctx->ops.worker_detach(dev, worker_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to detach worker");
		return -ENOTSUP;
	}

	for (i = worker_pos; i < sched_ctx->nb_workers - 1; i++) {
		memcpy(&sched_ctx->workers[i], &sched_ctx->workers[i+1],
				sizeof(struct scheduler_worker));
	}
	memset(&sched_ctx->workers[sched_ctx->nb_workers - 1], 0,
			sizeof(struct scheduler_worker));
	sched_ctx->nb_workers--;

	if (update_scheduler_capability(sched_ctx) < 0) {
		CR_SCHED_LOG(ERR, "capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}

int
rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
		enum rte_cryptodev_scheduler_mode mode)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (mode == sched_ctx->mode)
		return 0;

	switch (mode) {
	case CDEV_SCHED_MODE_ROUNDROBIN:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_roundrobin) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_pkt_size_based_distr) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_FAILOVER:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_failover) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_MULTICORE:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_multicore) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	default:
		CR_SCHED_LOG(ERR, "Not yet supported");
		return -ENOTSUP;
	}

	return 0;
}

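/*
 * Illustrative use (a sketch, not part of this file): switch an idle
 * scheduler to packet-size-based distribution and then start it;
 * handle_error() is a hypothetical placeholder.
 *
 *	if (rte_cryptodev_scheduler_mode_set(scheduler_id,
 *			CDEV_SCHED_MODE_PKT_SIZE_DISTR) < 0)
 *		handle_error();
 *	rte_cryptodev_start(scheduler_id);
 */
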
enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return sched_ctx->mode;
}

int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
		uint32_t enable_reorder)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	sched_ctx->reordering_enabled = enable_reorder;

	return 0;
}

int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return (int)sched_ctx->reordering_enabled;
}

int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
		struct rte_cryptodev_scheduler *scheduler)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
		CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
				"%u bytes.", scheduler->name,
				RTE_CRYPTODEV_NAME_MAX_LEN);
		return -EINVAL;
	}
	strlcpy(sched_ctx->name, scheduler->name, sizeof(sched_ctx->name));

	if (strlen(scheduler->description) >
			RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
		CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
				"%u bytes.", scheduler->description,
				RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
		return -EINVAL;
	}
	strlcpy(sched_ctx->description, scheduler->description,
		sizeof(sched_ctx->description));

	/* load the scheduler instance's operation functions */
	sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
	sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
	sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
	sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
	sched_ctx->ops.worker_attach = scheduler->ops->worker_attach;
	sched_ctx->ops.worker_detach = scheduler->ops->worker_detach;
	sched_ctx->ops.option_set = scheduler->ops->option_set;
	sched_ctx->ops.option_get = scheduler->ops->option_get;

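	/* drop any private context left over from a previously loaded
	 * scheduler
	 */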
	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->ops.create_private_ctx) {
		int ret = (*sched_ctx->ops.create_private_ctx)(dev);

		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Unable to create scheduler private "
					"context");
			return ret;
		}
	}

	sched_ctx->mode = scheduler->mode;

	return 0;
}

int
rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t nb_workers = 0;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	nb_workers = sched_ctx->nb_workers;

	if (workers && nb_workers) {
		uint32_t i;

		for (i = 0; i < nb_workers; i++)
			workers[i] = sched_ctx->workers[i].dev_id;
	}

	return (int)nb_workers;
}

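/*
 * Illustrative use (a sketch, not part of this file): fetch the attached
 * worker IDs into a caller-sized array.
 *
 *	uint8_t worker_ids[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
 *	int nb = rte_cryptodev_scheduler_workers_get(scheduler_id,
 *			worker_ids);
 */
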
int
rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
			option_type >= CDEV_SCHED_OPTION_COUNT) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (!option) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);

	return (*sched_ctx->ops.option_set)(dev, option_type, option);
}

int
rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (!option) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);

	return (*sched_ctx->ops.option_get)(dev, option_type, option);
}

RTE_LOG_REGISTER_DEFAULT(scheduler_logtype_driver, INFO);