/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_reorder.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"

/** Update the scheduler PMD's capabilities with the attached device's
 *  capabilities.
 *  Each time a device is attached, the scheduler's capabilities are
 *  reduced to the common capability set of all its slaves.
 **/
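/* Illustrative example (algorithm names and sizes below are made up for
 * this comment, not taken from any real device): if the merged set so
 * far advertises SHA1-HMAC with digest sizes 12..20 and a newly attached
 * slave advertises SHA1-HMAC with 16..20, the merged entry keeps
 * SHA1-HMAC, and each of digest_size.min and digest_size.max takes the
 * smaller of the two values (12 and 20 here). A capability with no
 * matching counterpart on the new slave is removed from the merged set.
 */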
static uint32_t
sync_caps(struct rte_cryptodev_capabilities *caps,
		uint32_t nb_caps,
		const struct rte_cryptodev_capabilities *slave_caps)
{
	uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
	uint32_t i;

	while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
		nb_slave_caps++;

	if (nb_caps == 0) {
		rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
		return nb_slave_caps;
	}

	for (i = 0; i < sync_nb_caps; i++) {
		struct rte_cryptodev_capabilities *cap = &caps[i];
		uint32_t j;

		for (j = 0; j < nb_slave_caps; j++) {
			const struct rte_cryptodev_capabilities *s_cap =
					&slave_caps[j];

			if (s_cap->op != cap->op || s_cap->sym.xform_type !=
					cap->sym.xform_type)
				continue;

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_AUTH) {
				if (s_cap->sym.auth.algo !=
						cap->sym.auth.algo)
					continue;

				cap->sym.auth.digest_size.min =
					s_cap->sym.auth.digest_size.min <
					cap->sym.auth.digest_size.min ?
					s_cap->sym.auth.digest_size.min :
					cap->sym.auth.digest_size.min;
				cap->sym.auth.digest_size.max =
					s_cap->sym.auth.digest_size.max <
					cap->sym.auth.digest_size.max ?
					s_cap->sym.auth.digest_size.max :
					cap->sym.auth.digest_size.max;
			}

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_CIPHER)
				if (s_cap->sym.cipher.algo !=
						cap->sym.cipher.algo)
					continue;

			/* a common capability was found, keep caps[i] */
			break;
		}

		if (j < nb_slave_caps)
			continue;

		/* remove an uncommon cap from the array */
		for (j = i; j < sync_nb_caps - 1; j++)
			rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));

		memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
		sync_nb_caps--;
		/* re-examine the cap shifted into slot i; the unsigned
		 * wrap-around at i == 0 is undone by the loop increment
		 */
		i--;
	}

	return sync_nb_caps;
}
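
/** Rebuild the scheduler's capability array as the common capability set
 *  of all attached slaves. Returns 0 on success, -1 if the slaves share
 *  no common capability, or -ENOMEM on allocation failure.
 **/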
static int
update_scheduler_capability(struct scheduler_ctx *sched_ctx)
{
	struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
	uint32_t nb_caps = 0, i;

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		/* clear the stale pointer so the error paths below cannot
		 * leave it dangling
		 */
		sched_ctx->capabilities = NULL;
	}

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

		nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
		if (nb_caps == 0)
			return -1;
	}

	sched_ctx->capabilities = rte_zmalloc_socket(NULL,
			sizeof(struct rte_cryptodev_capabilities) *
			(nb_caps + 1), 0, SOCKET_ID_ANY);
	if (!sched_ctx->capabilities)
		return -ENOMEM;

	rte_memcpy(sched_ctx->capabilities, tmp_caps,
			sizeof(struct rte_cryptodev_capabilities) * nb_caps);

	return 0;
}
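
/** Recompute the scheduler's feature flags as the union (bitwise OR) of
 *  all attached slaves' feature flags.
 **/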
static void
update_scheduler_feature_flag(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	dev->feature_flags = 0;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

		dev->feature_flags |= dev_info.feature_flags;
	}
}
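
/** Cap the scheduler's maximum number of queue pairs at the smallest
 *  max_nb_queue_pairs value reported by any attached slave.
 **/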
static void
update_max_nb_qp(struct scheduler_ctx *sched_ctx)
{
	uint32_t i;
	uint32_t max_nb_qp;

	if (!sched_ctx->nb_slaves)
		return;

	max_nb_qp = UINT32_MAX;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
		max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
				dev_info.max_nb_queue_pairs : max_nb_qp;
	}

	sched_ctx->max_nb_queue_pairs = max_nb_qp;
}

/** Attach a device to the scheduler. */
int
rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	struct scheduler_slave *slave;
	struct rte_cryptodev_info dev_info;
	uint32_t i;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_driver_id) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;
	if (sched_ctx->nb_slaves >=
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
		CS_LOG_ERR("Too many slaves attached");
		return -ENOMEM;
	}

	for (i = 0; i < sched_ctx->nb_slaves; i++)
		if (sched_ctx->slaves[i].dev_id == slave_id) {
			CS_LOG_ERR("Slave already added");
			return -ENOTSUP;
		}

	slave = &sched_ctx->slaves[sched_ctx->nb_slaves];

	rte_cryptodev_info_get(slave_id, &dev_info);

	slave->dev_id = slave_id;
	slave->driver_id = dev_info.driver_id;
	sched_ctx->nb_slaves++;

	if (update_scheduler_capability(sched_ctx) < 0) {
		slave->dev_id = 0;
		slave->driver_id = 0;
		sched_ctx->nb_slaves--;

		CS_LOG_ERR("capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}
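
/** Detach a device from the scheduler. */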
int
rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t i, slave_pos;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_driver_id) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
		if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
			break;
	if (slave_pos == sched_ctx->nb_slaves) {
		CS_LOG_ERR("Cannot find slave");
		return -ENOTSUP;
	}

	/* guard against a NULL op when no scheduler mode is loaded yet */
	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_detach, -ENOTSUP);

	if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
		CS_LOG_ERR("Failed to detach slave");
		return -ENOTSUP;
	}

	for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
		memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
				sizeof(struct scheduler_slave));
	}
	memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
			sizeof(struct scheduler_slave));
	sched_ctx->nb_slaves--;

	if (update_scheduler_capability(sched_ctx) < 0) {
		CS_LOG_ERR("capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}

int
rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
		enum rte_cryptodev_scheduler_mode mode)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_driver_id) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (mode == sched_ctx->mode)
		return 0;

	switch (mode) {
	case CDEV_SCHED_MODE_ROUNDROBIN:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				roundrobin_scheduler) < 0) {
			CS_LOG_ERR("Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				pkt_size_based_distr_scheduler) < 0) {
			CS_LOG_ERR("Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_FAILOVER:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				failover_scheduler) < 0) {
			CS_LOG_ERR("Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_MULTICORE:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				multicore_scheduler) < 0) {
			CS_LOG_ERR("Failed to load scheduler");
			return -1;
		}
		break;
	default:
		CS_LOG_ERR("Not yet supported");
		return -ENOTSUP;
	}

	return 0;
}
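
/*
 * Typical call sequence from an application, shown as an illustrative
 * sketch: the device IDs and the "crypto_scheduler" vdev name below are
 * assumptions for the example, not values fixed by this file.
 *
 *	uint8_t sched_id =
 *		rte_cryptodev_get_dev_id("crypto_scheduler");
 *
 *	rte_cryptodev_scheduler_slave_attach(sched_id, slave_a_dev_id);
 *	rte_cryptodev_scheduler_slave_attach(sched_id, slave_b_dev_id);
 *	rte_cryptodev_scheduler_mode_set(sched_id,
 *			CDEV_SCHED_MODE_ROUNDROBIN);
 *	rte_cryptodev_scheduler_ordering_set(sched_id, 1);
 *
 * All of these calls must be made while the scheduler device is stopped;
 * each of the entry points above returns -EBUSY once the device has
 * started.
 */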
enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_driver_id) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return sched_ctx->mode;
}
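
/** Set whether crypto operation reordering is enabled (nonzero) or
 *  disabled (zero), so that operations dequeued from the scheduler keep
 *  the order in which they were enqueued even when they are distributed
 *  across multiple slaves.
 **/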
int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
		uint32_t enable_reorder)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_driver_id) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	sched_ctx->reordering_enabled = enable_reorder;

	return 0;
}

int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_driver_id) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return (int)sched_ctx->reordering_enabled;
}

int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
		struct rte_cryptodev_scheduler *scheduler)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_driver_id) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (strlen(scheduler->name) >
			RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1) {
		CS_LOG_ERR("Invalid name %s, should be no more than "
				"%u bytes", scheduler->name,
				RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
		return -EINVAL;
	}
	strncpy(sched_ctx->name, scheduler->name,
			RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);

	if (strlen(scheduler->description) >
			RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
		CS_LOG_ERR("Invalid description %s, should be no more than "
				"%u bytes", scheduler->description,
				RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
		return -EINVAL;
	}
	strncpy(sched_ctx->description, scheduler->description,
			RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);

	/* load scheduler instance operations functions */
	sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
	sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
	sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
	sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
	sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
	sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
	sched_ctx->ops.option_set = scheduler->ops->option_set;
	sched_ctx->ops.option_get = scheduler->ops->option_get;

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		/* avoid a dangling pointer in case creating the new private
		 * context fails below
		 */
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->ops.create_private_ctx) {
		int ret = (*sched_ctx->ops.create_private_ctx)(dev);

		if (ret < 0) {
			CS_LOG_ERR("Unable to create scheduler private "
					"context");
			return ret;
		}
	}

	sched_ctx->mode = scheduler->mode;

	return 0;
}
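
/** Fill the caller-provided array with the device IDs of all attached
 *  slaves and return their count. A minimal usage sketch (the variable
 *  names are illustrative):
 *
 *	uint8_t slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
 *	int nb_slaves =
 *		rte_cryptodev_scheduler_slaves_get(sched_id, slaves);
 *
 *  Passing a NULL slaves pointer just returns the count.
 **/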
int
rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t nb_slaves = 0;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_driver_id) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	nb_slaves = sched_ctx->nb_slaves;

	if (slaves && nb_slaves) {
		uint32_t i;

		for (i = 0; i < nb_slaves; i++)
			slaves[i] = sched_ctx->slaves[i].dev_id;
	}

	return (int)nb_slaves;
}

int
rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	/* validate the device before dereferencing it, as the sibling
	 * entry points do
	 */
	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_driver_id) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
			option_type >= CDEV_SCHED_OPTION_COUNT) {
		CS_LOG_ERR("Invalid option parameter");
		return -EINVAL;
	}

	if (!option) {
		CS_LOG_ERR("Invalid option parameter");
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);

	return (*sched_ctx->ops.option_set)(dev, option_type, option);
}

int
rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (!option) {
		CS_LOG_ERR("Invalid option parameter");
		return -EINVAL;
	}

	if (dev->driver_id != cryptodev_driver_id) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);

	return (*sched_ctx->ops.option_get)(dev, option_type, option);
}
602