/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <rte_string_fns.h>
#include <rte_reorder.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_security_driver.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"

#define MAX_CAPS 256

/** Update the scheduler PMD's capabilities with the attached device's
 * capabilities.
 * For each device to be attached, the scheduler's capabilities should be
 * the common capability set of all workers.
 **/
static uint32_t
sync_caps(struct rte_cryptodev_capabilities *caps,
		uint32_t nb_caps,
		const struct rte_cryptodev_capabilities *worker_caps)
{
	uint32_t sync_nb_caps = nb_caps, nb_worker_caps = 0;
	uint32_t i;

	while (worker_caps[nb_worker_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
		nb_worker_caps++;

	if (nb_caps == 0) {
		rte_memcpy(caps, worker_caps, sizeof(*caps) * nb_worker_caps);
		return nb_worker_caps;
	}

	for (i = 0; i < sync_nb_caps; i++) {
		struct rte_cryptodev_capabilities *cap = &caps[i];
		uint32_t j;

		for (j = 0; j < nb_worker_caps; j++) {
			const struct rte_cryptodev_capabilities *s_cap =
					&worker_caps[j];

			if (s_cap->op != cap->op || s_cap->sym.xform_type !=
					cap->sym.xform_type)
				continue;

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_AUTH) {
				if (s_cap->sym.auth.algo !=
						cap->sym.auth.algo)
					continue;

				cap->sym.auth.digest_size.min =
					s_cap->sym.auth.digest_size.min <
					cap->sym.auth.digest_size.min ?
					s_cap->sym.auth.digest_size.min :
					cap->sym.auth.digest_size.min;
				cap->sym.auth.digest_size.max =
					s_cap->sym.auth.digest_size.max <
					cap->sym.auth.digest_size.max ?
					s_cap->sym.auth.digest_size.max :
					cap->sym.auth.digest_size.max;
			}

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_CIPHER)
				if (s_cap->sym.cipher.algo !=
						cap->sym.cipher.algo)
					continue;

			/* a common cap was found, keep it */
			break;
		}

		if (j < nb_worker_caps)
			continue;

		/* remove an uncommon cap from the array */
		for (j = i; j < sync_nb_caps - 1; j++)
			rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));

		memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
		sync_nb_caps--;
		i--;
	}

	return sync_nb_caps;
}

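/*
 * Worked example (illustration only, not used by the driver): when merging
 * AUTH capabilities, sync_caps() clamps both digest_size.min and
 * digest_size.max down to the smaller of the two workers' values. Assuming
 * one worker advertises SHA1-HMAC with digest sizes 12..20 and another
 * advertises 16..32, the scheduler would end up advertising 12..20 for that
 * algorithm; algorithms offered by only one worker are dropped entirely.
 */
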
/* Compare two security capabilities for equality; only the DOCSIS protocol
 * is currently recognised, any other protocol is treated as not equal.
 */
static int
check_sec_cap_equal(const struct rte_security_capability *sec_cap1,
		struct rte_security_capability *sec_cap2)
{
	if (sec_cap1->action != sec_cap2->action ||
			sec_cap1->protocol != sec_cap2->protocol ||
			sec_cap1->ol_flags != sec_cap2->ol_flags)
		return 0;

	if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
		return !memcmp(&sec_cap1->docsis, &sec_cap2->docsis,
				sizeof(sec_cap1->docsis));
	else
		return 0;
}

static void
copy_sec_cap(struct rte_security_capability *dst_sec_cap,
		struct rte_security_capability *src_sec_cap)
{
	dst_sec_cap->action = src_sec_cap->action;
	dst_sec_cap->protocol = src_sec_cap->protocol;
	if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
		dst_sec_cap->docsis = src_sec_cap->docsis;
	dst_sec_cap->ol_flags = src_sec_cap->ol_flags;
}

static uint32_t
sync_sec_crypto_caps(struct rte_cryptodev_capabilities *tmp_sec_crypto_caps,
		const struct rte_cryptodev_capabilities *sec_crypto_caps,
		const struct rte_cryptodev_capabilities *worker_sec_crypto_caps)
{
	uint32_t nb_caps = 0;

	nb_caps = sync_caps(tmp_sec_crypto_caps, nb_caps, sec_crypto_caps);
	nb_caps = sync_caps(tmp_sec_crypto_caps, nb_caps,
			worker_sec_crypto_caps);

	return nb_caps;
}

/** Update the scheduler PMD's security capabilities with the attached
 * device's security capabilities.
 * For each device to be attached, the scheduler's security capabilities
 * should be the common capability set of all workers.
 **/
static uint32_t
sync_sec_caps(uint32_t worker_idx,
		struct rte_security_capability *sec_caps,
		struct rte_cryptodev_capabilities sec_crypto_caps[][MAX_CAPS],
		uint32_t nb_sec_caps,
		const struct rte_security_capability *worker_sec_caps)
{
	uint32_t nb_worker_sec_caps = 0, i;

	if (worker_sec_caps == NULL)
		return 0;

	while (worker_sec_caps[nb_worker_sec_caps].action !=
					RTE_SECURITY_ACTION_TYPE_NONE)
		nb_worker_sec_caps++;

	/* Handle first worker */
	if (worker_idx == 0) {
		uint32_t nb_worker_supp_sec_caps = 0;

		for (i = 0; i < nb_worker_sec_caps; i++) {
			uint32_t nb_worker_sec_crypto_caps = 0;

			/* Check for supported security protocols */
			if (!scheduler_check_sec_proto_supp(worker_sec_caps[i].action,
					worker_sec_caps[i].protocol))
				continue;

			sec_caps[nb_worker_supp_sec_caps] = worker_sec_caps[i];

			/* Count this capability's crypto capabilities */
			while (worker_sec_caps[i].crypto_capabilities[
					nb_worker_sec_crypto_caps].op !=
						RTE_CRYPTO_OP_TYPE_UNDEFINED)
				nb_worker_sec_crypto_caps++;

			rte_memcpy(&sec_crypto_caps[nb_worker_supp_sec_caps][0],
				&worker_sec_caps[i].crypto_capabilities[0],
				sizeof(sec_crypto_caps[nb_worker_supp_sec_caps][0]) *
					nb_worker_sec_crypto_caps);

			nb_worker_supp_sec_caps++;
		}
		return nb_worker_supp_sec_caps;
	}

	for (i = 0; i < nb_sec_caps; i++) {
		struct rte_security_capability *sec_cap = &sec_caps[i];
		uint32_t j;

		for (j = 0; j < nb_worker_sec_caps; j++) {
			struct rte_cryptodev_capabilities
					tmp_sec_crypto_caps[MAX_CAPS] = { {0} };
			uint32_t nb_sec_crypto_caps = 0;
			const struct rte_security_capability *worker_sec_cap =
					&worker_sec_caps[j];

			if (!check_sec_cap_equal(worker_sec_cap, sec_cap))
				continue;

			/* Sync the crypto caps of the common security cap */
			nb_sec_crypto_caps = sync_sec_crypto_caps(
						tmp_sec_crypto_caps,
						&sec_crypto_caps[i][0],
						&worker_sec_cap->crypto_capabilities[0]);

			memset(&sec_crypto_caps[i][0], 0,
				sizeof(sec_crypto_caps[i][0]) * MAX_CAPS);

			rte_memcpy(&sec_crypto_caps[i][0],
				&tmp_sec_crypto_caps[0],
				sizeof(sec_crypto_caps[i][0]) * nb_sec_crypto_caps);

			break;
		}

		if (j < nb_worker_sec_caps)
			continue;

		/*
		 * Remove an uncommon security cap, and its associated crypto
		 * caps, from the arrays
		 */
		for (j = i; j < nb_sec_caps - 1; j++) {
			rte_memcpy(&sec_caps[j], &sec_caps[j+1],
					sizeof(*sec_cap));

			rte_memcpy(&sec_crypto_caps[j][0],
					&sec_crypto_caps[j+1][0],
					sizeof(sec_crypto_caps[j][0]) *
						MAX_CAPS);
		}
		memset(&sec_caps[nb_sec_caps - 1], 0, sizeof(*sec_cap));
		memset(&sec_crypto_caps[nb_sec_caps - 1][0], 0,
			sizeof(sec_crypto_caps[nb_sec_caps - 1][0]) *
				MAX_CAPS);
		nb_sec_caps--;
		i--;
	}

	return nb_sec_caps;
}

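/*
 * Summary for readers (descriptive comment only): for the first worker,
 * sync_sec_caps() copies every supported security capability together with
 * its crypto capabilities. For each subsequent worker it keeps a security
 * capability only if the worker advertises an equal one (see
 * check_sec_cap_equal()), replacing the stored crypto capabilities with the
 * common subset computed by sync_sec_crypto_caps(); capabilities the worker
 * does not share are removed from both arrays.
 */
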
static int
update_scheduler_capability(struct scheduler_ctx *sched_ctx)
{
	struct rte_cryptodev_capabilities tmp_caps[MAX_CAPS] = { {0} };
	struct rte_security_capability tmp_sec_caps[MAX_CAPS] = { {0} };
	struct rte_cryptodev_capabilities
		tmp_sec_crypto_caps[MAX_CAPS][MAX_CAPS] = { {{0}} };
	uint32_t nb_caps = 0, nb_sec_caps = 0, i;
	struct rte_cryptodev_info dev_info;

	/* Free any previously allocated capability memory */
	scheduler_free_capabilities(sched_ctx);

	/* Determine the new cryptodev capabilities for the scheduler */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);

		nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
		if (nb_caps == 0)
			return -1;
	}

	sched_ctx->capabilities = rte_zmalloc_socket(NULL,
			sizeof(struct rte_cryptodev_capabilities) *
			(nb_caps + 1), 0, SOCKET_ID_ANY);
	if (!sched_ctx->capabilities)
		return -ENOMEM;

	rte_memcpy(sched_ctx->capabilities, tmp_caps,
			sizeof(struct rte_cryptodev_capabilities) * nb_caps);

	/* Determine the new security capabilities for the scheduler */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct rte_cryptodev *dev =
				&rte_cryptodevs[sched_ctx->workers[i].dev_id];
		struct rte_security_ctx *sec_ctx = dev->security_ctx;

		nb_sec_caps = sync_sec_caps(i, tmp_sec_caps, tmp_sec_crypto_caps,
				nb_sec_caps, rte_security_capabilities_get(sec_ctx));
	}

	sched_ctx->sec_capabilities = rte_zmalloc_socket(NULL,
					sizeof(struct rte_security_capability) *
					(nb_sec_caps + 1), 0, SOCKET_ID_ANY);
	if (!sched_ctx->sec_capabilities)
		return -ENOMEM;

	sched_ctx->sec_crypto_capabilities = rte_zmalloc_socket(NULL,
					sizeof(struct rte_cryptodev_capabilities *) *
					(nb_sec_caps + 1),
					0, SOCKET_ID_ANY);
	if (!sched_ctx->sec_crypto_capabilities)
		return -ENOMEM;

	for (i = 0; i < nb_sec_caps; i++) {
		uint16_t nb_sec_crypto_caps = 0;

		copy_sec_cap(&sched_ctx->sec_capabilities[i], &tmp_sec_caps[i]);

		while (tmp_sec_crypto_caps[i][nb_sec_crypto_caps].op !=
						RTE_CRYPTO_OP_TYPE_UNDEFINED)
			nb_sec_crypto_caps++;

		sched_ctx->sec_crypto_capabilities[i] =
				rte_zmalloc_socket(NULL,
					sizeof(struct rte_cryptodev_capabilities) *
					(nb_sec_crypto_caps + 1), 0, SOCKET_ID_ANY);
		if (!sched_ctx->sec_crypto_capabilities[i])
			return -ENOMEM;

		rte_memcpy(sched_ctx->sec_crypto_capabilities[i],
				&tmp_sec_crypto_caps[i][0],
				sizeof(struct rte_cryptodev_capabilities)
					* nb_sec_crypto_caps);

		sched_ctx->sec_capabilities[i].crypto_capabilities =
				sched_ctx->sec_crypto_capabilities[i];
	}

	return 0;
}

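/*
 * Layout note (descriptive comment only): after a successful update,
 * sched_ctx->capabilities is an array of the common crypto capabilities
 * terminated by a zeroed entry (op == RTE_CRYPTO_OP_TYPE_UNDEFINED),
 * sched_ctx->sec_capabilities is the array of common security capabilities
 * terminated by an all-zero entry, and sched_ctx->sec_crypto_capabilities[i]
 * holds the crypto capabilities that sec_capabilities[i] points to through
 * its crypto_capabilities field. Previously allocated arrays are released by
 * scheduler_free_capabilities() at the top of this function before each
 * rebuild.
 */
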
static void
update_scheduler_feature_flag(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	dev->feature_flags = 0;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);

		dev->feature_flags |= dev_info.feature_flags;
	}
}

static void
update_max_nb_qp(struct scheduler_ctx *sched_ctx)
{
	uint32_t i;
	uint32_t max_nb_qp;

	if (!sched_ctx->nb_workers)
		return;

	max_nb_qp = UINT32_MAX;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
		max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
				dev_info.max_nb_queue_pairs : max_nb_qp;
	}

	sched_ctx->max_nb_queue_pairs = max_nb_qp;
}

/** Attach a worker crypto device to the scheduler. */
int
rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	struct scheduler_worker *worker;
	struct rte_cryptodev_info dev_info;
	uint32_t i;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;
	if (sched_ctx->nb_workers >=
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS) {
		CR_SCHED_LOG(ERR, "Too many workers attached");
		return -ENOMEM;
	}

	for (i = 0; i < sched_ctx->nb_workers; i++)
		if (sched_ctx->workers[i].dev_id == worker_id) {
			CR_SCHED_LOG(ERR, "Worker already added");
			return -ENOTSUP;
		}

	worker = &sched_ctx->workers[sched_ctx->nb_workers];

	rte_cryptodev_info_get(worker_id, &dev_info);

	worker->dev_id = worker_id;
	worker->driver_id = dev_info.driver_id;
	sched_ctx->nb_workers++;

	if (update_scheduler_capability(sched_ctx) < 0) {
		scheduler_free_capabilities(sched_ctx);
		worker->dev_id = 0;
		worker->driver_id = 0;
		sched_ctx->nb_workers--;

		CR_SCHED_LOG(ERR, "capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}

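/*
 * Usage sketch (illustrative only, not part of this driver): an application
 * typically looks up the scheduler and worker device ids by name and
 * attaches the workers before starting the device. The device names below
 * are examples and depend on how the vdevs were created.
 *
 *	int sched_id = rte_cryptodev_get_dev_id("crypto_scheduler");
 *	int worker_id = rte_cryptodev_get_dev_id("crypto_aesni_mb0");
 *
 *	if (sched_id < 0 || worker_id < 0 ||
 *			rte_cryptodev_scheduler_worker_attach(sched_id,
 *				worker_id) < 0)
 *		... handle error ...
 *
 *	rte_cryptodev_scheduler_mode_set(sched_id, CDEV_SCHED_MODE_ROUNDROBIN);
 */
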
/** Detach a worker crypto device from the scheduler. */
int
rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t i, worker_pos;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	for (worker_pos = 0; worker_pos < sched_ctx->nb_workers; worker_pos++)
		if (sched_ctx->workers[worker_pos].dev_id == worker_id)
			break;
	if (worker_pos == sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "Cannot find worker");
		return -ENOTSUP;
	}

	if (sched_ctx->ops.worker_detach(dev, worker_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to detach worker");
		return -ENOTSUP;
	}

	for (i = worker_pos; i < sched_ctx->nb_workers - 1; i++) {
		memcpy(&sched_ctx->workers[i], &sched_ctx->workers[i+1],
				sizeof(struct scheduler_worker));
	}
	memset(&sched_ctx->workers[sched_ctx->nb_workers - 1], 0,
			sizeof(struct scheduler_worker));
	sched_ctx->nb_workers--;

	if (update_scheduler_capability(sched_ctx) < 0) {
		scheduler_free_capabilities(sched_ctx);
		CR_SCHED_LOG(ERR, "capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}

/** Set the scheduling mode of the scheduler. */
int
rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
		enum rte_cryptodev_scheduler_mode mode)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (mode == sched_ctx->mode)
		return 0;

	switch (mode) {
	case CDEV_SCHED_MODE_ROUNDROBIN:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_roundrobin) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_pkt_size_based_distr) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_FAILOVER:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_failover) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_MULTICORE:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_multicore) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	default:
		CR_SCHED_LOG(ERR, "Not yet supported");
		return -ENOTSUP;
	}

	return 0;
}

/** Get the current scheduling mode of the scheduler. */
enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return sched_ctx->mode;
}

/** Enable or disable crypto op reordering on the scheduler. */
int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
		uint32_t enable_reorder)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	sched_ctx->reordering_enabled = enable_reorder;

	return 0;
}

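/*
 * Note (descriptive comment only): with reordering enabled the scheduler
 * returns crypto ops on dequeue in their original enqueue order even when
 * they were processed by different workers, presumably using the
 * rte_reorder library included at the top of this file. Because of the
 * -EBUSY check above, the setting can only be changed while the device is
 * stopped.
 */
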
/** Get the current crypto op reordering setting of the scheduler. */
int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return (int)sched_ctx->reordering_enabled;
}

/** Load a user-defined scheduler into the scheduler PMD. */
int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
		struct rte_cryptodev_scheduler *scheduler)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
		CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
				"%u bytes.", scheduler->name,
				RTE_CRYPTODEV_NAME_MAX_LEN);
		return -EINVAL;
	}
	strlcpy(sched_ctx->name, scheduler->name, sizeof(sched_ctx->name));

	if (strlen(scheduler->description) >
			RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
		CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
				"%u bytes.", scheduler->description,
				RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
		return -EINVAL;
	}
	strlcpy(sched_ctx->description, scheduler->description,
			sizeof(sched_ctx->description));

	/* load scheduler instance operations functions */
	sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
	sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
	sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
	sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
	sched_ctx->ops.worker_attach = scheduler->ops->worker_attach;
	sched_ctx->ops.worker_detach = scheduler->ops->worker_detach;
	sched_ctx->ops.option_set = scheduler->ops->option_set;
	sched_ctx->ops.option_get = scheduler->ops->option_get;

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->ops.create_private_ctx) {
		int ret = (*sched_ctx->ops.create_private_ctx)(dev);

		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Unable to create scheduler private "
					"context");
			return ret;
		}
	}

	sched_ctx->mode = scheduler->mode;

	return 0;
}

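/*
 * Illustrative sketch (hypothetical names, not part of this driver): a
 * user-defined scheduler is a statically initialised
 * struct rte_cryptodev_scheduler whose ops table supplies the callbacks
 * copied above, for example:
 *
 *	static struct rte_cryptodev_scheduler_ops my_ops = {
 *		.worker_attach = my_worker_attach,
 *		.worker_detach = my_worker_detach,
 *		.scheduler_start = my_start,
 *		.scheduler_stop = my_stop,
 *	};
 *
 *	static struct rte_cryptodev_scheduler my_scheduler = {
 *		.name = "my-scheduler",
 *		.description = "example user-defined scheduler",
 *		.mode = CDEV_SCHED_MODE_USERDEFINED,
 *		.ops = &my_ops,
 *	};
 *
 *	rte_cryptodev_scheduler_load_user_scheduler(sched_id, &my_scheduler);
 *
 * The my_* symbols are placeholders; the full set of required fields is
 * defined in rte_cryptodev_scheduler.h and
 * rte_cryptodev_scheduler_operations.h.
 */
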
/** Get the worker device ids attached to the scheduler. */
int
rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t nb_workers = 0;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	nb_workers = sched_ctx->nb_workers;

	if (workers && nb_workers) {
		uint32_t i;

		for (i = 0; i < nb_workers; i++)
			workers[i] = sched_ctx->workers[i].dev_id;
	}

	return (int)nb_workers;
}

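/*
 * Usage sketch (illustrative only): the function returns the worker count
 * whether or not an output array is supplied, so a caller can size the
 * array with the driver's own limit:
 *
 *	uint8_t workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
 *	int nb = rte_cryptodev_scheduler_workers_get(sched_id, workers);
 *
 *	if (nb < 0)
 *		... handle error ...
 *
 * Passing NULL for the workers argument only queries the count.
 */
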
/** Set an option on the loaded scheduler. */
int
rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
			option_type >= CDEV_SCHED_OPTION_COUNT) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (!option) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (*sched_ctx->ops.option_set == NULL)
		return -ENOTSUP;

	return (*sched_ctx->ops.option_set)(dev, option_type, option);
}

/** Get an option from the loaded scheduler. */
int
rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (!option) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	if (*sched_ctx->ops.option_get == NULL)
		return -ENOTSUP;

	return (*sched_ctx->ops.option_get)(dev, option_type, option);
}


RTE_LOG_REGISTER_DEFAULT(scheduler_logtype_driver, INFO);