/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

/** Attach the workers predefined by the scheduler's EAL options */
static int
scheduler_attach_init_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

	for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_worker_names[i];
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!worker_dev) {
			CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_worker_attach(
				scheduler_id, worker_dev->data->dev_id);

		if (status < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
					worker_dev->data->dev_id);
			return status;
		}

		CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
				dev->data->name,
				sched_ctx->init_worker_names[i]);

		rte_free(sched_ctx->init_worker_names[i]);
		sched_ctx->init_worker_names[i] = NULL;

		sched_ctx->nb_init_workers -= 1;
	}

	return 0;
}

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* although scheduler_attach_init_worker is called multiple times,
	 * only one invocation performs any real work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		ret = rte_cryptodev_configure(worker_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "failed to create unique reorder buffer "
					"name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CR_SCHED_LOG(ERR, "failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}
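
/*
 * Sizing note for update_order_ring() above: rte_ring_create() without
 * RING_F_EXACT_SZ requires a power-of-two size, hence the rte_align32pow2()
 * rounding of nb_workers * PER_WORKER_BUFF_SIZE. Purely as an illustration
 * (the real PER_WORKER_BUFF_SIZE value is defined in
 * scheduler_pmd_private.h), two workers with a per-worker buffer of 256
 * would give rte_align32pow2(512) = 512 ring entries.
 */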

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* although scheduler_attach_init_worker is called multiple times,
	 * only one invocation performs any real work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CR_SCHED_LOG(ERR, "Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "No worker in the scheduler");
		return -1;
	}

	if (*sched_ctx->ops.worker_attach == NULL)
		return -ENOTSUP;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker");
			return -ENOTSUP;
		}
	}

	if (*sched_ctx->ops.scheduler_start == NULL)
		return -ENOTSUP;

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CR_SCHED_LOG(ERR, "Scheduler start failed");
		return -1;
	}

	/* start all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		ret = rte_cryptodev_start(worker_dev_id);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
					worker_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		rte_cryptodev_stop(worker_dev_id);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if (*sched_ctx->ops.worker_detach)
			(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	return 0;
}
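
/*
 * The scheduler keeps no counters of its own: stats_get below sums the
 * counters reported by every attached worker, and stats_reset simply
 * forwards the reset to each worker.
 */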

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);
		struct rte_cryptodev_stats worker_stats = {0};

		(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);

		stats->enqueued_count += worker_stats.enqueued_count;
		stats->dequeued_count += worker_stats.dequeued_count;

		stats->enqueue_err_count += worker_stats.enqueue_err_count;
		stats->dequeue_err_count += worker_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		(*worker_dev->dev_ops->stats_reset)(worker_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sess = 0;
	uint16_t headroom_sz = 0;
	uint16_t tailroom_sz = 0;
	uint32_t i;

	if (!dev_info)
		return;

	/* although scheduler_attach_init_worker is called multiple times,
	 * only one invocation performs any real work.
	 */
	scheduler_attach_init_worker(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev_info worker_info;

		rte_cryptodev_info_get(worker_dev_id, &worker_info);
		uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
		if (dev_max_sess != 0) {
			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
				max_nb_sess = worker_info.sym.max_nb_sessions;
		}

		/* Get the max headroom requirement among worker PMDs */
		headroom_sz = worker_info.min_mbuf_headroom_req >
				headroom_sz ?
				worker_info.min_mbuf_headroom_req :
				headroom_sz;

		/* Get the max tailroom requirement among worker PMDs */
		tailroom_sz = worker_info.min_mbuf_tailroom_req >
				tailroom_sz ?
				worker_info.min_mbuf_tailroom_req :
				tailroom_sz;
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->min_mbuf_headroom_req = headroom_sz;
	dev_info->min_mbuf_tailroom_req = tailroom_sz;
	dev_info->sym.max_nb_sessions = max_nb_sess;
}
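
/*
 * Queue pair handling: setup first propagates qp_conf to the same qp_id on
 * every worker and then allocates the scheduler's own per-queue-pair
 * context; release frees the reorder ring and the mode-private qp context
 * together with that structure.
 */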

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	rte_ring_free(qp_ctx->order_ring);

	rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_id = sched_ctx->workers[i].dev_id;

		/*
		 * All workers will share the same session mempool
		 * for session-less operations, so the objects
		 * must be big enough for all the drivers used.
		 */
		ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual available object number = nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* although scheduler_attach_init_worker is called multiple times,
	 * only one invocation performs any real work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0) {
		CR_SCHED_LOG(ERR, "Failed to attach worker");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}
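
/*
 * Session handling: the size reported below is the largest private session
 * size among the attached workers, so one session mempool object is big
 * enough no matter which worker ends up processing the operation; session
 * configure and clear then fan out to every worker.
 */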

static uint32_t
scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t i = 0;
	uint32_t max_priv_sess_size = 0;

	/* Check what is the maximum private session size for all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				&rte_cryptodevs[worker_dev_id];
		uint32_t priv_sess_size =
			(*worker_dev->dev_ops->sym_session_get_size)(worker_dev);

		if (max_priv_sess_size < priv_sess_size)
			max_priv_sess_size = priv_sess_size;
	}

	return max_priv_sess_size;
}

static int
scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess,
	struct rte_mempool *mempool)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];

		ret = rte_cryptodev_sym_session_init(worker->dev_id, sess,
					xform, mempool);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "unable to config sym session");
			return ret;
		}
	}

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	/* Clear private data of workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];

		rte_cryptodev_sym_session_clear(worker->dev_id, sess);
	}
}

static struct rte_cryptodev_ops scheduler_pmd_ops = {
	.dev_configure = scheduler_pmd_config,
	.dev_start = scheduler_pmd_start,
	.dev_stop = scheduler_pmd_stop,
	.dev_close = scheduler_pmd_close,

	.stats_get = scheduler_pmd_stats_get,
	.stats_reset = scheduler_pmd_stats_reset,

	.dev_infos_get = scheduler_pmd_info_get,

	.queue_pair_setup = scheduler_pmd_qp_setup,
	.queue_pair_release = scheduler_pmd_qp_release,

	.sym_session_get_size = scheduler_pmd_sym_session_get_size,
	.sym_session_configure = scheduler_pmd_sym_session_configure,
	.sym_session_clear = scheduler_pmd_sym_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
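
/*
 * Illustrative usage (not part of the driver): an application reaches the
 * ops above through the public cryptodev API, roughly as sketched below.
 * The device ids, descriptor count and session mempools are hypothetical
 * placeholders.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *		.mp_session_private = session_priv_pool,
 *	};
 *
 *	rte_cryptodev_scheduler_worker_attach(sched_id, worker_id);
 *	rte_cryptodev_scheduler_mode_set(sched_id, CDEV_SCHED_MODE_ROUNDROBIN);
 *	rte_cryptodev_configure(sched_id, &conf);	// scheduler_pmd_config
 *	rte_cryptodev_queue_pair_setup(sched_id, 0,
 *			&qp_conf, rte_socket_id());	// scheduler_pmd_qp_setup
 *	rte_cryptodev_start(sched_id);			// scheduler_pmd_start
 */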