/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

/** Attach the workers predefined by the scheduler's EAL options */
static int
scheduler_attach_init_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

	for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_worker_names[i];
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!worker_dev) {
			CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_worker_attach(
				scheduler_id, worker_dev->data->dev_id);

		if (status < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
					worker_dev->data->dev_id);
			return status;
		}

		CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
				dev->data->name,
				sched_ctx->init_worker_names[i]);

		rte_free(sched_ctx->init_worker_names[i]);
		sched_ctx->init_worker_names[i] = NULL;

		sched_ctx->nb_init_workers -= 1;
	}

	return 0;
}

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_worker() is called from several entry points,
	 * but only the first call performs any meaningful work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		ret = rte_cryptodev_configure(worker_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

/** Create, resize or free the reorder ring of a queue pair */
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "failed to create unique reorder buffer name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CR_SCHED_LOG(ERR, "failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}
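
/*
 * Reorder ring sizing note (illustrative; assuming PER_WORKER_BUFF_SIZE is
 * 256 as defined in scheduler_pmd_private.h): with 3 attached workers,
 * update_order_ring() above requests 3 * 256 = 768 entries, which
 * rte_align32pow2() rounds up to 1024, since rings created without
 * RING_F_EXACT_SZ must have a power-of-two size.
 */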

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* scheduler_attach_init_worker() is called from several entry points,
	 * but only the first call performs any meaningful work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CR_SCHED_LOG(ERR, "Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "No worker in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.worker_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CR_SCHED_LOG(ERR, "Scheduler start failed");
		return -1;
	}

	/* start all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		ret = rte_cryptodev_start(worker_dev_id);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
					worker_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		rte_cryptodev_stop(worker_dev_id);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if (*sched_ctx->ops.worker_detach)
			(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);
		struct rte_cryptodev_stats worker_stats = {0};

		(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);

		stats->enqueued_count += worker_stats.enqueued_count;
		stats->dequeued_count += worker_stats.dequeued_count;

		stats->enqueue_err_count += worker_stats.enqueue_err_count;
		stats->dequeue_err_count += worker_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		(*worker_dev->dev_ops->stats_reset)(worker_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sess = 0;
	uint16_t headroom_sz = 0;
	uint16_t tailroom_sz = 0;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_worker() is called from several entry points,
	 * but only the first call performs any meaningful work.
	 */
	scheduler_attach_init_worker(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev_info worker_info;

		rte_cryptodev_info_get(worker_dev_id, &worker_info);

		/* Report the smallest non-zero session limit among workers */
		uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
		if (dev_max_sess != 0) {
			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
				max_nb_sess = worker_info.sym.max_nb_sessions;
		}

		/* Get the max headroom requirement among worker PMDs */
		headroom_sz = worker_info.min_mbuf_headroom_req >
				headroom_sz ?
				worker_info.min_mbuf_headroom_req :
				headroom_sz;

		/* Get the max tailroom requirement among worker PMDs */
		tailroom_sz = worker_info.min_mbuf_tailroom_req >
				tailroom_sz ?
				worker_info.min_mbuf_tailroom_req :
				tailroom_sz;
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->min_mbuf_headroom_req = headroom_sz;
	dev_info->min_mbuf_tailroom_req = tailroom_sz;
	dev_info->sym.max_nb_sessions = max_nb_sess;
}
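
/*
 * Aggregation example (hypothetical numbers): if one worker reports
 * sym.max_nb_sessions = 2048 and another reports 0 (no limit), the
 * scheduler advertises 2048 above, i.e. the tightest non-zero limit,
 * while min_mbuf_headroom_req and min_mbuf_tailroom_req are the maxima
 * over all workers.
 */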

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	rte_ring_free(qp_ctx->order_ring);
	rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_id = sched_ctx->workers[i].dev_id;

		/*
		 * All workers will share the same session mempool
		 * for session-less operations, so the objects
		 * must be big enough for all the drivers used.
		 */
		ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* A ring of nb_descriptors slots can hold at most
	 * nb_descriptors - 1 objects.
	 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_worker() is called from several entry points,
	 * but only the first call performs any meaningful work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0) {
		CR_SCHED_LOG(ERR, "Failed to attach worker");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

static uint32_t
scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t i = 0;
	uint32_t max_priv_sess_size = 0;

	/* Check what is the maximum private session size for all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				&rte_cryptodevs[worker_dev_id];
		uint32_t priv_sess_size =
			(*worker_dev->dev_ops->sym_session_get_size)(worker_dev);

		if (max_priv_sess_size < priv_sess_size)
			max_priv_sess_size = priv_sess_size;
	}

	return max_priv_sess_size;
}

static int
scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess,
	struct rte_mempool *mempool)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];

		ret = rte_cryptodev_sym_session_init(worker->dev_id, sess,
					xform, mempool);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "unable to configure sym session");
			return ret;
		}
	}

	return 0;
}

/** Clear the session memory so it doesn't leave key material behind */
static void
scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	/* Clear private data of workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];

		rte_cryptodev_sym_session_clear(worker->dev_id, sess);
	}
}

static struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure = scheduler_pmd_config,
		.dev_start = scheduler_pmd_start,
		.dev_stop = scheduler_pmd_stop,
		.dev_close = scheduler_pmd_close,

		.stats_get = scheduler_pmd_stats_get,
		.stats_reset = scheduler_pmd_stats_reset,

		.dev_infos_get = scheduler_pmd_info_get,

		.queue_pair_setup = scheduler_pmd_qp_setup,
		.queue_pair_release = scheduler_pmd_qp_release,

		.sym_session_get_size = scheduler_pmd_sym_session_get_size,
		.sym_session_configure = scheduler_pmd_sym_session_configure,
		.sym_session_clear = scheduler_pmd_sym_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
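
/*
 * Usage sketch (illustrative only, not part of this driver; the vdev
 * arguments, device names and ids below are assumptions):
 *
 *	// The scheduler is typically created as a vdev with its workers and
 *	// mode given on the EAL command line, e.g.
 *	//   --vdev "crypto_scheduler,worker=<worker0>,worker=<worker1>,mode=round-robin"
 *	// Workers may also be attached programmatically before configuration:
 *	int sched_id = rte_cryptodev_get_dev_id("crypto_scheduler");
 *	rte_cryptodev_scheduler_worker_attach(sched_id, worker_dev_id);
 *	rte_cryptodev_scheduler_mode_set(sched_id, CDEV_SCHED_MODE_ROUNDROBIN);
 *
 *	// The normal cryptodev calls then exercise the callbacks in this file:
 *	rte_cryptodev_configure(sched_id, &conf);	// scheduler_pmd_config()
 *	rte_cryptodev_queue_pair_setup(sched_id, 0, &qp_conf,
 *			rte_socket_id());		// scheduler_pmd_qp_setup()
 *	rte_cryptodev_start(sched_id);			// scheduler_pmd_start()
 */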