/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

/** Attach the slaves predefined by the scheduler's EAL options */
static int
scheduler_attach_init_slave(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

	for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_slave_names[i];
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!slave_dev) {
			CR_SCHED_LOG(ERR, "Failed to locate slave dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_slave_attach(
				scheduler_id, slave_dev->data->dev_id);
		if (status < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach slave cryptodev %u",
					slave_dev->data->dev_id);
			return status;
		}

		CR_SCHED_LOG(INFO, "Scheduler %s attached slave %s",
				dev->data->name,
				sched_ctx->init_slave_names[i]);

		rte_free(sched_ctx->init_slave_names[i]);
		sched_ctx->init_slave_names[i] = NULL;

		sched_ctx->nb_init_slaves -= 1;
	}

	return 0;
}

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_slave() may be called multiple times,
	 * but only the first invocation does any work.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		ret = rte_cryptodev_configure(slave_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

/** (Re)create a queue pair's reorder ring when reordering is enabled */
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "failed to create unique reorder "
					"buffer name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CR_SCHED_LOG(ERR, "failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}
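/*
 * Sizing note for update_order_ring() above, as a worked example
 * (the PER_SLAVE_BUFF_SIZE value here is purely illustrative): with
 * 3 slaves and a hypothetical PER_SLAVE_BUFF_SIZE of 256,
 * rte_align32pow2(3 * 256) = rte_align32pow2(768) = 1024 entries.
 * rte_align32pow2() rounds up to the next power of two, which
 * rte_ring_create() requires for the ring size.
 */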
/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* scheduler_attach_init_slave() may be called multiple times,
	 * but only the first invocation does any work.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CR_SCHED_LOG(ERR, "Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CR_SCHED_LOG(ERR, "No slave in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CR_SCHED_LOG(ERR, "Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->dev_stop)(slave_dev);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if (*sched_ctx->ops.slave_detach)
			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	return 0;
}
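/*
 * Typical device lifecycle from the application side (a minimal sketch;
 * "sched_id" is the scheduler's cryptodev id and is illustrative only):
 *
 *	rte_cryptodev_configure(sched_id, &config);  -> scheduler_pmd_config()
 *	rte_cryptodev_start(sched_id);               -> scheduler_pmd_start()
 *	rte_cryptodev_stop(sched_id);                -> scheduler_pmd_stop()
 *	rte_cryptodev_close(sched_id);               -> scheduler_pmd_close()
 *
 * Note that scheduler_pmd_close() returns -EBUSY unless the device has
 * been stopped first.
 */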
/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);
		struct rte_cryptodev_stats slave_stats = {0};

		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);

		stats->enqueued_count += slave_stats.enqueued_count;
		stats->dequeued_count += slave_stats.dequeued_count;

		stats->enqueue_err_count += slave_stats.enqueue_err_count;
		stats->dequeue_err_count += slave_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->stats_reset)(slave_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sess = 0;
	uint16_t headroom_sz = 0;
	uint16_t tailroom_sz = 0;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_slave() may be called multiple times,
	 * but only the first invocation does any work.
	 */
	scheduler_attach_init_slave(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);

		/* The session limit is the smallest non-zero limit
		 * among the slaves; 0 means unlimited.
		 */
		uint32_t dev_max_sess = slave_info.sym.max_nb_sessions;
		if (dev_max_sess != 0) {
			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
				max_nb_sess = dev_max_sess;
		}

		/* Get the max headroom requirement among slave PMDs */
		headroom_sz = slave_info.min_mbuf_headroom_req >
				headroom_sz ?
				slave_info.min_mbuf_headroom_req :
				headroom_sz;

		/* Get the max tailroom requirement among slave PMDs */
		tailroom_sz = slave_info.min_mbuf_tailroom_req >
				tailroom_sz ?
				slave_info.min_mbuf_tailroom_req :
				tailroom_sz;
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->min_mbuf_headroom_req = headroom_sz;
	dev_info->min_mbuf_tailroom_req = tailroom_sz;
	dev_info->sym.max_nb_sessions = max_nb_sess;
}
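/*
 * Aggregation example for scheduler_pmd_info_get() (numbers illustrative):
 * if slave A reports min_mbuf_headroom_req = 48 and slave B reports 64,
 * the scheduler advertises 64, since the strictest requirement must hold
 * for every slave. If slave A supports at most 2048 sessions and slave B
 * at most 1024, max_nb_sessions is reported as 1024, the smallest
 * non-zero limit, while a slave reporting 0 (no limit) is ignored.
 */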
/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->order_ring)
		rte_ring_free(qp_ctx->order_ring);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
	struct rte_mempool *session_pool)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_id = sched_ctx->slaves[i].dev_id;

		/*
		 * All slaves will share the same session mempool
		 * for session-less operations, so the objects
		 * must be big enough for all the drivers used.
		 */
		ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
				qp_conf, socket_id, session_pool);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual available object number = nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_slave() may be called multiple times,
	 * but only the first invocation does any work.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0) {
		CR_SCHED_LOG(ERR, "Failed to attach slave");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "Unable to configure queue pair");
			/* release the half-configured qp, mirroring the
			 * attach-failure path above
			 */
			scheduler_pmd_qp_release(dev, qp_id);
			return -1;
		}
	}

	return 0;
}
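/*
 * Queue pair setup from the application side (a minimal sketch; variable
 * names are illustrative). A single setup call on the scheduler fans out
 * to every attached slave before the scheduler's own qp context is
 * allocated:
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	rte_cryptodev_queue_pair_setup(sched_id, 0, &qp_conf,
 *			rte_socket_id(), session_pool);
 *
 * Because of the "nb_descriptors - 1" rule above, such a queue pair can
 * hold at most 2047 inflight objects.
 */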
/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

static uint32_t
scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t i = 0;
	uint32_t max_priv_sess_size = 0;

	/* Check what is the maximum private session size for all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				&rte_cryptodevs[slave_dev_id];
		uint32_t priv_sess_size =
			(*slave_dev->dev_ops->sym_session_get_size)(slave_dev);

		if (max_priv_sess_size < priv_sess_size)
			max_priv_sess_size = priv_sess_size;
	}

	return max_priv_sess_size;
}

static int
scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess,
	struct rte_mempool *mempool)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* Initialize the session on every slave, so any of them can
	 * process ops enqueued with it.
	 */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct scheduler_slave *slave = &sched_ctx->slaves[i];

		ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
					xform, mempool);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "unable to config sym session");
			return ret;
		}
	}

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	/* Clear private data of slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct scheduler_slave *slave = &sched_ctx->slaves[i];

		rte_cryptodev_sym_session_clear(slave->dev_id, sess);
	}
}

static struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure		= scheduler_pmd_config,
		.dev_start		= scheduler_pmd_start,
		.dev_stop		= scheduler_pmd_stop,
		.dev_close		= scheduler_pmd_close,

		.stats_get		= scheduler_pmd_stats_get,
		.stats_reset		= scheduler_pmd_stats_reset,

		.dev_infos_get		= scheduler_pmd_info_get,

		.queue_pair_setup	= scheduler_pmd_qp_setup,
		.queue_pair_release	= scheduler_pmd_qp_release,
		.queue_pair_count	= scheduler_pmd_qp_count,

		.sym_session_get_size	= scheduler_pmd_sym_session_get_size,
		.sym_session_configure	= scheduler_pmd_sym_session_configure,
		.sym_session_clear	= scheduler_pmd_sym_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
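/*
 * End-to-end bring-up sketch for reference (assumptions: a scheduler
 * instance created via the "crypto_scheduler" vdev and two already-probed
 * slave ids; all identifiers below are illustrative and error handling
 * is omitted):
 *
 *	rte_cryptodev_scheduler_slave_attach(sched_id, slave0_id);
 *	rte_cryptodev_scheduler_slave_attach(sched_id, slave1_id);
 *	rte_cryptodev_scheduler_mode_set(sched_id,
 *			CDEV_SCHED_MODE_ROUNDROBIN);
 *	rte_cryptodev_scheduler_ordering_set(sched_id, 1);
 *
 * With ordering enabled, scheduler_pmd_start() creates one reorder ring
 * per queue pair via update_order_ring() above.
 */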