/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

/** Attach the slaves predefined by the scheduler's EAL options */
static int
scheduler_attach_init_slave(struct rte_cryptodev *dev)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint8_t scheduler_id = dev->data->dev_id;
    int i;

    for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
        const char *dev_name = sched_ctx->init_slave_names[i];
        struct rte_cryptodev *slave_dev =
                rte_cryptodev_pmd_get_named_dev(dev_name);
        int status;

        if (!slave_dev) {
            CS_LOG_ERR("Failed to locate slave dev %s", dev_name);
            return -EINVAL;
        }

        status = rte_cryptodev_scheduler_slave_attach(
                scheduler_id, slave_dev->data->dev_id);
        if (status < 0) {
            CS_LOG_ERR("Failed to attach slave cryptodev %u",
                    slave_dev->data->dev_id);
            return status;
        }

        CS_LOG_INFO("Scheduler %s attached slave %s",
                dev->data->name,
                sched_ctx->init_slave_names[i]);

        rte_free(sched_ctx->init_slave_names[i]);
        sched_ctx->init_slave_names[i] = NULL;

        sched_ctx->nb_init_slaves -= 1;
    }

    return 0;
}

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
        struct rte_cryptodev_config *config)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint32_t i;
    int ret;

    /* scheduler_attach_init_slave() is called from several entry
     * points, but only the first call performs any work.
     */
    ret = scheduler_attach_init_slave(dev);
    if (ret < 0)
        return ret;

    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

        ret = rte_cryptodev_configure(slave_dev_id, config);
        if (ret < 0)
            break;
    }

    return ret;
}

static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

    if (sched_ctx->reordering_enabled) {
        char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
        uint32_t buff_size = rte_align32pow2(
                sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);

        if (qp_ctx->order_ring) {
            rte_ring_free(qp_ctx->order_ring);
            qp_ctx->order_ring = NULL;
        }

        if (!buff_size)
            return 0;

        if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
                "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
                dev->data->dev_id, qp_id) < 0) {
            CS_LOG_ERR("failed to create unique reorder buffer name");
            return -ENOMEM;
        }

        qp_ctx->order_ring = rte_ring_create(order_ring_name,
                buff_size, rte_socket_id(),
                RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (!qp_ctx->order_ring) {
            CS_LOG_ERR("failed to create order ring");
            return -ENOMEM;
        }
    } else {
        if (qp_ctx->order_ring) {
            rte_ring_free(qp_ctx->order_ring);
            qp_ctx->order_ring = NULL;
        }
    }

    return 0;
}
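/*
 * Note on sizing: rte_ring_create() requires a power-of-two entry count,
 * hence the rte_align32pow2() rounding above. For illustration only
 * (PER_SLAVE_BUFF_SIZE is defined in scheduler_pmd_private.h; the value
 * used here is hypothetical): with 4 slaves and a per-slave buffer of
 * 255, the reorder ring would hold rte_align32pow2(4 * 255) = 1024
 * entries.
 */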
/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint32_t i;
    int ret;

    if (dev->data->dev_started)
        return 0;

    /* scheduler_attach_init_slave() is called from several entry
     * points, but only the first call performs any work.
     */
    ret = scheduler_attach_init_slave(dev);
    if (ret < 0)
        return ret;

    for (i = 0; i < dev->data->nb_queue_pairs; i++) {
        ret = update_order_ring(dev, i);
        if (ret < 0) {
            CS_LOG_ERR("Failed to update reorder buffer");
            return ret;
        }
    }

    if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
        CS_LOG_ERR("Scheduler mode is not set");
        return -1;
    }

    if (!sched_ctx->nb_slaves) {
        CS_LOG_ERR("No slave in the scheduler");
        return -1;
    }

    RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

        if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
            CS_LOG_ERR("Failed to attach slave");
            return -ENOTSUP;
        }
    }

    RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

    if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
        CS_LOG_ERR("Scheduler start failed");
        return -1;
    }

    /* start all slaves */
    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
        struct rte_cryptodev *slave_dev =
                rte_cryptodev_pmd_get_dev(slave_dev_id);

        ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
        if (ret < 0) {
            CS_LOG_ERR("Failed to start slave dev %u", slave_dev_id);
            return ret;
        }
    }

    return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint32_t i;

    if (!dev->data->dev_started)
        return;

    /* stop all slaves first */
    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
        struct rte_cryptodev *slave_dev =
                rte_cryptodev_pmd_get_dev(slave_dev_id);

        (*slave_dev->dev_ops->dev_stop)(slave_dev);
    }

    if (*sched_ctx->ops.scheduler_stop)
        (*sched_ctx->ops.scheduler_stop)(dev);

    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

        if (*sched_ctx->ops.slave_detach)
            (*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
    }
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint32_t i;
    int ret;

    /* the dev should be stopped before being closed */
    if (dev->data->dev_started)
        return -EBUSY;

    /* close all slaves first */
    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
        struct rte_cryptodev *slave_dev =
                rte_cryptodev_pmd_get_dev(slave_dev_id);

        ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
        if (ret < 0)
            return ret;
    }

    for (i = 0; i < dev->data->nb_queue_pairs; i++) {
        struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

        if (qp_ctx->order_ring) {
            rte_ring_free(qp_ctx->order_ring);
            qp_ctx->order_ring = NULL;
        }

        if (qp_ctx->private_qp_ctx) {
            rte_free(qp_ctx->private_qp_ctx);
            qp_ctx->private_qp_ctx = NULL;
        }
    }

    /* rte_free() is a no-op on NULL, so no need to test the pointers;
     * clear them to guard against a double free on a repeated close.
     */
    rte_free(sched_ctx->private_ctx);
    sched_ctx->private_ctx = NULL;

    rte_free(sched_ctx->capabilities);
    sched_ctx->capabilities = NULL;

    return 0;
}
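/*
 * The two callbacks below aggregate and reset statistics across all
 * slaves. A minimal usage sketch via the public API, assuming the
 * scheduler's device id is held in a hypothetical sched_dev_id:
 *
 *    struct rte_cryptodev_stats stats;
 *    rte_cryptodev_stats_get(sched_dev_id, &stats);
 *    ...
 *    rte_cryptodev_stats_reset(sched_dev_id);
 */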
/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
        struct rte_cryptodev_stats *stats)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint32_t i;

    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
        struct rte_cryptodev *slave_dev =
                rte_cryptodev_pmd_get_dev(slave_dev_id);
        struct rte_cryptodev_stats slave_stats = {0};

        (*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);

        stats->enqueued_count += slave_stats.enqueued_count;
        stats->dequeued_count += slave_stats.dequeued_count;

        stats->enqueue_err_count += slave_stats.enqueue_err_count;
        stats->dequeue_err_count += slave_stats.dequeue_err_count;
    }
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint32_t i;

    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
        struct rte_cryptodev *slave_dev =
                rte_cryptodev_pmd_get_dev(slave_dev_id);

        (*slave_dev->dev_ops->stats_reset)(slave_dev);
    }
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
        struct rte_cryptodev_info *dev_info)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
            UINT32_MAX : RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
    uint32_t i;

    if (!dev_info)
        return;

    /* scheduler_attach_init_slave() is called from several entry
     * points, but only the first call performs any work.
     */
    scheduler_attach_init_slave(dev);

    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
        struct rte_cryptodev_info slave_info;

        rte_cryptodev_info_get(slave_dev_id, &slave_info);
        /* the scheduler can offer no more sessions than its most
         * constrained slave
         */
        max_nb_sessions = RTE_MIN(slave_info.sym.max_nb_sessions,
                max_nb_sessions);
    }

    dev_info->driver_id = dev->driver_id;
    dev_info->feature_flags = dev->feature_flags;
    dev_info->capabilities = sched_ctx->capabilities;
    dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
    dev_info->sym.max_nb_sessions = max_nb_sessions;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
    struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

    if (!qp_ctx)
        return 0;

    if (qp_ctx->order_ring)
        rte_ring_free(qp_ctx->order_ring);
    if (qp_ctx->private_qp_ctx)
        rte_free(qp_ctx->private_qp_ctx);

    rte_free(qp_ctx);
    dev->data->queue_pairs[qp_id] = NULL;

    return 0;
}
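/*
 * A minimal setup sketch through the public API (hypothetical device id
 * and pool; error handling omitted):
 *
 *    struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *    rte_cryptodev_queue_pair_setup(sched_dev_id, 0, &qp_conf,
 *            rte_socket_id(), session_pool);
 *
 * scheduler_pmd_qp_setup() below then configures the same qp_id on
 * every attached slave, so all slaves end up with an identically
 * configured queue pair.
 */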
/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
        const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
        struct rte_mempool *session_pool)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    struct scheduler_qp_ctx *qp_ctx;
    char name[RTE_CRYPTODEV_NAME_MAX_LEN];
    uint32_t i;
    int ret;

    if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
            "CRYPTO_SCHE PMD %u QP %u",
            dev->data->dev_id, qp_id) < 0) {
        CS_LOG_ERR("Failed to create unique queue pair name");
        return -EFAULT;
    }

    /* Free memory prior to re-allocation if needed. */
    if (dev->data->queue_pairs[qp_id] != NULL)
        scheduler_pmd_qp_release(dev, qp_id);

    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_id = sched_ctx->slaves[i].dev_id;

        /*
         * All slaves will share the same session mempool
         * for session-less operations, so the objects
         * must be big enough for all the drivers used.
         */
        ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
                qp_conf, socket_id, session_pool);
        if (ret < 0)
            return ret;
    }

    /* Allocate the queue pair data structure. */
    qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx),
            RTE_CACHE_LINE_SIZE, socket_id);
    if (qp_ctx == NULL)
        return -ENOMEM;

    /* The actual number of available objects = nb_descriptors - 1 */
    qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

    dev->data->queue_pairs[qp_id] = qp_ctx;

    /* scheduler_attach_init_slave() is called from several entry
     * points, but only the first call performs any work.
     */
    ret = scheduler_attach_init_slave(dev);
    if (ret < 0) {
        CS_LOG_ERR("Failed to attach slave");
        scheduler_pmd_qp_release(dev, qp_id);
        return ret;
    }

    if (*sched_ctx->ops.config_queue_pair) {
        if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
            CS_LOG_ERR("Unable to configure queue pair");
            return -1;
        }
    }

    return 0;
}

/** Start queue pair */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
        __rte_unused uint16_t queue_pair_id)
{
    return -ENOTSUP;
}

/** Stop queue pair */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
        __rte_unused uint16_t queue_pair_id)
{
    return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
    return dev->data->nb_queue_pairs;
}

static uint32_t
scheduler_pmd_session_get_size(struct rte_cryptodev *dev)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint32_t i;
    uint32_t max_priv_sess_size = 0;

    /* Check what the maximum private session size is for all slaves */
    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
        struct rte_cryptodev *slave_dev =
                &rte_cryptodevs[slave_dev_id];
        uint32_t priv_sess_size =
                (*slave_dev->dev_ops->session_get_size)(slave_dev);

        if (max_priv_sess_size < priv_sess_size)
            max_priv_sess_size = priv_sess_size;
    }

    return max_priv_sess_size;
}

static int
scheduler_pmd_session_configure(struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *sess,
        struct rte_mempool *mempool)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint32_t i;
    int ret;

    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        struct scheduler_slave *slave = &sched_ctx->slaves[i];

        ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
                xform, mempool);
        if (ret < 0) {
            CS_LOG_ERR("unable to configure sym session");
            return ret;
        }
    }

    return 0;
}
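/*
 * Session lifecycle sketch through the public API (illustrative names;
 * error handling omitted). session_configure above is reached via
 * rte_cryptodev_sym_session_init(), and session_clear below via
 * rte_cryptodev_sym_session_clear():
 *
 *    struct rte_cryptodev_sym_session *sess =
 *            rte_cryptodev_sym_session_create(session_pool);
 *    rte_cryptodev_sym_session_init(sched_dev_id, sess, &xform,
 *            session_pool);
 *    ...
 *    rte_cryptodev_sym_session_clear(sched_dev_id, sess);
 *    rte_cryptodev_sym_session_free(sess);
 */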
/** Clear the memory of the session so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
        struct rte_cryptodev_sym_session *sess)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint32_t i;

    /* Clear private data of slaves */
    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        struct scheduler_slave *slave = &sched_ctx->slaves[i];

        rte_cryptodev_sym_session_clear(slave->dev_id, sess);
    }
}

struct rte_cryptodev_ops scheduler_pmd_ops = {
        .dev_configure = scheduler_pmd_config,
        .dev_start = scheduler_pmd_start,
        .dev_stop = scheduler_pmd_stop,
        .dev_close = scheduler_pmd_close,

        .stats_get = scheduler_pmd_stats_get,
        .stats_reset = scheduler_pmd_stats_reset,

        .dev_infos_get = scheduler_pmd_info_get,

        .queue_pair_setup = scheduler_pmd_qp_setup,
        .queue_pair_release = scheduler_pmd_qp_release,
        .queue_pair_start = scheduler_pmd_qp_start,
        .queue_pair_stop = scheduler_pmd_qp_stop,
        .queue_pair_count = scheduler_pmd_qp_count,

        .session_get_size = scheduler_pmd_session_get_size,
        .session_configure = scheduler_pmd_session_configure,
        .session_clear = scheduler_pmd_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;