1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2017 Intel Corporation. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 10 * * Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * * Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the 15 * distribution. 16 * * Neither the name of Intel Corporation nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 #include <string.h> 33 34 #include <rte_config.h> 35 #include <rte_common.h> 36 #include <rte_malloc.h> 37 #include <rte_dev.h> 38 #include <rte_cryptodev.h> 39 #include <rte_cryptodev_pmd.h> 40 #include <rte_cryptodev_vdev.h> 41 #include <rte_reorder.h> 42 43 #include "scheduler_pmd_private.h" 44 45 /** attaching the slaves predefined by scheduler's EAL options */ 46 static int 47 scheduler_attach_init_slave(struct rte_cryptodev *dev) 48 { 49 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 50 uint8_t scheduler_id = dev->data->dev_id; 51 int i; 52 53 for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) { 54 const char *dev_name = sched_ctx->init_slave_names[i]; 55 struct rte_cryptodev *slave_dev = 56 rte_cryptodev_pmd_get_named_dev(dev_name); 57 int status; 58 59 if (!slave_dev) { 60 CS_LOG_ERR("Failed to locate slave dev %s", 61 dev_name); 62 return -EINVAL; 63 } 64 65 status = rte_cryptodev_scheduler_slave_attach( 66 scheduler_id, slave_dev->data->dev_id); 67 68 if (status < 0) { 69 CS_LOG_ERR("Failed to attach slave cryptodev %u", 70 slave_dev->data->dev_id); 71 return status; 72 } 73 74 CS_LOG_INFO("Scheduler %s attached slave %s\n", 75 dev->data->name, 76 sched_ctx->init_slave_names[i]); 77 78 rte_free(sched_ctx->init_slave_names[i]); 79 80 sched_ctx->nb_init_slaves -= 1; 81 } 82 83 return 0; 84 } 85 /** Configure device */ 86 static int 87 scheduler_pmd_config(struct rte_cryptodev *dev, 88 struct rte_cryptodev_config *config) 89 { 90 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 91 uint32_t i; 92 int ret; 93 94 /* although scheduler_attach_init_slave presents multiple times, 95 * there will be only 1 meaningful execution. 96 */ 97 ret = scheduler_attach_init_slave(dev); 98 if (ret < 0) 99 return ret; 100 101 for (i = 0; i < sched_ctx->nb_slaves; i++) { 102 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; 103 104 ret = rte_cryptodev_configure(slave_dev_id, config, 105 dev->data->session_pool); 106 if (ret < 0) 107 break; 108 } 109 110 return ret; 111 } 112 113 static int 114 update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id) 115 { 116 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 117 struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id]; 118 119 if (sched_ctx->reordering_enabled) { 120 char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 121 uint32_t buff_size = rte_align32pow2( 122 sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE); 123 124 if (qp_ctx->order_ring) { 125 rte_ring_free(qp_ctx->order_ring); 126 qp_ctx->order_ring = NULL; 127 } 128 129 if (!buff_size) 130 return 0; 131 132 if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN, 133 "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), 134 dev->data->dev_id, qp_id) < 0) { 135 CS_LOG_ERR("failed to create unique reorder buffer " 136 "name"); 137 return -ENOMEM; 138 } 139 140 qp_ctx->order_ring = rte_ring_create(order_ring_name, 141 buff_size, rte_socket_id(), 142 RING_F_SP_ENQ | RING_F_SC_DEQ); 143 if (!qp_ctx->order_ring) { 144 CS_LOG_ERR("failed to create order ring"); 145 return -ENOMEM; 146 } 147 } else { 148 if (qp_ctx->order_ring) { 149 rte_ring_free(qp_ctx->order_ring); 150 qp_ctx->order_ring = NULL; 151 } 152 } 153 154 return 0; 155 } 156 157 /** Start device */ 158 static int 159 scheduler_pmd_start(struct rte_cryptodev *dev) 160 { 161 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 162 uint32_t i; 163 int ret; 164 165 if (dev->data->dev_started) 166 return 0; 167 168 /* although scheduler_attach_init_slave presents multiple times, 169 * there will be only 1 meaningful execution. 170 */ 171 ret = scheduler_attach_init_slave(dev); 172 if (ret < 0) 173 return ret; 174 175 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 176 ret = update_order_ring(dev, i); 177 if (ret < 0) { 178 CS_LOG_ERR("Failed to update reorder buffer"); 179 return ret; 180 } 181 } 182 183 if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) { 184 CS_LOG_ERR("Scheduler mode is not set"); 185 return -1; 186 } 187 188 if (!sched_ctx->nb_slaves) { 189 CS_LOG_ERR("No slave in the scheduler"); 190 return -1; 191 } 192 193 RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP); 194 195 for (i = 0; i < sched_ctx->nb_slaves; i++) { 196 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; 197 198 if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) { 199 CS_LOG_ERR("Failed to attach slave"); 200 return -ENOTSUP; 201 } 202 } 203 204 RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP); 205 206 if ((*sched_ctx->ops.scheduler_start)(dev) < 0) { 207 CS_LOG_ERR("Scheduler start failed"); 208 return -1; 209 } 210 211 /* start all slaves */ 212 for (i = 0; i < sched_ctx->nb_slaves; i++) { 213 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; 214 struct rte_cryptodev *slave_dev = 215 rte_cryptodev_pmd_get_dev(slave_dev_id); 216 217 ret = (*slave_dev->dev_ops->dev_start)(slave_dev); 218 if (ret < 0) { 219 CS_LOG_ERR("Failed to start slave dev %u", 220 slave_dev_id); 221 return ret; 222 } 223 } 224 225 return 0; 226 } 227 228 /** Stop device */ 229 static void 230 scheduler_pmd_stop(struct rte_cryptodev *dev) 231 { 232 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 233 uint32_t i; 234 235 if (!dev->data->dev_started) 236 return; 237 238 /* stop all slaves first */ 239 for (i = 0; i < sched_ctx->nb_slaves; i++) { 240 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; 241 struct rte_cryptodev *slave_dev = 242 rte_cryptodev_pmd_get_dev(slave_dev_id); 243 244 (*slave_dev->dev_ops->dev_stop)(slave_dev); 245 } 246 247 if (*sched_ctx->ops.scheduler_stop) 248 (*sched_ctx->ops.scheduler_stop)(dev); 249 250 for (i = 0; i < sched_ctx->nb_slaves; i++) { 251 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; 252 253 if (*sched_ctx->ops.slave_detach) 254 (*sched_ctx->ops.slave_detach)(dev, slave_dev_id); 255 } 256 } 257 258 /** Close device */ 259 static int 260 scheduler_pmd_close(struct rte_cryptodev *dev) 261 { 262 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 263 uint32_t i; 264 int ret; 265 266 /* the dev should be stopped before being closed */ 267 if (dev->data->dev_started) 268 return -EBUSY; 269 270 /* close all slaves first */ 271 for (i = 0; i < sched_ctx->nb_slaves; i++) { 272 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; 273 struct rte_cryptodev *slave_dev = 274 rte_cryptodev_pmd_get_dev(slave_dev_id); 275 276 ret = (*slave_dev->dev_ops->dev_close)(slave_dev); 277 if (ret < 0) 278 return ret; 279 } 280 281 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 282 struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i]; 283 284 if (qp_ctx->order_ring) { 285 rte_ring_free(qp_ctx->order_ring); 286 qp_ctx->order_ring = NULL; 287 } 288 289 if (qp_ctx->private_qp_ctx) { 290 rte_free(qp_ctx->private_qp_ctx); 291 qp_ctx->private_qp_ctx = NULL; 292 } 293 } 294 295 if (sched_ctx->private_ctx) 296 rte_free(sched_ctx->private_ctx); 297 298 if (sched_ctx->capabilities) 299 rte_free(sched_ctx->capabilities); 300 301 return 0; 302 } 303 304 /** Get device statistics */ 305 static void 306 scheduler_pmd_stats_get(struct rte_cryptodev *dev, 307 struct rte_cryptodev_stats *stats) 308 { 309 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 310 uint32_t i; 311 312 for (i = 0; i < sched_ctx->nb_slaves; i++) { 313 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; 314 struct rte_cryptodev *slave_dev = 315 rte_cryptodev_pmd_get_dev(slave_dev_id); 316 struct rte_cryptodev_stats slave_stats = {0}; 317 318 (*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats); 319 320 stats->enqueued_count += slave_stats.enqueued_count; 321 stats->dequeued_count += slave_stats.dequeued_count; 322 323 stats->enqueue_err_count += slave_stats.enqueue_err_count; 324 stats->dequeue_err_count += slave_stats.dequeue_err_count; 325 } 326 } 327 328 /** Reset device statistics */ 329 static void 330 scheduler_pmd_stats_reset(struct rte_cryptodev *dev) 331 { 332 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 333 uint32_t i; 334 335 for (i = 0; i < sched_ctx->nb_slaves; i++) { 336 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; 337 struct rte_cryptodev *slave_dev = 338 rte_cryptodev_pmd_get_dev(slave_dev_id); 339 340 (*slave_dev->dev_ops->stats_reset)(slave_dev); 341 } 342 } 343 344 /** Get device info */ 345 static void 346 scheduler_pmd_info_get(struct rte_cryptodev *dev, 347 struct rte_cryptodev_info *dev_info) 348 { 349 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 350 uint32_t max_nb_sessions = sched_ctx->nb_slaves ? 351 UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS; 352 uint32_t i; 353 354 if (!dev_info) 355 return; 356 357 /* although scheduler_attach_init_slave presents multiple times, 358 * there will be only 1 meaningful execution. 359 */ 360 scheduler_attach_init_slave(dev); 361 362 for (i = 0; i < sched_ctx->nb_slaves; i++) { 363 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; 364 struct rte_cryptodev_info slave_info; 365 366 rte_cryptodev_info_get(slave_dev_id, &slave_info); 367 max_nb_sessions = slave_info.sym.max_nb_sessions < 368 max_nb_sessions ? 369 slave_info.sym.max_nb_sessions : 370 max_nb_sessions; 371 } 372 373 dev_info->driver_id = dev->driver_id; 374 dev_info->feature_flags = dev->feature_flags; 375 dev_info->capabilities = sched_ctx->capabilities; 376 dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs; 377 dev_info->sym.max_nb_sessions = max_nb_sessions; 378 } 379 380 /** Release queue pair */ 381 static int 382 scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) 383 { 384 struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id]; 385 386 if (!qp_ctx) 387 return 0; 388 389 if (qp_ctx->order_ring) 390 rte_ring_free(qp_ctx->order_ring); 391 if (qp_ctx->private_qp_ctx) 392 rte_free(qp_ctx->private_qp_ctx); 393 394 rte_free(qp_ctx); 395 dev->data->queue_pairs[qp_id] = NULL; 396 397 return 0; 398 } 399 400 /** Setup a queue pair */ 401 static int 402 scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, 403 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id) 404 { 405 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 406 struct scheduler_qp_ctx *qp_ctx; 407 char name[RTE_CRYPTODEV_NAME_MAX_LEN]; 408 uint32_t i; 409 int ret; 410 411 if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, 412 "CRYTO_SCHE PMD %u QP %u", 413 dev->data->dev_id, qp_id) < 0) { 414 CS_LOG_ERR("Failed to create unique queue pair name"); 415 return -EFAULT; 416 } 417 418 /* Free memory prior to re-allocation if needed. */ 419 if (dev->data->queue_pairs[qp_id] != NULL) 420 scheduler_pmd_qp_release(dev, qp_id); 421 422 for (i = 0; i < sched_ctx->nb_slaves; i++) { 423 uint8_t slave_id = sched_ctx->slaves[i].dev_id; 424 425 ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id, 426 qp_conf, socket_id); 427 if (ret < 0) 428 return ret; 429 } 430 431 /* Allocate the queue pair data structure. */ 432 qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE, 433 socket_id); 434 if (qp_ctx == NULL) 435 return -ENOMEM; 436 437 /* The actual available object number = nb_descriptors - 1 */ 438 qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1; 439 440 dev->data->queue_pairs[qp_id] = qp_ctx; 441 442 /* although scheduler_attach_init_slave presents multiple times, 443 * there will be only 1 meaningful execution. 444 */ 445 ret = scheduler_attach_init_slave(dev); 446 if (ret < 0) { 447 CS_LOG_ERR("Failed to attach slave"); 448 scheduler_pmd_qp_release(dev, qp_id); 449 return ret; 450 } 451 452 if (*sched_ctx->ops.config_queue_pair) { 453 if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) { 454 CS_LOG_ERR("Unable to configure queue pair"); 455 return -1; 456 } 457 } 458 459 return 0; 460 } 461 462 /** Start queue pair */ 463 static int 464 scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, 465 __rte_unused uint16_t queue_pair_id) 466 { 467 return -ENOTSUP; 468 } 469 470 /** Stop queue pair */ 471 static int 472 scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, 473 __rte_unused uint16_t queue_pair_id) 474 { 475 return -ENOTSUP; 476 } 477 478 /** Return the number of allocated queue pairs */ 479 static uint32_t 480 scheduler_pmd_qp_count(struct rte_cryptodev *dev) 481 { 482 return dev->data->nb_queue_pairs; 483 } 484 485 static uint32_t 486 scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) 487 { 488 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 489 uint8_t i = 0; 490 uint32_t max_priv_sess_size = 0; 491 492 /* Check what is the maximum private session size for all slaves */ 493 for (i = 0; i < sched_ctx->nb_slaves; i++) { 494 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; 495 struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id]; 496 uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev); 497 498 if (max_priv_sess_size < priv_sess_size) 499 max_priv_sess_size = priv_sess_size; 500 } 501 502 return max_priv_sess_size; 503 } 504 505 static int 506 scheduler_pmd_session_configure(struct rte_cryptodev *dev, 507 struct rte_crypto_sym_xform *xform, 508 struct rte_cryptodev_sym_session *sess, 509 struct rte_mempool *mempool) 510 { 511 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 512 uint32_t i; 513 514 for (i = 0; i < sched_ctx->nb_slaves; i++) { 515 struct scheduler_slave *slave = &sched_ctx->slaves[i]; 516 517 if (rte_cryptodev_sym_session_init(slave->dev_id, sess, 518 xform, mempool) < 0) { 519 CS_LOG_ERR("unabled to config sym session"); 520 return -1; 521 } 522 } 523 524 return 0; 525 } 526 527 /** Clear the memory of session so it doesn't leave key material behind */ 528 static void 529 scheduler_pmd_session_clear(struct rte_cryptodev *dev, 530 struct rte_cryptodev_sym_session *sess) 531 { 532 struct scheduler_ctx *sched_ctx = dev->data->dev_private; 533 uint32_t i; 534 535 /* Clear private data of slaves */ 536 for (i = 0; i < sched_ctx->nb_slaves; i++) { 537 struct scheduler_slave *slave = &sched_ctx->slaves[i]; 538 539 rte_cryptodev_sym_session_clear(slave->dev_id, sess); 540 } 541 } 542 543 struct rte_cryptodev_ops scheduler_pmd_ops = { 544 .dev_configure = scheduler_pmd_config, 545 .dev_start = scheduler_pmd_start, 546 .dev_stop = scheduler_pmd_stop, 547 .dev_close = scheduler_pmd_close, 548 549 .stats_get = scheduler_pmd_stats_get, 550 .stats_reset = scheduler_pmd_stats_reset, 551 552 .dev_infos_get = scheduler_pmd_info_get, 553 554 .queue_pair_setup = scheduler_pmd_qp_setup, 555 .queue_pair_release = scheduler_pmd_qp_release, 556 .queue_pair_start = scheduler_pmd_qp_start, 557 .queue_pair_stop = scheduler_pmd_qp_stop, 558 .queue_pair_count = scheduler_pmd_qp_count, 559 560 .session_get_size = scheduler_pmd_session_get_size, 561 .session_configure = scheduler_pmd_session_configure, 562 .session_clear = scheduler_pmd_session_clear, 563 }; 564 565 struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops; 566