/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>

#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_cryptodev_vdev.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

/** Attach the slaves predefined by the scheduler's EAL options */
static int
scheduler_attach_init_slave(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

	for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_slave_names[i];
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!slave_dev) {
			CS_LOG_ERR("Failed to locate slave dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_slave_attach(
				scheduler_id, slave_dev->data->dev_id);

		if (status < 0) {
			CS_LOG_ERR("Failed to attach slave cryptodev %u",
					slave_dev->data->dev_id);
			return status;
		}

		CS_LOG_INFO("Scheduler %s attached slave %s\n",
				dev->data->name,
				sched_ctx->init_slave_names[i]);

		rte_free(sched_ctx->init_slave_names[i]);

		sched_ctx->nb_init_slaves -= 1;
	}

	return 0;
}

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_slave() is called from multiple places, but
	 * only the first call does any work: later calls find no pending
	 * slave names left to attach.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		ret = rte_cryptodev_configure(slave_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}
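
/** Create or free a queue pair's reorder ring, depending on whether
 *  reordering is currently enabled on the scheduler
 */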
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CS_LOG_ERR("failed to create unique reorder buffer "
					"name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CS_LOG_ERR("failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* scheduler_attach_init_slave() is called from multiple places, but
	 * only the first call does any work.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}
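
/*
 * A minimal usage sketch (hypothetical application code, not part of this
 * driver): once the scheduler vdev exists, it is configured and started
 * through the regular cryptodev API, which lands in the callbacks above:
 *
 *	struct rte_cryptodev_config conf = { .socket_id = rte_socket_id() };
 *
 *	ret = rte_cryptodev_configure(scheduler_dev_id, &conf);
 *	if (ret == 0)
 *		ret = rte_cryptodev_start(scheduler_dev_id);
 */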

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->dev_stop)(slave_dev);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if (*sched_ctx->ops.slave_detach)
			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx)
		rte_free(sched_ctx->private_ctx);

	if (sched_ctx->capabilities)
		rte_free(sched_ctx->capabilities);

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);
		struct rte_cryptodev_stats slave_stats = {0};

		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);

		stats->enqueued_count += slave_stats.enqueued_count;
		stats->dequeued_count += slave_stats.dequeued_count;

		stats->enqueue_err_count += slave_stats.enqueue_err_count;
		stats->dequeue_err_count += slave_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->stats_reset)(slave_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_slave() is called from multiple places, but
	 * only the first call does any work.
	 */
	scheduler_attach_init_slave(dev);

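	/* report the tightest session limit among the attached slaves */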
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);
		max_nb_sessions = slave_info.sym.max_nb_sessions <
				max_nb_sessions ?
				slave_info.sym.max_nb_sessions :
				max_nb_sessions;
	}

	dev_info->dev_type = dev->dev_type;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->sym.max_nb_sessions = max_nb_sessions;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->order_ring)
		rte_ring_free(qp_ctx->order_ring);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CS_LOG_ERR("Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_id = sched_ctx->slaves[i].dev_id;

		ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual available object number = nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_slave() is called from multiple places, but
	 * only the first call does any work.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0) {
		CS_LOG_ERR("Failed to attach slave");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CS_LOG_ERR("Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

/** Start queue pair */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
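
/** Return the size of the scheduler's own session structure */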
static uint32_t
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct scheduler_session);
}
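
/** Create (create == 1) or free (create == 0) the slave sessions backing a
 *  scheduler session; a failed create rolls back the sessions created so far
 */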
static int
config_slave_sess(struct scheduler_ctx *sched_ctx,
		struct rte_crypto_sym_xform *xform,
		struct scheduler_session *sess,
		uint32_t create)
{
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct scheduler_slave *slave = &sched_ctx->slaves[i];

		if (sess->sessions[i]) {
			if (create)
				continue;
			/* !create */
			sess->sessions[i] = rte_cryptodev_sym_session_free(
					slave->dev_id, sess->sessions[i]);
		} else {
			if (!create)
				continue;
			/* create */
			sess->sessions[i] =
					rte_cryptodev_sym_session_create(
							slave->dev_id, xform);
			if (!sess->sessions[i]) {
				config_slave_sess(sched_ctx, NULL, sess, 0);
				return -1;
			}
		}
	}

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
	void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	config_slave_sess(sched_ctx, NULL, sess, 0);

	memset(sess, 0, sizeof(struct scheduler_session));
}

/** Configure a session on the scheduler and on all of its slaves */
static void *
scheduler_pmd_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform, void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
		CS_LOG_ERR("unable to configure sym session");
		return NULL;
	}

	return sess;
}

struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure = scheduler_pmd_config,
		.dev_start = scheduler_pmd_start,
		.dev_stop = scheduler_pmd_stop,
		.dev_close = scheduler_pmd_close,

		.stats_get = scheduler_pmd_stats_get,
		.stats_reset = scheduler_pmd_stats_reset,

		.dev_infos_get = scheduler_pmd_info_get,

		.queue_pair_setup = scheduler_pmd_qp_setup,
		.queue_pair_release = scheduler_pmd_qp_release,
		.queue_pair_start = scheduler_pmd_qp_start,
		.queue_pair_stop = scheduler_pmd_qp_stop,
		.queue_pair_count = scheduler_pmd_qp_count,

		.session_get_size = scheduler_pmd_session_get_size,
		.session_configure = scheduler_pmd_session_configure,
		.session_clear = scheduler_pmd_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
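
/*
 * A minimal usage sketch (hypothetical probe-time code, not part of this
 * file): the scheduler PMD's create routine is expected to install the ops
 * table above on the cryptodev it allocates, e.g.:
 *
 *	dev->dev_ops = rte_crypto_scheduler_pmd_ops;
 */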