/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>

#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

/** Attach the slaves predefined by the scheduler's EAL options */
static int
scheduler_attach_init_slave(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

	for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_slave_names[i];
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!slave_dev) {
			CS_LOG_ERR("Failed to locate slave dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_slave_attach(
				scheduler_id, slave_dev->data->dev_id);
		if (status < 0) {
			CS_LOG_ERR("Failed to attach slave cryptodev %u",
					slave_dev->data->dev_id);
			return status;
		}

		CS_LOG_INFO("Scheduler %s attached slave %s\n",
				dev->data->name,
				sched_ctx->init_slave_names[i]);

		/* the name is no longer needed once the slave is attached */
		rte_free(sched_ctx->init_slave_names[i]);
		sched_ctx->init_slave_names[i] = NULL;

		sched_ctx->nb_init_slaves -= 1;
	}

	return 0;
}
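
/*
 * For context, a sketch of where init_slave_names[] comes from (this is
 * application command-line usage, not part of this file; the exact vdev
 * and option spelling may differ between DPDK versions):
 *
 *	--vdev "crypto_scheduler,slave=crypto_aesni_mb_1,slave=crypto_aesni_mb_2"
 *
 * Each "slave" option names an existing crypto PMD; the names are stored
 * by the vdev probe code and resolved here by scheduler_attach_init_slave().
 */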

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_slave() is called from several entry points,
	 * but only the first call performs any meaningful work.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	/* configure every attached slave with the same parameters */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		ret = rte_cryptodev_configure(slave_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		/* rte_ring requires a power-of-two size */
		uint32_t buff_size = rte_align32pow2(
				sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CS_LOG_ERR("failed to create unique reorder buffer "
					"name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CS_LOG_ERR("failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}
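
/*
 * Note: reordering_enabled is toggled through the scheduler API, e.g.
 * rte_cryptodev_scheduler_ordering_set(scheduler_id, 1) as declared in
 * rte_cryptodev_scheduler.h; update_order_ring() then (re)creates or frees
 * the per-queue-pair reorder ring the next time the device is started.
 */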

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* scheduler_attach_init_slave() is called from several entry points,
	 * but only the first call performs any meaningful work.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slaves attached to the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->dev_stop)(slave_dev);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if (*sched_ctx->ops.slave_detach)
			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	/* accumulate the counters of every slave */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);
		struct rte_cryptodev_stats slave_stats = {0};

		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);

		stats->enqueued_count += slave_stats.enqueued_count;
		stats->dequeued_count += slave_stats.dequeued_count;

		stats->enqueue_err_count += slave_stats.enqueue_err_count;
		stats->dequeue_err_count += slave_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->stats_reset)(slave_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_slave() is called from several entry points,
	 * but only the first call performs any meaningful work.
	 */
	scheduler_attach_init_slave(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);
		/* the scheduler cannot hold more sessions than its most
		 * constrained slave
		 */
		max_nb_sessions = RTE_MIN(slave_info.sym.max_nb_sessions,
				max_nb_sessions);
	}

	dev_info->dev_type = dev->dev_type;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->sym.max_nb_sessions = max_nb_sessions;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->order_ring)
		rte_ring_free(qp_ctx->order_ring);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHED PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CS_LOG_ERR("Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	/* set up the same queue pair on every slave */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_id = sched_ctx->slaves[i].dev_id;

		ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual number of usable ring slots is nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_slave() is called from several entry points,
	 * but only the first call performs any meaningful work.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0) {
		CS_LOG_ERR("Failed to attach slave");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CS_LOG_ERR("Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

/** Start queue pair */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

static uint32_t
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct scheduler_session);
}

static int
config_slave_sess(struct scheduler_ctx *sched_ctx,
		struct rte_crypto_sym_xform *xform,
		struct scheduler_session *sess,
		uint32_t create)
{
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct scheduler_slave *slave = &sched_ctx->slaves[i];

		if (sess->sessions[i]) {
			if (create)
				continue;
			/* !create: tear down the existing slave session */
			sess->sessions[i] = rte_cryptodev_sym_session_free(
					slave->dev_id, sess->sessions[i]);
		} else {
			if (!create)
				continue;
			/* create a session on this slave */
			sess->sessions[i] =
					rte_cryptodev_sym_session_create(
							slave->dev_id, xform);
			if (!sess->sessions[i]) {
				/* roll back the sessions created so far */
				config_slave_sess(sched_ctx, NULL, sess, 0);
				return -1;
			}
		}
	}

	return 0;
}

/** Clear the session memory so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
		void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	config_slave_sess(sched_ctx, NULL, sess, 0);

	memset(sess, 0, sizeof(struct scheduler_session));
}

static void *
scheduler_pmd_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
		CS_LOG_ERR("unable to configure sym session");
		return NULL;
	}

	return sess;
}
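
/*
 * A scheduler session fans out to one slave session per attached slave,
 * built from the same xform chain, so a crypto op carrying the scheduler
 * session can be dispatched to any slave. From the application side the
 * usual (pre-17.08) session API applies, e.g. (illustrative only):
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(scheduler_dev_id, &xform);
 *
 * which reaches scheduler_pmd_session_configure() above via the cryptodev
 * framework.
 */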

struct rte_cryptodev_ops scheduler_pmd_ops = {
	.dev_configure		= scheduler_pmd_config,
	.dev_start		= scheduler_pmd_start,
	.dev_stop		= scheduler_pmd_stop,
	.dev_close		= scheduler_pmd_close,

	.stats_get		= scheduler_pmd_stats_get,
	.stats_reset		= scheduler_pmd_stats_reset,

	.dev_infos_get		= scheduler_pmd_info_get,

	.queue_pair_setup	= scheduler_pmd_qp_setup,
	.queue_pair_release	= scheduler_pmd_qp_release,
	.queue_pair_start	= scheduler_pmd_qp_start,
	.queue_pair_stop	= scheduler_pmd_qp_stop,
	.queue_pair_count	= scheduler_pmd_qp_count,

	.session_get_size	= scheduler_pmd_session_get_size,
	.session_configure	= scheduler_pmd_session_configure,
	.session_clear		= scheduler_pmd_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
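
/*
 * Illustrative end-to-end use of this PMD from an application, assuming the
 * scheduler API of the same DPDK release (names as declared in
 * rte_cryptodev_scheduler.h; a sketch, not a definitive recipe):
 *
 *	int sched_id = rte_cryptodev_get_dev_id("crypto_scheduler");
 *
 *	// attach slaves and pick a mode before configure/start
 *	rte_cryptodev_scheduler_slave_attach(sched_id, slave_dev_id);
 *	rte_cryptodev_scheduler_mode_set(sched_id, CDEV_SCHED_MODE_ROUNDROBIN);
 *	rte_cryptodev_scheduler_ordering_set(sched_id, 1);
 *
 *	rte_cryptodev_configure(sched_id, &conf);	// scheduler_pmd_config()
 *	rte_cryptodev_queue_pair_setup(sched_id, 0, &qp_conf, socket_id);
 *	rte_cryptodev_start(sched_id);			// scheduler_pmd_start()
 */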