/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_reorder.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"

/** Update the scheduler PMD's capabilities with those of an attaching
 * device.
 * After each device is attached, the scheduler's capabilities must be
 * the common capability set of all slaves.
 */
static uint32_t
sync_caps(struct rte_cryptodev_capabilities *caps,
		uint32_t nb_caps,
		const struct rte_cryptodev_capabilities *slave_caps)
{
	uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
	uint32_t i;

	/* the capability array is terminated by an entry with
	 * op == RTE_CRYPTO_OP_TYPE_UNDEFINED
	 */
	while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
		nb_slave_caps++;

	/* first slave: adopt its capabilities unchanged */
	if (nb_caps == 0) {
		rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
		return nb_slave_caps;
	}

	for (i = 0; i < sync_nb_caps; i++) {
		struct rte_cryptodev_capabilities *cap = &caps[i];
		uint32_t j;

		for (j = 0; j < nb_slave_caps; j++) {
			const struct rte_cryptodev_capabilities *s_cap =
					&slave_caps[j];

			if (s_cap->op != cap->op || s_cap->sym.xform_type !=
					cap->sym.xform_type)
				continue;

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_AUTH) {
				if (s_cap->sym.auth.algo !=
						cap->sym.auth.algo)
					continue;

				/* advertise the smaller of the two values
				 * for both the minimum and the maximum
				 * digest size
				 */
				cap->sym.auth.digest_size.min =
					s_cap->sym.auth.digest_size.min <
					cap->sym.auth.digest_size.min ?
					s_cap->sym.auth.digest_size.min :
					cap->sym.auth.digest_size.min;
				cap->sym.auth.digest_size.max =
					s_cap->sym.auth.digest_size.max <
					cap->sym.auth.digest_size.max ?
					s_cap->sym.auth.digest_size.max :
					cap->sym.auth.digest_size.max;
			}

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_CIPHER)
				if (s_cap->sym.cipher.algo !=
						cap->sym.cipher.algo)
					continue;

			/* common cap found, stop searching */
			break;
		}

		if (j < nb_slave_caps)
			continue;

		/* remove an uncommon cap from the array */
		for (j = i; j < sync_nb_caps - 1; j++)
			rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));

		memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
		sync_nb_caps--;
		/* re-check the entry just shifted into this slot; the
		 * unsigned wrap at i == 0 is undone by the loop's i++
		 */
		i--;
	}

	return sync_nb_caps;
}
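/*
 * Illustrative example (hypothetical numbers): if the scheduler currently
 * advertises SHA1-HMAC with a digest size range of 12..20 bytes and an
 * attaching slave advertises SHA1-HMAC with 10..16, sync_caps() keeps the
 * entry and narrows it to 10..16, the smaller value of each bound; any
 * capability the slave does not share at all is removed from the merged
 * array.
 */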
static int
update_scheduler_capability(struct scheduler_ctx *sched_ctx)
{
	struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
	uint32_t nb_caps = 0, i;

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

		/* merge down to the capabilities common to all slaves
		 * synced so far; an empty intersection is an error
		 */
		nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
		if (nb_caps == 0)
			return -1;
	}

	/* allocate one extra zeroed entry as the end-of-list marker */
	sched_ctx->capabilities = rte_zmalloc_socket(NULL,
			sizeof(struct rte_cryptodev_capabilities) *
			(nb_caps + 1), 0, SOCKET_ID_ANY);
	if (!sched_ctx->capabilities)
		return -ENOMEM;

	rte_memcpy(sched_ctx->capabilities, tmp_caps,
			sizeof(struct rte_cryptodev_capabilities) * nb_caps);

	return 0;
}

static void
update_scheduler_feature_flag(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	dev->feature_flags = 0;

	/* the scheduler advertises the union of its slaves' feature flags */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

		dev->feature_flags |= dev_info.feature_flags;
	}
}

static void
update_max_nb_qp(struct scheduler_ctx *sched_ctx)
{
	uint32_t i;
	uint32_t max_nb_qp;

	if (!sched_ctx->nb_slaves)
		return;

	max_nb_qp = UINT32_MAX;

	/* the scheduler can only offer as many queue pairs as its most
	 * constrained slave
	 */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
		max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
				dev_info.max_nb_queue_pairs : max_nb_qp;
	}

	sched_ctx->max_nb_queue_pairs = max_nb_qp;
}
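/*
 * Illustrative example (hypothetical numbers): with two slaves reporting
 * max_nb_queue_pairs of 8 and 2, update_max_nb_qp() above sets the
 * scheduler's max_nb_queue_pairs to 2, the most constrained slave's limit.
 */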
/** Attach a device to the scheduler. */
int
rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	struct scheduler_slave *slave;
	struct rte_cryptodev_info dev_info;
	uint32_t i;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;
	if (sched_ctx->nb_slaves >=
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
		CS_LOG_ERR("Too many slaves attached");
		return -ENOMEM;
	}

	for (i = 0; i < sched_ctx->nb_slaves; i++)
		if (sched_ctx->slaves[i].dev_id == slave_id) {
			CS_LOG_ERR("Slave already added");
			return -ENOTSUP;
		}

	slave = &sched_ctx->slaves[sched_ctx->nb_slaves];

	rte_cryptodev_info_get(slave_id, &dev_info);

	slave->dev_id = slave_id;
	slave->dev_type = dev_info.dev_type;
	sched_ctx->nb_slaves++;

	if (update_scheduler_capability(sched_ctx) < 0) {
		/* roll back the attach */
		slave->dev_id = 0;
		slave->dev_type = 0;
		sched_ctx->nb_slaves--;

		CS_LOG_ERR("capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}

/** Detach a device from the scheduler. */
int
rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t i, slave_pos;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
		if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
			break;
	if (slave_pos == sched_ctx->nb_slaves) {
		CS_LOG_ERR("Cannot find slave");
		return -ENOTSUP;
	}

	if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
		CS_LOG_ERR("Failed to detach slave");
		return -ENOTSUP;
	}

	/* shift the remaining slaves down and clear the freed tail slot */
	for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
		memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
				sizeof(struct scheduler_slave));
	}
	memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
			sizeof(struct scheduler_slave));
	sched_ctx->nb_slaves--;

	if (update_scheduler_capability(sched_ctx) < 0) {
		CS_LOG_ERR("capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}
/** Set the scheduling mode, loading the matching built-in scheduler. */
int
rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
		enum rte_cryptodev_scheduler_mode mode)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (mode == sched_ctx->mode)
		return 0;

	switch (mode) {
	case CDEV_SCHED_MODE_ROUNDROBIN:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				roundrobin_scheduler) < 0) {
			CS_LOG_ERR("Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				pkt_size_based_distr_scheduler) < 0) {
			CS_LOG_ERR("Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_FAILOVER:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				failover_scheduler) < 0) {
			CS_LOG_ERR("Failed to load scheduler");
			return -1;
		}
		break;
	default:
		CS_LOG_ERR("Not yet supported");
		return -ENOTSUP;
	}

	return 0;
}

/** Get the current scheduling mode. */
enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return sched_ctx->mode;
}

/** Enable or disable crypto operation reordering. */
int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
		uint32_t enable_reorder)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	sched_ctx->reordering_enabled = enable_reorder;

	return 0;
}

/** Get the current reordering setting. */
int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return (int)sched_ctx->reordering_enabled;
}
/** Load a user-defined scheduler into the scheduler PMD. */
int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
		struct rte_cryptodev_scheduler *scheduler)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	/* copy at most size - 1 bytes so the zero-initialized context
	 * keeps the strings NUL-terminated
	 */
	strncpy(sched_ctx->name, scheduler->name,
			RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
	strncpy(sched_ctx->description, scheduler->description,
			RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);

	/* load scheduler instance operations functions */
	sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
	sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
	sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
	sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
	sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
	sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
	sched_ctx->ops.option_set = scheduler->ops->option_set;
	sched_ctx->ops.option_get = scheduler->ops->option_get;

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->ops.create_private_ctx) {
		int ret = (*sched_ctx->ops.create_private_ctx)(dev);

		if (ret < 0) {
			CS_LOG_ERR("Unable to create scheduler private "
					"context");
			return ret;
		}
	}

	sched_ctx->mode = scheduler->mode;

	return 0;
}

/** Get the device ids of the attached slaves; returns the slave count. */
int
rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t nb_slaves = 0;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	nb_slaves = sched_ctx->nb_slaves;

	/* a NULL slaves pointer just queries the count */
	if (slaves && nb_slaves) {
		uint32_t i;

		for (i = 0; i < nb_slaves; i++)
			slaves[i] = sched_ctx->slaves[i].dev_id;
	}

	return (int)nb_slaves;
}

/** Set an option, forwarded to the loaded scheduler's option_set op. */
int
rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
			option_type >= CDEV_SCHED_OPTION_COUNT) {
		CS_LOG_ERR("Invalid option parameter");
		return -EINVAL;
	}

	if (!option) {
		CS_LOG_ERR("Invalid option parameter");
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		CS_LOG_ERR("Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);

	return (*sched_ctx->ops.option_set)(dev, option_type, option);
}

/** Get an option, forwarded to the loaded scheduler's option_get op. */
int
rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	if (!option) {
		CS_LOG_ERR("Invalid option parameter");
		return -EINVAL;
	}

	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
		CS_LOG_ERR("Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);

	return (*sched_ctx->ops.option_get)(dev, option_type, option);
}
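/*
 * Usage sketch (hypothetical ids; the attach, mode and ordering calls are
 * the ones defined above). With a scheduler device and two slave cryptodevs
 * already created, an application would typically run, while the scheduler
 * device is stopped:
 *
 *	rte_cryptodev_scheduler_slave_attach(sched_id, slave_id_0);
 *	rte_cryptodev_scheduler_slave_attach(sched_id, slave_id_1);
 *	rte_cryptodev_scheduler_mode_set(sched_id,
 *			CDEV_SCHED_MODE_ROUNDROBIN);
 *	rte_cryptodev_scheduler_ordering_set(sched_id, 1);
 *
 * and then configure and start the device through the regular rte_cryptodev
 * API. Each call returns 0 on success and a negative value on error.
 */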