/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>

#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret = 0;

	/* apply the same configuration to every slave device */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		ret = rte_cryptodev_configure(slave_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

/** Create or free the reorder ring of a queue pair, depending on whether
 *  reordering is currently enabled
 */
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CS_LOG_ERR("failed to create unique reorder buffer "
					"name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CS_LOG_ERR("failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->dev_stop)(slave_dev);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if (*sched_ctx->ops.slave_detach)
			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx)
		rte_free(sched_ctx->private_ctx);

	if (sched_ctx->capabilities)
		rte_free(sched_ctx->capabilities);

	return 0;
}
/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	/* the scheduler's stats are the sums of its slaves' stats */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);
		struct rte_cryptodev_stats slave_stats = {0};

		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);

		stats->enqueued_count += slave_stats.enqueued_count;
		stats->dequeued_count += slave_stats.dequeued_count;

		stats->enqueue_err_count += slave_stats.enqueue_err_count;
		stats->dequeue_err_count += slave_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->stats_reset)(slave_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
	uint32_t i;

	if (!dev_info)
		return;

	/* the session limit is the smallest limit among the slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);
		max_nb_sessions = slave_info.sym.max_nb_sessions <
				max_nb_sessions ?
				slave_info.sym.max_nb_sessions :
				max_nb_sessions;
	}

	dev_info->dev_type = dev->dev_type;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->sym.max_nb_sessions = max_nb_sessions;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->order_ring)
		rte_ring_free(qp_ctx->order_ring);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CS_LOG_ERR("Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	/* set up the same queue pair on every slave */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_id = sched_ctx->slaves[i].dev_id;

		ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	sched_ctx->qp_conf.nb_descriptors = qp_conf->nb_descriptors;

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CS_LOG_ERR("Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

/** Start queue pair */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Return the size of the scheduler session structure */
static uint32_t
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct scheduler_session);
}

/** Create (create == 1) or free (create == 0) a session on every slave for
 *  the given scheduler session
 */
static int
config_slave_sess(struct scheduler_ctx *sched_ctx,
	struct rte_crypto_sym_xform *xform,
	struct scheduler_session *sess,
	uint32_t create)
{
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct scheduler_slave *slave = &sched_ctx->slaves[i];

		if (sess->sessions[i]) {
			if (create)
				continue;
			/* !create */
			sess->sessions[i] = rte_cryptodev_sym_session_free(
					slave->dev_id, sess->sessions[i]);
		} else {
			if (!create)
				continue;
			/* create */
			sess->sessions[i] =
					rte_cryptodev_sym_session_create(
							slave->dev_id, xform);
			if (!sess->sessions[i]) {
				/* roll back the slave sessions already
				 * created
				 */
				config_slave_sess(sched_ctx, NULL, sess, 0);
				return -1;
			}
		}
	}

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
	void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	/* free the slave sessions before wiping the scheduler session */
	config_slave_sess(sched_ctx, NULL, sess, 0);

	memset(sess, 0, sizeof(struct scheduler_session));
}

/** Configure a session on this device and all of its slaves */
static void *
scheduler_pmd_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform, void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
		CS_LOG_ERR("unable to configure sym session");
		return NULL;
	}

	return sess;
}

struct rte_cryptodev_ops scheduler_pmd_ops = {
	.dev_configure = scheduler_pmd_config,
	.dev_start = scheduler_pmd_start,
	.dev_stop = scheduler_pmd_stop,
	.dev_close = scheduler_pmd_close,

	.stats_get = scheduler_pmd_stats_get,
	.stats_reset = scheduler_pmd_stats_reset,

	.dev_infos_get = scheduler_pmd_info_get,

	.queue_pair_setup = scheduler_pmd_qp_setup,
	.queue_pair_release = scheduler_pmd_qp_release,
	.queue_pair_start = scheduler_pmd_qp_start,
	.queue_pair_stop = scheduler_pmd_qp_stop,
	.queue_pair_count = scheduler_pmd_qp_count,

	.session_get_size = scheduler_pmd_session_get_size,
	.session_configure = scheduler_pmd_session_configure,
	.session_clear = scheduler_pmd_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;