/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>

#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret = 0;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_configure)(slave_dev);
		if (ret < 0)
			break;
	}

	return ret;
}

static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CS_LOG_ERR("failed to create unique reorder buffer "
					"name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CS_LOG_ERR("failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}
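/*
 * Note: rte_ring_create() only accepts power-of-two sizes, which is why
 * update_order_ring() above rounds the combined per-slave buffer space
 * (nb_slaves * PER_SLAVE_BUFF_SIZE) up with rte_align32pow2() before the
 * reorder ring is (re)created.
 */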
/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->dev_stop)(slave_dev);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if (*sched_ctx->ops.slave_detach)
			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
	}
}
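/*
 * Teardown runs in the reverse order of scheduler_pmd_start(): slaves are
 * stopped before the mode's scheduler_stop hook executes, and slaves are
 * detached only once the scheduler itself has stopped.
 */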
/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	/* reset the freed pointers so a repeated close cannot double-free */
	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);
		struct rte_cryptodev_stats slave_stats = {0};

		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);

		stats->enqueued_count += slave_stats.enqueued_count;
		stats->dequeued_count += slave_stats.dequeued_count;

		stats->enqueue_err_count += slave_stats.enqueue_err_count;
		stats->dequeue_err_count += slave_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->stats_reset)(slave_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
	uint32_t i;

	if (!dev_info)
		return;

	/* the scheduler can hold no more sessions than its smallest slave */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);
		max_nb_sessions = RTE_MIN(max_nb_sessions,
				slave_info.sym.max_nb_sessions);
	}

	dev_info->dev_type = dev->dev_type;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->sym.max_nb_sessions = max_nb_sessions;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->order_ring)
		rte_ring_free(qp_ctx->order_ring);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
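/*
 * Queue pair setup below allocates only the generic scheduler_qp_ctx;
 * mode-specific per-qp state is left to the optional ops.config_queue_pair
 * hook, which is expected to store its context in qp_ctx->private_qp_ctx
 * so that scheduler_pmd_qp_release() above can free it.
 */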
/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	__rte_unused const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CS_LOG_ERR("Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CS_LOG_ERR("Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

/** Start queue pair */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

static uint32_t
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct scheduler_session);
}

/*
 * Create (create != 0) or clear (create == 0) the per-slave sessions
 * backing a scheduler session. On a failed create, already-created slave
 * sessions are rolled back by recursing with create == 0.
 */
static int
config_slave_sess(struct scheduler_ctx *sched_ctx,
		struct rte_crypto_sym_xform *xform,
		struct scheduler_session *sess,
		uint32_t create)
{
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct scheduler_slave *slave = &sched_ctx->slaves[i];
		struct rte_cryptodev *dev =
				rte_cryptodev_pmd_get_dev(slave->dev_id);

		if (sess->sessions[i]) {
			if (create)
				continue;
			/* !create */
			(*dev->dev_ops->session_clear)(dev,
					(void *)sess->sessions[i]);
			sess->sessions[i] = NULL;
		} else {
			if (!create)
				continue;
			/* create */
			sess->sessions[i] =
					rte_cryptodev_sym_session_create(
							slave->dev_id, xform);
			if (!sess->sessions[i]) {
				config_slave_sess(sched_ctx, NULL, sess, 0);
				return -1;
			}
		}
	}

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
	void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	config_slave_sess(sched_ctx, NULL, sess, 0);

	memset(sess, 0, sizeof(struct scheduler_session));
}

static void *
scheduler_pmd_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform, void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
		CS_LOG_ERR("unable to configure sym session");
		return NULL;
	}

	return sess;
}

struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure = scheduler_pmd_config,
		.dev_start = scheduler_pmd_start,
		.dev_stop = scheduler_pmd_stop,
		.dev_close = scheduler_pmd_close,

		.stats_get = scheduler_pmd_stats_get,
		.stats_reset = scheduler_pmd_stats_reset,

		.dev_infos_get = scheduler_pmd_info_get,

		.queue_pair_setup = scheduler_pmd_qp_setup,
		.queue_pair_release = scheduler_pmd_qp_release,
		.queue_pair_start = scheduler_pmd_qp_start,
		.queue_pair_stop = scheduler_pmd_qp_stop,
		.queue_pair_count = scheduler_pmd_qp_count,

		.session_get_size = scheduler_pmd_session_get_size,
		.session_configure = scheduler_pmd_session_configure,
		.session_clear = scheduler_pmd_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
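/*
 * Illustrative usage sketch (not part of this file): applications never
 * call the ops above directly; they are reached through the public
 * cryptodev and scheduler APIs, roughly as below. Names such as
 * scheduler_id, slave_id, conf and qp_conf are placeholders.
 *
 *	rte_cryptodev_scheduler_slave_attach(scheduler_id, slave_id);
 *	rte_cryptodev_scheduler_mode_set(scheduler_id,
 *			CDEV_SCHED_MODE_ROUNDROBIN);
 *	rte_cryptodev_configure(scheduler_id, &conf);
 *			-> scheduler_pmd_config()
 *	rte_cryptodev_queue_pair_setup(scheduler_id, 0, &qp_conf,
 *			rte_socket_id());
 *			-> scheduler_pmd_qp_setup()
 *	rte_cryptodev_start(scheduler_id);
 *			-> scheduler_pmd_start()
 *	...
 *	rte_cryptodev_stop(scheduler_id);
 *			-> scheduler_pmd_stop()
 *	rte_cryptodev_close(scheduler_id);
 *			-> scheduler_pmd_close()
 */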