/*-
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>

#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret = 0;

	/* configure each attached slave in turn; stop on the first error */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_configure)(slave_dev);
		if (ret < 0)
			break;
	}

	return ret;
}

/** Create or free the reorder buffer of a queue pair, depending on
 *  whether crypto operation reordering is enabled
 */
static int
update_reorder_buff(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char reorder_buff_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE;

		if (qp_ctx->reorder_buf) {
			rte_reorder_free(qp_ctx->reorder_buf);
			qp_ctx->reorder_buf = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(reorder_buff_name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_rb_%u_%u",
				RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
				dev->data->dev_id, qp_id) < 0) {
			CS_LOG_ERR("failed to create unique reorder buffer "
					"name");
			return -ENOMEM;
		}

		qp_ctx->reorder_buf = rte_reorder_create(reorder_buff_name,
				rte_socket_id(), buff_size);
		if (!qp_ctx->reorder_buf) {
			CS_LOG_ERR("failed to create reorder buffer");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->reorder_buf) {
			rte_reorder_free(qp_ctx->reorder_buf);
			qp_ctx->reorder_buf = NULL;
		}
	}

	return 0;
}

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_reorder_buff(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->dev_stop)(slave_dev);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if (*sched_ctx->ops.slave_detach)
			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->reorder_buf) {
			rte_reorder_free(qp_ctx->reorder_buf);
			qp_ctx->reorder_buf = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	/* NULL the freed pointers so a repeated close cannot double-free */
	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	return 0;
}
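
/*
 * The ops above are reached through the public cryptodev API. A minimal
 * application-side sketch of the expected lifecycle (illustrative only:
 * sched_dev_id and the configuration values are assumed here, and exact
 * structure fields and call signatures vary between DPDK releases):
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	rte_cryptodev_configure(sched_dev_id, &conf);
 *	rte_cryptodev_queue_pair_setup(sched_dev_id, 0, &qp_conf,
 *			rte_socket_id());
 *	rte_cryptodev_start(sched_dev_id);
 *	...
 *	rte_cryptodev_stop(sched_dev_id);
 *	rte_cryptodev_close(sched_dev_id);
 */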
/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	/* the scheduler's stats are the sums over all of its slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);
		struct rte_cryptodev_stats slave_stats = {0};

		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);

		stats->enqueued_count += slave_stats.enqueued_count;
		stats->dequeued_count += slave_stats.dequeued_count;

		stats->enqueue_err_count += slave_stats.enqueue_err_count;
		stats->dequeue_err_count += slave_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->stats_reset)(slave_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
	uint32_t i;

	if (!dev_info)
		return;

	/* the scheduler can hold no more sessions than its most
	 * constrained slave
	 */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);
		max_nb_sessions = slave_info.sym.max_nb_sessions <
				max_nb_sessions ?
				slave_info.sym.max_nb_sessions :
				max_nb_sessions;
	}

	dev_info->dev_type = dev->dev_type;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->sym.max_nb_sessions = max_nb_sessions;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->reorder_buf)
		rte_reorder_free(qp_ctx->reorder_buf);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	__rte_unused const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CS_LOG_ERR("Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CS_LOG_ERR("Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

/** Start queue pair */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Return the size of the scheduler session structure */
static uint32_t
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct scheduler_session);
}

/** Create or clear the per-slave sessions, depending on the create flag */
static int
config_slave_sess(struct scheduler_ctx *sched_ctx,
		struct rte_crypto_sym_xform *xform,
		struct scheduler_session *sess,
		uint32_t create)
{
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct scheduler_slave *slave = &sched_ctx->slaves[i];
		struct rte_cryptodev *dev =
				rte_cryptodev_pmd_get_dev(slave->dev_id);

		if (sess->sessions[i]) {
			if (create)
				continue;
			/* !create */
			(*dev->dev_ops->session_clear)(dev,
					(void *)sess->sessions[i]);
			sess->sessions[i] = NULL;
		} else {
			if (!create)
				continue;
			/* create */
			sess->sessions[i] =
					rte_cryptodev_sym_session_create(
							slave->dev_id, xform);
			if (!sess->sessions[i]) {
				/* undo the slave sessions created so far */
				config_slave_sess(sched_ctx, NULL, sess, 0);
				return -1;
			}
		}
	}

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
	void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	config_slave_sess(sched_ctx, NULL, sess, 0);

	memset(sess, 0, sizeof(struct scheduler_session));
}

/** Configure a session on every slave */
static void *
scheduler_pmd_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform, void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
		CS_LOG_ERR("unable to configure sym session");
		return NULL;
	}

	return sess;
}

struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure = scheduler_pmd_config,
		.dev_start = scheduler_pmd_start,
		.dev_stop = scheduler_pmd_stop,
		.dev_close = scheduler_pmd_close,

		.stats_get = scheduler_pmd_stats_get,
		.stats_reset = scheduler_pmd_stats_reset,

		.dev_infos_get = scheduler_pmd_info_get,

		.queue_pair_setup = scheduler_pmd_qp_setup,
		.queue_pair_release = scheduler_pmd_qp_release,
		.queue_pair_start = scheduler_pmd_qp_start,
		.queue_pair_stop = scheduler_pmd_qp_stop,
		.queue_pair_count = scheduler_pmd_qp_count,

		.session_get_size = scheduler_pmd_session_get_size,
		.session_configure = scheduler_pmd_session_configure,
		.session_clear = scheduler_pmd_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
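
/*
 * Usage note: the scheduler is normally instantiated as a virtual device
 * on the EAL command line, with its slaves attached through device
 * arguments, e.g. (the slave names below are illustrative, not taken from
 * this file, and the accepted arguments depend on the DPDK release):
 *
 *	--vdev "crypto_scheduler,slave=aesni_mb_1,slave=aesni_mb_2"
 *
 * Slaves can also be attached programmatically with
 * rte_cryptodev_scheduler_slave_attach(), and the scheduling mode is
 * selected with rte_cryptodev_scheduler_mode_set() before the device is
 * started, which populates the sched_ctx->ops callbacks used throughout
 * this file.
 */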