/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_reorder.h>

#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"

uint8_t cryptodev_driver_id;

struct scheduler_init_params {
	struct rte_cryptodev_pmd_init_params def_p;
	uint32_t nb_slaves;
	enum rte_cryptodev_scheduler_mode mode;
	uint32_t enable_ordering;
	uint64_t wcmask;
	char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
			[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
};

#define RTE_CRYPTODEV_VDEV_NAME			("name")
#define RTE_CRYPTODEV_VDEV_SLAVE		("slave")
#define RTE_CRYPTODEV_VDEV_MODE			("mode")
#define RTE_CRYPTODEV_VDEV_ORDERING		("ordering")
#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG	("max_nb_queue_pairs")
#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG	("max_nb_sessions")
#define RTE_CRYPTODEV_VDEV_SOCKET_ID		("socket_id")
#define RTE_CRYPTODEV_VDEV_COREMASK		("coremask")
#define RTE_CRYPTODEV_VDEV_CORELIST		("corelist")

const char *scheduler_valid_params[] = {
	RTE_CRYPTODEV_VDEV_NAME,
	RTE_CRYPTODEV_VDEV_SLAVE,
	RTE_CRYPTODEV_VDEV_MODE,
	RTE_CRYPTODEV_VDEV_ORDERING,
	RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
	RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
	RTE_CRYPTODEV_VDEV_SOCKET_ID,
	RTE_CRYPTODEV_VDEV_COREMASK,
	RTE_CRYPTODEV_VDEV_CORELIST
};

struct scheduler_parse_map {
	const char *name;
	uint32_t val;
};

const struct scheduler_parse_map scheduler_mode_map[] = {
	{RTE_STR(SCHEDULER_MODE_NAME_ROUND_ROBIN),
			CDEV_SCHED_MODE_ROUNDROBIN},
	{RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
			CDEV_SCHED_MODE_PKT_SIZE_DISTR},
	{RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
			CDEV_SCHED_MODE_FAILOVER},
	{RTE_STR(SCHEDULER_MODE_NAME_MULTI_CORE),
			CDEV_SCHED_MODE_MULTICORE}
};

const struct scheduler_parse_map scheduler_ordering_map[] = {
	{"enable", 1},
	{"disable", 0}
};
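/*
 * Create the scheduler vdev: allocate the cryptodev, record the worker
 * cores for multi-core mode, apply the requested scheduling mode and
 * packet-ordering setting, and store the slave names given on the vdev
 * command line so the slaves can be attached later.
 */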
static int
cryptodev_scheduler_create(const char *name,
		struct rte_vdev_device *vdev,
		struct scheduler_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct scheduler_ctx *sched_ctx;
	uint32_t i;
	int ret;

	dev = rte_cryptodev_pmd_create(name, &vdev->device,
			&init_params->def_p);
	if (dev == NULL) {
		CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
			name);
		return -EFAULT;
	}

	if (init_params->wcmask != 0)
		RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
			init_params->wcmask);

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_crypto_scheduler_pmd_ops;

	sched_ctx = dev->data->dev_private;
	sched_ctx->max_nb_queue_pairs =
			init_params->def_p.max_nb_queue_pairs;

	if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
		uint16_t i;

		sched_ctx->nb_wc = 0;

		for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
			if (init_params->wcmask & (1ULL << i)) {
				sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
				RTE_LOG(INFO, PMD,
					" Worker core[%u]=%u added\n",
					sched_ctx->nb_wc-1, i);
			}
		}
	}

	if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
			init_params->mode < CDEV_SCHED_MODE_COUNT) {
		ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
			init_params->mode);
		if (ret < 0) {
			rte_cryptodev_pmd_release_device(dev);
			return ret;
		}

		for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
			if (scheduler_mode_map[i].val != sched_ctx->mode)
				continue;

			RTE_LOG(INFO, PMD, " Scheduling mode = %s\n",
					scheduler_mode_map[i].name);
			break;
		}
	}

	sched_ctx->reordering_enabled = init_params->enable_ordering;

	for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
		if (scheduler_ordering_map[i].val !=
				sched_ctx->reordering_enabled)
			continue;

		RTE_LOG(INFO, PMD, " Packet ordering = %s\n",
				scheduler_ordering_map[i].name);

		break;
	}

	for (i = 0; i < init_params->nb_slaves; i++) {
		sched_ctx->init_slave_names[sched_ctx->nb_init_slaves] =
			rte_zmalloc_socket(
				NULL,
				RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN, 0,
				SOCKET_ID_ANY);

		if (!sched_ctx->init_slave_names[
				sched_ctx->nb_init_slaves]) {
			CS_LOG_ERR("driver %s: Insufficient memory",
					name);
			return -ENOMEM;
		}

		strncpy(sched_ctx->init_slave_names[
					sched_ctx->nb_init_slaves],
				init_params->slave_names[i],
				RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);

		sched_ctx->nb_init_slaves++;
	}

	/*
	 * Initialize capabilities structure as an empty structure,
	 * in case device information is requested when no slaves are attached
	 */
	sched_ctx->capabilities = rte_zmalloc_socket(NULL,
			sizeof(struct rte_cryptodev_capabilities),
			0, SOCKET_ID_ANY);

	if (!sched_ctx->capabilities) {
		RTE_LOG(ERR, PMD, "Not enough memory for capability "
				"information\n");
		return -ENOMEM;
	}

	return 0;
}

static int
cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_cryptodev *dev;
	struct scheduler_ctx *sched_ctx;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	dev = rte_cryptodev_pmd_get_named_dev(name);
	if (dev == NULL)
		return -EINVAL;

	sched_ctx = dev->data->dev_private;

	if (sched_ctx->nb_slaves) {
		uint32_t i;

		for (i = 0; i < sched_ctx->nb_slaves; i++)
			rte_cryptodev_scheduler_slave_detach(dev->data->dev_id,
					sched_ctx->slaves[i].dev_id);
	}

	return rte_cryptodev_pmd_destroy(dev);
}
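/*
 * rte_kvargs_process() callbacks: each helper below parses one "key=value"
 * device argument accepted by this PMD.
 */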
/** Parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	int *i = (int *) extra_args;

	*i = atoi(value);
	if (*i < 0) {
		CS_LOG_ERR("Argument has to be positive.\n");
		return -EINVAL;
	}

	return 0;
}

/** Parse integer from hexadecimal integer argument */
static int
parse_coremask_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	struct scheduler_init_params *params = extra_args;

	params->wcmask = strtoull(value, NULL, 16);

	return 0;
}

/** Parse integer from list of integers argument */
static int
parse_corelist_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	struct scheduler_init_params *params = extra_args;

	params->wcmask = 0ULL;

	const char *token = value;

	while (isdigit(token[0])) {
		char *rval;
		unsigned int core = strtoul(token, &rval, 10);

		params->wcmask |= 1ULL << core;
		token = (const char *)rval;
		if (token[0] == '\0')
			break;
		token++;
	}

	return 0;
}

/** Parse name */
static int
parse_name_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	struct rte_cryptodev_pmd_init_params *params = extra_args;

	if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
		CS_LOG_ERR("Invalid name %s, should be less than "
				"%u bytes.\n", value,
				RTE_CRYPTODEV_NAME_MAX_LEN - 1);
		return -EINVAL;
	}

	strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);

	return 0;
}

/** Parse slave */
static int
parse_slave_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	struct scheduler_init_params *param = extra_args;

	if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
		CS_LOG_ERR("Too many slaves.\n");
		return -ENOMEM;
	}

	strncpy(param->slave_names[param->nb_slaves++], value,
			RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);

	return 0;
}

static int
parse_mode_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	struct scheduler_init_params *param = extra_args;
	uint32_t i;

	for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
		if (strcmp(value, scheduler_mode_map[i].name) == 0) {
			param->mode = (enum rte_cryptodev_scheduler_mode)
					scheduler_mode_map[i].val;
			break;
		}
	}

	if (i == RTE_DIM(scheduler_mode_map)) {
		CS_LOG_ERR("Unrecognized input.\n");
		return -EINVAL;
	}

	return 0;
}

static int
parse_ordering_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	struct scheduler_init_params *param = extra_args;
	uint32_t i;

	for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
		if (strcmp(value, scheduler_ordering_map[i].name) == 0) {
			param->enable_ordering =
					scheduler_ordering_map[i].val;
			break;
		}
	}

	if (i == RTE_DIM(scheduler_ordering_map)) {
		CS_LOG_ERR("Unrecognized input.\n");
		return -EINVAL;
	}

	return 0;
}

static int
scheduler_parse_init_params(struct scheduler_init_params *params,
		const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;

	if (input_args) {
		kvlist = rte_kvargs_parse(input_args,
				scheduler_valid_params);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
				&parse_integer_arg,
				&params->def_p.max_nb_queue_pairs);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
				&parse_integer_arg,
				&params->def_p.max_nb_sessions);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
				&parse_integer_arg,
				&params->def_p.socket_id);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_COREMASK,
				&parse_coremask_arg,
				params);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_CORELIST,
				&parse_corelist_arg,
				params);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
				&parse_name_arg,
				&params->def_p);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SLAVE,
				&parse_slave_arg, params);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE,
				&parse_mode_arg, params);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_ORDERING,
				&parse_ordering_arg, params);
		if (ret < 0)
			goto free_kvlist;
	}

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
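/*
 * Probe entry point: parse the vdev arguments and create the scheduler
 * device. An illustrative argument string (the slave names below are
 * placeholders for crypto devices that must already exist in the process):
 *
 *   crypto_scheduler,slave=crypto_aesni_mb0,slave=crypto_aesni_mb1,mode=round-robin,ordering=enable
 */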
static int
cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
{
	struct scheduler_init_params init_params = {
		.def_p = {
			"",
			sizeof(struct scheduler_ctx),
			rte_socket_id(),
			RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
			RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
		},
		.nb_slaves = 0,
		.mode = CDEV_SCHED_MODE_NOT_SET,
		.enable_ordering = 0,
		.slave_names = { {0} }
	};
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	scheduler_parse_init_params(&init_params,
			rte_vdev_device_args(vdev));

	return cryptodev_scheduler_create(name,
			vdev,
			&init_params);
}

static struct rte_vdev_driver cryptodev_scheduler_pmd_drv = {
	.probe = cryptodev_scheduler_probe,
	.remove = cryptodev_scheduler_remove
};

static struct cryptodev_driver scheduler_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
	cryptodev_scheduler_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int> "
	"slave=<name>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
		cryptodev_scheduler_pmd_drv,
		cryptodev_driver_id);