/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "accel_dpdk_cryptodev.h"

#include "spdk/accel.h"
#include "spdk/accel_module.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk/log.h"
#include "spdk/json.h"
#include "spdk_internal/sgl.h"

#include <rte_bus_vdev.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf_dyn.h>
#include <rte_version.h>

/* The VF spread is the number of queue pairs between virtual functions, we use this to
 * load balance the QAT device.
 */
#define ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD 32

/* This controls how many ops will be dequeued from the crypto driver in one run
 * of the poller. It is mainly a performance knob as it effectively determines how
 * much work the poller has to do. However even that can vary between crypto drivers
 * as the AESNI_MB driver for example does all the crypto work on dequeue whereas the
 * QAT driver just dequeues what has been completed already.
 */
#define ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE 64

#define ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE (128)

/* The number of MBUFS we need must be a power of two and to support other small IOs
 * in addition to the limits mentioned above, we go to the next power of two. It is a
 * big number because it is one mempool for source and destination mbufs. It may
 * need to be bigger to support multiple crypto drivers at once.
 */
#define ACCEL_DPDK_CRYPTODEV_NUM_MBUFS 32768
#define ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE 256
#define ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES 128
/* Two sessions (encrypt + decrypt) per crypto volume/key. */
#define ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS (2 * ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES)
#define ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE 0

/* This is the max number of IOs we can supply to any crypto device QP at one time.
 * It can vary between drivers.
 */
#define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS 2048

/* At this moment DPDK descriptors allocation for mlx5 has some issues. We use 512
 * as a compromise value between performance and the time spent for initialization. */
#define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5 512

#define ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP 64

/* Common for supported devices. */
#define ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS 2
/* IV is stored in the private area of each crypto op, right after the op,
 * the sym op and the cipher xforms. */
#define ACCEL_DPDK_CRYPTODEV_IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op) + \
		(ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * \
		 sizeof(struct rte_crypto_sym_xform)))
#define ACCEL_DPDK_CRYPTODEV_IV_LENGTH 16

/* Driver names */
#define ACCEL_DPDK_CRYPTODEV_AESNI_MB	"crypto_aesni_mb"
#define ACCEL_DPDK_CRYPTODEV_QAT	"crypto_qat"
#define ACCEL_DPDK_CRYPTODEV_QAT_ASYM	"crypto_qat_asym"
#define ACCEL_DPDK_CRYPTODEV_MLX5	"mlx5_pci"

/* Supported ciphers */
#define ACCEL_DPDK_CRYPTODEV_AES_CBC "AES_CBC" /* QAT and AESNI_MB */
#define ACCEL_DPDK_CRYPTODEV_AES_XTS "AES_XTS" /* QAT and MLX5 */

/* Specific to AES_CBC. */
#define ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH 16
#define ACCEL_DPDK_CRYPTODEV_AES_XTS_128_BLOCK_KEY_LENGTH 16 /* AES-XTS-128 block key size. */
#define ACCEL_DPDK_CRYPTODEV_AES_XTS_256_BLOCK_KEY_LENGTH 32 /* AES-XTS-256 block key size. */
/* Limit of the max memory len attached to mbuf - rte_pktmbuf_attach_extbuf has uint16_t `buf_len`
 * parameter, we use the closest aligned value 32768 for better performance */
#define ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN 32768

/* Used to store IO context in mbuf */
static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = {
	.name = "context_accel_dpdk_cryptodev",
	.size = sizeof(uint64_t),
	.align = __alignof__(uint64_t),
	.flags = 0,
};

struct accel_dpdk_cryptodev_device;

enum accel_dpdk_cryptodev_driver_type {
	ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB = 0,
	ACCEL_DPDK_CRYPTODEV_DRIVER_QAT,
	ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI,
	ACCEL_DPDK_CRYPTODEV_DRIVER_LAST
};

enum accel_dpdk_crypto_dev_cipher_type {
	ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC,
	ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS
};

/* One hardware queue pair of a crypto device; owned by at most one io_channel at a time. */
struct accel_dpdk_cryptodev_qp {
	struct accel_dpdk_cryptodev_device *device;	/* ptr to crypto device */
	uint32_t num_enqueued_ops;	/* Used to decide whether to poll the qp or not */
	uint8_t qp;	/* queue identifier */
	bool in_use;	/* whether this node is in use or not */
	uint8_t index;	/* used by QAT to load balance placement of qpairs */
	TAILQ_ENTRY(accel_dpdk_cryptodev_qp) link;
};

struct accel_dpdk_cryptodev_device {
	enum accel_dpdk_cryptodev_driver_type type;
	struct rte_cryptodev_info cdev_info;	/* includes DPDK device friendly name */
	uint32_t qp_desc_nr;	/* max number of qp descriptors to be enqueued in burst */
	uint8_t cdev_id;	/* identifier for the device */
	TAILQ_HEAD(, accel_dpdk_cryptodev_qp) qpairs;
	TAILQ_ENTRY(accel_dpdk_cryptodev_device) link;
};

/* Per-device sessions created for one crypto key. */
struct accel_dpdk_cryptodev_key_handle {
	struct accel_dpdk_cryptodev_device *device;
	TAILQ_ENTRY(accel_dpdk_cryptodev_key_handle) link;
	void *session_encrypt;	/* encryption session for this key */
	void *session_decrypt;	/* decryption session for this key */
	struct rte_crypto_sym_xform cipher_xform;	/* crypto control struct for this key */
};

struct accel_dpdk_cryptodev_key_priv {
	enum accel_dpdk_cryptodev_driver_type driver;
	enum accel_dpdk_crypto_dev_cipher_type cipher;
	char *xts_key;
	TAILQ_HEAD(, accel_dpdk_cryptodev_key_handle) dev_keys;
};

/* The crypto channel struct. It is allocated and freed on my behalf by the io channel code.
 * We store things in here that are needed on per thread basis like the base_channel for this thread,
 * and the poller for this thread.
 */
struct accel_dpdk_cryptodev_io_channel {
	/* completion poller */
	struct spdk_poller *poller;
	/* Array of qpairs for each available device. The specific device will be selected depending on the crypto key */
	struct accel_dpdk_cryptodev_qp *device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_LAST];
	/* Used to queue tasks when qpair is full or only part of crypto ops was submitted to the PMD */
	TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks;
	/* Used to queue tasks that were completed in submission path - to avoid calling cpl_cb and possibly overflow
	 * the call stack */
	TAILQ_HEAD(, accel_dpdk_cryptodev_task) completed_tasks;
};

struct accel_dpdk_cryptodev_task {
	struct spdk_accel_task base;
	uint32_t cryop_completed;	/* The number of crypto operations completed by HW */
	uint32_t cryop_submitted;	/* The number of crypto operations submitted to HW */
	uint32_t cryop_total;	/* Total number of crypto operations in this task */
	bool is_failed;		/* set when any crypto op of this task failed; reported on completion */
	bool inplace;		/* src buffers are also the dst buffers (no separate dst mbufs) */
	TAILQ_ENTRY(accel_dpdk_cryptodev_task) link;
};

/* Shared mempools between all devices on this system */
static struct rte_mempool *g_session_mp = NULL;
static struct rte_mempool *g_session_mp_priv = NULL;
static struct rte_mempool *g_mbuf_mp = NULL;	/* mbuf mempool */
static int g_mbuf_offset;	/* offset of the dynamic field used to stash the task pointer in each mbuf */
static struct rte_mempool *g_crypto_op_mp = NULL;	/* crypto operations, must be rte* mempool */
static struct rte_mbuf_ext_shared_info g_shinfo = {};	/* used by DPDK mbuf macro */

static uint8_t g_qat_total_qp = 0;
static uint8_t g_next_qat_index;	/* next QAT qp index to hand out; protected by g_device_lock */

static const char *g_driver_names[] = {
	[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB]	= ACCEL_DPDK_CRYPTODEV_AESNI_MB,
	[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]	= ACCEL_DPDK_CRYPTODEV_QAT,
	[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]	= ACCEL_DPDK_CRYPTODEV_MLX5
};
static const char *g_cipher_names[] = {
	[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC]	= ACCEL_DPDK_CRYPTODEV_AES_CBC,
	[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS]	= ACCEL_DPDK_CRYPTODEV_AES_XTS,
};

static enum accel_dpdk_cryptodev_driver_type g_dpdk_cryptodev_driver =
	ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;

/* Global list of all crypto devices */
static TAILQ_HEAD(, accel_dpdk_cryptodev_device) g_crypto_devices = TAILQ_HEAD_INITIALIZER(
			g_crypto_devices);
static pthread_mutex_t g_device_lock = PTHREAD_MUTEX_INITIALIZER;

static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module;

static int accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
		struct accel_dpdk_cryptodev_task *task);

/* Register this module with the accel framework. */
void
accel_dpdk_cryptodev_enable(void)
{
	spdk_accel_module_list_add(&g_accel_dpdk_cryptodev_module);
}

/* Select which crypto PMD this module will use.
 * Returns 0 on success, -EINVAL if \b driver_name is not one of the supported PMDs. */
int
accel_dpdk_cryptodev_set_driver(const char *driver_name)
{
	if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) {
		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT;
	} else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) {
		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	} else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) {
		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI;
	} else {
		SPDK_ERRLOG("Unsupported driver %s\n", driver_name);
		return -EINVAL;
	}

	SPDK_NOTICELOG("Using driver %s\n", driver_name);

	return 0;
}

/* Return the name of the currently selected crypto PMD. */
const char *
accel_dpdk_cryptodev_get_driver(void)
{
	return g_driver_names[g_dpdk_cryptodev_driver];
}

/* Dequeue one burst of completed crypto ops from \b qp, account completions per task,
 * complete or resubmit tasks as appropriate, and bulk-free the ops and mbufs.
 * Returns the number of ops dequeued. */
static inline uint16_t
accel_dpdk_cryptodev_poll_qp(struct accel_dpdk_cryptodev_qp *qp,
			     struct accel_dpdk_cryptodev_io_channel *crypto_ch)
{
	struct rte_crypto_op *dequeued_ops[ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
	struct rte_mbuf *mbufs_to_free[2 * ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
	struct accel_dpdk_cryptodev_task *task;
	uint32_t num_mbufs = 0;
	int i;
	uint16_t num_dequeued_ops;

	/* Each run of the poller will get just what the device has available
	 * at the moment we call it, we don't check again after draining the
	 * first batch.
	 */
	num_dequeued_ops = rte_cryptodev_dequeue_burst(qp->device->cdev_id, qp->qp,
			   dequeued_ops, ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE);
	/* Check if operation was processed successfully */
	for (i = 0; i < num_dequeued_ops; i++) {

		/* We don't know the order or association of the crypto ops wrt any
		 * particular task so need to look at each and determine if it's
		 * the last one for its task or not.
		 */
		task = (struct accel_dpdk_cryptodev_task *)*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src,
				g_mbuf_offset, uint64_t *);
		assert(task != NULL);

		if (dequeued_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			SPDK_ERRLOG("error with op %d status %u\n", i, dequeued_ops[i]->status);
			/* Update the task status to error, we'll still process the
			 * rest of the crypto ops for this task though so they
			 * aren't left hanging.
			 */
			task->is_failed = true;
		}

		/* Return the associated src and dst mbufs by collecting them into
		 * an array that we can use the bulk API to free after the loop.
		 */
		*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset, uint64_t *) = 0;
		mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_src;
		if (dequeued_ops[i]->sym->m_dst) {
			mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst;
		}

		task->cryop_completed++;
		if (task->cryop_completed == task->cryop_total) {
			/* Complete the IO */
			spdk_accel_task_complete(&task->base, task->is_failed ? -EINVAL : 0);
		} else if (task->cryop_completed == task->cryop_submitted) {
			/* submit remaining crypto ops */
			int rc = accel_dpdk_cryptodev_process_task(crypto_ch, task);

			if (spdk_unlikely(rc)) {
				if (rc == -ENOMEM) {
					TAILQ_INSERT_TAIL(&crypto_ch->queued_tasks, task, link);
					continue;
				} else if (rc == -EALREADY) {
					/* -EALREADY means that a task is completed, but it might be unsafe to complete
					 * it if we are in the submission path. Since we are in the poller context, we can
					 * complete the task immediately */
					rc = 0;
				}
				spdk_accel_task_complete(&task->base, rc);
			}
		}
	}

	/* Now bulk free both mbufs and crypto operations. */
	if (num_dequeued_ops > 0) {
		rte_mempool_put_bulk(g_crypto_op_mp, (void **)dequeued_ops, num_dequeued_ops);
		assert(num_mbufs > 0);
		/* This also releases chained mbufs if any. */
		rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs);
	}

	assert(qp->num_enqueued_ops >= num_dequeued_ops);
	qp->num_enqueued_ops -= num_dequeued_ops;

	return num_dequeued_ops;
}
/* This is the poller for the crypto module. It uses a single API to dequeue whatever is ready at
 * the device. Then we need to decide if what we've got so far (including previous poller
 * runs) totals up to one or more complete tasks */
static int
accel_dpdk_cryptodev_poller(void *args)
{
	struct accel_dpdk_cryptodev_io_channel *crypto_ch = args;
	struct accel_dpdk_cryptodev_qp *qp;
	struct accel_dpdk_cryptodev_task *task, *task_tmp;
	TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks_tmp;
	uint32_t num_dequeued_ops = 0, num_enqueued_ops = 0, num_completed_tasks = 0;
	int i, rc;

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) {
		qp = crypto_ch->device_qp[i];
		/* Avoid polling "idle" qps since it may affect performance */
		if (qp && qp->num_enqueued_ops) {
			num_dequeued_ops += accel_dpdk_cryptodev_poll_qp(qp, crypto_ch);
		}
	}

	/* Retry tasks that could not be (fully) submitted earlier. Tasks that still get
	 * -ENOMEM are collected on a temporary list and swapped back at the end, so the
	 * whole list is scanned exactly once per poll. */
	if (!TAILQ_EMPTY(&crypto_ch->queued_tasks)) {
		TAILQ_INIT(&queued_tasks_tmp);

		TAILQ_FOREACH_SAFE(task, &crypto_ch->queued_tasks, link, task_tmp) {
			TAILQ_REMOVE(&crypto_ch->queued_tasks, task, link);
			rc = accel_dpdk_cryptodev_process_task(crypto_ch, task);
			if (spdk_unlikely(rc)) {
				if (rc == -ENOMEM) {
					TAILQ_INSERT_TAIL(&queued_tasks_tmp, task, link);
					/* Other queued tasks may belong to other qpairs,
					 * so process the whole list */
					continue;
				} else if (rc == -EALREADY) {
					/* -EALREADY means that a task is completed, but it might be unsafe to complete
					 * it if we are in the submission path. Since we are in the poller context, we can
					 * complete the task immediately */
					rc = 0;
				}
				spdk_accel_task_complete(&task->base, rc);
				num_completed_tasks++;
			} else {
				num_enqueued_ops++;
			}
		}

		TAILQ_SWAP(&crypto_ch->queued_tasks, &queued_tasks_tmp, accel_dpdk_cryptodev_task, link);
	}

	/* Drain tasks that finished during the submission path and were deferred here. */
	TAILQ_FOREACH_SAFE(task, &crypto_ch->completed_tasks, link, task_tmp) {
		TAILQ_REMOVE(&crypto_ch->completed_tasks, task, link);
		spdk_accel_task_complete(&task->base, 0);
		num_completed_tasks++;
	}

	return !!(num_dequeued_ops + num_enqueued_ops + num_completed_tasks);
}

/* Allocate the new mbuf of @remainder size with data pointed by @addr and attach
 * it to the @orig_mbuf. On success *_remainder is updated to the number of bytes
 * actually covered (may be less than requested due to physical contiguity or
 * ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN). */
static inline int
accel_dpdk_cryptodev_mbuf_chain_remainder(struct accel_dpdk_cryptodev_task *task,
		struct rte_mbuf *orig_mbuf, uint8_t *addr, uint64_t *_remainder)
{
	uint64_t phys_addr, phys_len, remainder = *_remainder;
	struct rte_mbuf *chain_mbuf;
	int rc;

	phys_len = remainder;
	phys_addr = spdk_vtophys((void *)addr, &phys_len);
	if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR)) {
		return -EFAULT;
	}
	remainder = spdk_min(remainder, phys_len);
	remainder = spdk_min(remainder, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, (struct rte_mbuf **)&chain_mbuf, 1);
	if (spdk_unlikely(rc)) {
		return -ENOMEM;
	}
	/* Store context in every mbuf as we don't know anything about completion order */
	*RTE_MBUF_DYNFIELD(chain_mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task;
	rte_pktmbuf_attach_extbuf(chain_mbuf, addr, phys_addr, remainder, &g_shinfo);
	rte_pktmbuf_append(chain_mbuf, remainder);

	/* Chained buffer is released by rte_pktmbuf_free_bulk() automatically. */
	rte_pktmbuf_chain(orig_mbuf, chain_mbuf);
	*_remainder = remainder;

	return 0;
}
/* Attach data buffer pointed by @addr to @mbuf. Return utilized len of the
 * contiguous space that was physically available (0 on vtophys failure). */
static inline uint64_t
accel_dpdk_cryptodev_mbuf_attach_buf(struct accel_dpdk_cryptodev_task *task, struct rte_mbuf *mbuf,
				     uint8_t *addr, uint32_t len)
{
	uint64_t phys_addr, phys_len;

	/* Store context in every mbuf as we don't know anything about completion order */
	*RTE_MBUF_DYNFIELD(mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task;

	phys_len = len;
	phys_addr = spdk_vtophys((void *)addr, &phys_len);
	if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR || phys_len == 0)) {
		return 0;
	}
	assert(phys_len <= len);
	phys_len = spdk_min(phys_len, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);

	/* Set the mbuf elements address and length. */
	rte_pktmbuf_attach_extbuf(mbuf, addr, phys_addr, phys_len, &g_shinfo);
	rte_pktmbuf_append(mbuf, phys_len);

	return phys_len;
}

/* Find the key handle (device-bound sessions) usable on this channel's qpair. */
static inline struct accel_dpdk_cryptodev_key_handle *
accel_dpdk_find_key_handle_in_channel(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
				      struct accel_dpdk_cryptodev_key_priv *key)
{
	struct accel_dpdk_cryptodev_key_handle *key_handle;

	if (key->driver == ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) {
		/* Crypto key is registered on all available devices while io_channel opens CQ/QP on a single device.
		 * We need to iterate a list of key entries to find a suitable device */
		TAILQ_FOREACH(key_handle, &key->dev_keys, link) {
			if (key_handle->device->cdev_id ==
			    crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->device->cdev_id) {
				return key_handle;
			}
		}
		return NULL;
	} else {
		return TAILQ_FIRST(&key->dev_keys);
	}
}

/* Bulk-allocate @count src mbufs, optional dst mbufs and crypto ops.
 * On failure everything allocated so far is released and -ENOMEM is returned. */
static inline int
accel_dpdk_cryptodev_task_alloc_resources(struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs,
		struct rte_crypto_op **crypto_ops, int count)
{
	int rc;

	/* Get the number of source mbufs that we need. These will always be 1:1 because we
	 * don't support chaining. The reason we don't is because of our decision to use
	 * LBA as IV, there can be no case where we'd need >1 mbuf per crypto op or the
	 * op would be > 1 LBA.
	 */
	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, count);
	if (rc) {
		SPDK_ERRLOG("Failed to get src_mbufs!\n");
		return -ENOMEM;
	}

	/* Get the same amount to describe the destination. If the crypto operation is
	 * inplace, the caller passes dst_mbufs == NULL and we skip this allocation. */
	if (dst_mbufs) {
		rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, dst_mbufs, count);
		if (rc) {
			SPDK_ERRLOG("Failed to get dst_mbufs!\n");
			goto err_free_src;
		}
	}

#ifdef __clang_analyzer__
	/* silence scan-build false positive */
	SPDK_CLANG_ANALYZER_PREINIT_PTR_ARRAY(crypto_ops, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE,
					      0x1000);
#endif
	/* Allocate crypto operations. */
	rc = rte_crypto_op_bulk_alloc(g_crypto_op_mp,
				      RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				      crypto_ops, count);
	if (rc < count) {
		SPDK_ERRLOG("Failed to allocate crypto ops! rc %d\n", rc);
		goto err_free_ops;
	}

	return 0;

err_free_ops:
	if (rc > 0) {
		/* rte_crypto_op_bulk_alloc may have returned a partial allocation */
		rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, rc);
	}
	if (dst_mbufs) {
		/* This also releases chained mbufs if any. */
		rte_pktmbuf_free_bulk(dst_mbufs, count);
	}
err_free_src:
	/* This also releases chained mbufs if any. */
	rte_pktmbuf_free_bulk(src_mbufs, count);

	return -ENOMEM;
}

/* Describe one block_size chunk of the current sgl position in @mbuf, chaining
 * extra mbufs when the block crosses a physical page/iov boundary. */
static inline int
accel_dpdk_cryptodev_mbuf_add_single_block(struct spdk_iov_sgl *sgl, struct rte_mbuf *mbuf,
		struct accel_dpdk_cryptodev_task *task)
{
	int rc;
	uint8_t *buf_addr;
	uint64_t phys_len;
	uint64_t remainder;
	uint64_t buf_len;

	assert(sgl->iov->iov_len > sgl->iov_offset);
	buf_len = spdk_min(task->base.block_size, sgl->iov->iov_len - sgl->iov_offset);
	buf_addr = sgl->iov->iov_base + sgl->iov_offset;
	phys_len = accel_dpdk_cryptodev_mbuf_attach_buf(task, mbuf, buf_addr, buf_len);
	if (spdk_unlikely(phys_len == 0)) {
		return -EFAULT;
	}
	buf_len = spdk_min(buf_len, phys_len);
	spdk_iov_sgl_advance(sgl, buf_len);

	/* Handle the case of page boundary. */
	assert(task->base.block_size >= buf_len);
	remainder = task->base.block_size - buf_len;
	while (remainder) {
		buf_len = spdk_min(remainder, sgl->iov->iov_len - sgl->iov_offset);
		buf_addr = sgl->iov->iov_base + sgl->iov_offset;
		rc = accel_dpdk_cryptodev_mbuf_chain_remainder(task, mbuf, buf_addr, &buf_len);
		if (spdk_unlikely(rc)) {
			return rc;
		}
		spdk_iov_sgl_advance(sgl, buf_len);
		remainder -= buf_len;
	}

	return 0;
}

/* Write the 16-byte IV into the crypto op's private area: the 64-bit LBA-derived
 * value in the low bytes, the rest zeroed. */
static inline void
accel_dpdk_cryptodev_op_set_iv(struct rte_crypto_op *crypto_op, uint64_t iv)
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(crypto_op, uint8_t *, ACCEL_DPDK_CRYPTODEV_IV_OFFSET);

	/* Set the IV - we use the LBA of the crypto_op */
	memset(iv_ptr, 0, ACCEL_DPDK_CRYPTODEV_IV_LENGTH);
	rte_memcpy(iv_ptr, &iv, sizeof(uint64_t));
}

/* Shift the arrays left by @num_enqueued_ops so only the @cryop_cnt not-yet-enqueued
 * entries remain at the front, ready to be released back to their pools. */
static inline void
accel_dpdk_cryptodev_update_resources_from_pools(struct rte_crypto_op **crypto_ops,
		struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs,
		uint32_t num_enqueued_ops, uint32_t cryop_cnt)
{
	memmove(crypto_ops, &crypto_ops[num_enqueued_ops], sizeof(crypto_ops[0]) * cryop_cnt);
	memmove(src_mbufs, &src_mbufs[num_enqueued_ops], sizeof(src_mbufs[0]) * cryop_cnt);
	if (dst_mbufs) {
		memmove(dst_mbufs, &dst_mbufs[num_enqueued_ops], sizeof(dst_mbufs[0]) * cryop_cnt);
	}
}
/* Build and enqueue crypto ops for (the remaining part of) @task on its driver's qpair.
 * Returns 0 when at least one op was enqueued, -ENOMEM when nothing could be submitted
 * (caller requeues the task), -EALREADY when the task finished entirely inside this call
 * (caller must complete it in a safe context), or a negative errno on hard failure. */
static int
accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
				  struct accel_dpdk_cryptodev_task *task)
{
	uint16_t num_enqueued_ops;
	uint32_t cryop_cnt;
	uint32_t crypto_len = task->base.block_size;
	uint64_t dst_length, total_length;
	uint32_t sgl_offset;
	uint32_t qp_capacity;
	uint64_t iv_start;
	uint32_t i, crypto_index;
	struct rte_crypto_op *crypto_ops[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
	struct rte_mbuf *src_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
	struct rte_mbuf *dst_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
	void *session;
	struct accel_dpdk_cryptodev_key_priv *priv;
	struct accel_dpdk_cryptodev_key_handle *key_handle;
	struct accel_dpdk_cryptodev_qp *qp;
	struct accel_dpdk_cryptodev_device *dev;
	struct spdk_iov_sgl src, dst = {};
	int rc;
	bool inplace = task->inplace;

	if (spdk_unlikely(!task->base.crypto_key ||
			  task->base.crypto_key->module_if != &g_accel_dpdk_cryptodev_module)) {
		return -EINVAL;
	}

	priv = task->base.crypto_key->priv;
	assert(priv->driver < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST);

	if (task->cryop_completed) {
		/* We continue to process remaining blocks */
		assert(task->cryop_submitted == task->cryop_completed);
		assert(task->cryop_total > task->cryop_completed);
		cryop_cnt = task->cryop_total - task->cryop_completed;
		sgl_offset = task->cryop_completed * crypto_len;
		/* IV is derived from the LBA, advance it by the number of blocks done */
		iv_start = task->base.iv + task->cryop_completed;
	} else {
		/* That is a new task */
		total_length = 0;
		for (i = 0; i < task->base.s.iovcnt; i++) {
			total_length += task->base.s.iovs[i].iov_len;
		}
		dst_length = 0;
		for (i = 0; i < task->base.d.iovcnt; i++) {
			dst_length += task->base.d.iovs[i].iov_len;
		}

		if (spdk_unlikely(total_length != dst_length || !total_length)) {
			return -ERANGE;
		}
		if (spdk_unlikely(total_length % task->base.block_size != 0)) {
			return -EINVAL;
		}

		cryop_cnt = total_length / task->base.block_size;
		task->cryop_total = cryop_cnt;
		sgl_offset = 0;
		iv_start = task->base.iv;
	}

	/* Limit the number of crypto ops that we can process at once */
	cryop_cnt = spdk_min(cryop_cnt, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);

	qp = crypto_ch->device_qp[priv->driver];
	assert(qp);
	dev = qp->device;
	assert(dev);
	assert(dev->qp_desc_nr >= qp->num_enqueued_ops);

	qp_capacity = dev->qp_desc_nr - qp->num_enqueued_ops;
	cryop_cnt = spdk_min(cryop_cnt, qp_capacity);
	if (spdk_unlikely(cryop_cnt == 0)) {
		/* QP is full */
		return -ENOMEM;
	}

	key_handle = accel_dpdk_find_key_handle_in_channel(crypto_ch, priv);
	if (spdk_unlikely(!key_handle)) {
		SPDK_ERRLOG("Failed to find a key handle, driver %s, cipher %s\n", g_driver_names[priv->driver],
			    g_cipher_names[priv->cipher]);
		return -EINVAL;
	}
	/* mlx5_pci binds keys to a specific device, we can't use a key with any device */
	assert(dev == key_handle->device || priv->driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI);

	if (task->base.op_code == ACCEL_OPC_ENCRYPT) {
		session = key_handle->session_encrypt;
	} else if (task->base.op_code == ACCEL_OPC_DECRYPT) {
		session = key_handle->session_decrypt;
	} else {
		return -EINVAL;
	}

	rc = accel_dpdk_cryptodev_task_alloc_resources(src_mbufs, inplace ? NULL : dst_mbufs,
			crypto_ops, cryop_cnt);
	if (rc) {
		return rc;
	}

	/* As we don't support chaining because of a decision to use LBA as IV, construction
	 * of crypto operations is straightforward. We build both the op, the mbuf and the
	 * dst_mbuf in our local arrays by looping through the length of the accel task and
	 * picking off LBA sized blocks of memory from the IOVs as we walk through them. Each
	 * LBA sized chunk of memory will correspond 1:1 to a crypto operation and a single
	 * mbuf per crypto operation.
	 */
	spdk_iov_sgl_init(&src, task->base.s.iovs, task->base.s.iovcnt, 0);
	spdk_iov_sgl_advance(&src, sgl_offset);
	if (!inplace) {
		spdk_iov_sgl_init(&dst, task->base.d.iovs, task->base.d.iovcnt, 0);
		spdk_iov_sgl_advance(&dst, sgl_offset);
	}

	for (crypto_index = 0; crypto_index < cryop_cnt; crypto_index++) {
		rc = accel_dpdk_cryptodev_mbuf_add_single_block(&src, src_mbufs[crypto_index], task);
		if (spdk_unlikely(rc)) {
			goto free_ops;
		}
		accel_dpdk_cryptodev_op_set_iv(crypto_ops[crypto_index], iv_start);
		iv_start++;

		/* Set the data to encrypt/decrypt length */
		crypto_ops[crypto_index]->sym->cipher.data.length = crypto_len;
		crypto_ops[crypto_index]->sym->cipher.data.offset = 0;
		rte_crypto_op_attach_sym_session(crypto_ops[crypto_index], session);

		/* link the mbuf to the crypto op. */
		crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index];

		if (inplace) {
			crypto_ops[crypto_index]->sym->m_dst = NULL;
		} else {
#ifndef __clang_analyzer__
			/* scan-build thinks that dst_mbufs is not initialized */
			rc = accel_dpdk_cryptodev_mbuf_add_single_block(&dst, dst_mbufs[crypto_index], task);
			if (spdk_unlikely(rc)) {
				goto free_ops;
			}
			crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index];
#endif
		}
	}

	/* Enqueue everything we've got but limit by the max number of descriptors we
	 * configured the crypto device for.
	 */
	num_enqueued_ops = rte_cryptodev_enqueue_burst(dev->cdev_id, qp->qp, crypto_ops, cryop_cnt);
	/* This value is used in the completion callback to determine when the accel task is complete. */
	task->cryop_submitted += num_enqueued_ops;
	qp->num_enqueued_ops += num_enqueued_ops;
	/* We were unable to enqueue everything but did get some, so need to decide what
	 * to do based on the status of the last op.
	 */
	if (num_enqueued_ops < cryop_cnt) {
		switch (crypto_ops[num_enqueued_ops]->status) {
		case RTE_CRYPTO_OP_STATUS_SUCCESS:
			/* Crypto operation might be completed successfully but enqueuing to a completion ring might fail.
			 * That might happen with SW PMDs like openssl
			 * We can't retry such operation on next turn since if crypto operation was inplace, we can encrypt/
			 * decrypt already processed buffer. See github issue #2907 for more details.
			 * Handle this case as the crypto op was completed successfully - increment cryop_submitted and
			 * cryop_completed.
			 * We won't receive a completion for such operation, so we need to cleanup mbufs and crypto_ops */
			assert(task->cryop_total > task->cryop_completed);
			task->cryop_completed++;
			task->cryop_submitted++;
			if (task->cryop_completed == task->cryop_total) {
				assert(num_enqueued_ops == 0);
				/* All crypto ops are completed. We can't complete the task immediately since this function might be
				 * called in scope of spdk_accel_submit_* function and user's logic in the completion callback
				 * might lead to stack overflow */
				cryop_cnt -= num_enqueued_ops;
				accel_dpdk_cryptodev_update_resources_from_pools(crypto_ops, src_mbufs,
						inplace ? NULL : dst_mbufs,
						num_enqueued_ops, cryop_cnt);
				rc = -EALREADY;
				goto free_ops;
			}
		/* fallthrough */
		case RTE_CRYPTO_OP_STATUS_NOT_PROCESSED:
			if (num_enqueued_ops == 0) {
				/* Nothing was submitted. Free crypto ops and mbufs, treat this case as NOMEM */
				rc = -ENOMEM;
				goto free_ops;
			}
			/* Part of the crypto operations were not submitted, release mbufs and crypto ops.
			 * The rest of the crypto ops will be submitted again once the current batch is completed */
			cryop_cnt -= num_enqueued_ops;
			accel_dpdk_cryptodev_update_resources_from_pools(crypto_ops, src_mbufs,
					inplace ? NULL : dst_mbufs,
					num_enqueued_ops, cryop_cnt);
			rc = 0;
			goto free_ops;
		default:
			/* For all other statuses, mark task as failed so that the poller will pick
			 * the failure up for the overall task status.
			 */
			task->is_failed = true;
			if (num_enqueued_ops == 0) {
				/* If nothing was enqueued, but the last one wasn't because of
				 * busy, fail it now as the poller won't know anything about it.
				 */
				rc = -EINVAL;
				goto free_ops;
			}
			break;
		}
	}

	return 0;

	/* Error cleanup paths. */
free_ops:
	if (!inplace) {
		/* This also releases chained mbufs if any. */
		rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt);
	}
	rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, cryop_cnt);
	/* This also releases chained mbufs if any. */
	rte_pktmbuf_free_bulk(src_mbufs, cryop_cnt);
	return rc;
}

/* Claim the first free qpair of a device of the given driver type.
 * Caller must hold g_device_lock. Returns NULL when none is free. */
static inline struct accel_dpdk_cryptodev_qp *
accel_dpdk_cryptodev_get_next_device_qpair(enum accel_dpdk_cryptodev_driver_type type)
{
	struct accel_dpdk_cryptodev_device *device, *device_tmp;
	struct accel_dpdk_cryptodev_qp *qpair;

	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, device_tmp) {
		if (device->type != type) {
			continue;
		}
		TAILQ_FOREACH(qpair, &device->qpairs, link) {
			if (!qpair->in_use) {
				qpair->in_use = true;
				return qpair;
			}
		}
	}

	return NULL;
}
/* Helper function for the channel creation callback.
 * Returns the number of drivers assigned to the channel */
static uint32_t
accel_dpdk_cryptodev_assign_device_qps(struct accel_dpdk_cryptodev_io_channel *crypto_ch)
{
	struct accel_dpdk_cryptodev_device *device;
	struct accel_dpdk_cryptodev_qp *device_qp;
	uint32_t num_drivers = 0;
	bool qat_found = false;

	pthread_mutex_lock(&g_device_lock);

	TAILQ_FOREACH(device, &g_crypto_devices, link) {
		if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT && !qat_found) {
			/* For some QAT devices, the optimal qp to use is every 32nd as this spreads the
			 * workload out over the multiple virtual functions in the device. For the devices
			 * where this isn't the case, it doesn't hurt.
			 */
			TAILQ_FOREACH(device_qp, &device->qpairs, link) {
				if (device_qp->index != g_next_qat_index) {
					continue;
				}
				if (device_qp->in_use == false) {
					assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] == NULL);
					crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] = device_qp;
					device_qp->in_use = true;
					g_next_qat_index = (g_next_qat_index + ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD) % g_qat_total_qp;
					qat_found = true;
					num_drivers++;
					break;
				} else {
					/* if the preferred index is used, skip to the next one in this set. */
					g_next_qat_index = (g_next_qat_index + 1) % g_qat_total_qp;
				}
			}
		}
	}

	/* For AESNI_MB and MLX5_PCI select devices in round-robin manner */
	device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB);
	if (device_qp) {
		assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] == NULL);
		crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = device_qp;
		num_drivers++;
	}

	device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI);
	if (device_qp) {
		assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] == NULL);
		crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] = device_qp;
		num_drivers++;
	}

	pthread_mutex_unlock(&g_device_lock);

	return num_drivers;
}

/* io_channel destroy callback: release the qpairs claimed by this channel and
 * unregister its poller. */
static void
_accel_dpdk_cryptodev_destroy_cb(void *io_device, void *ctx_buf)
{
	struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)
			ctx_buf;
	int i;

	pthread_mutex_lock(&g_device_lock);
	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) {
		if (crypto_ch->device_qp[i]) {
			crypto_ch->device_qp[i]->in_use = false;
		}
	}
	pthread_mutex_unlock(&g_device_lock);

	spdk_poller_unregister(&crypto_ch->poller);
}

/* io_channel create callback: register the completion poller and claim one qpair
 * per available driver; fails if no driver could be assigned. */
static int
_accel_dpdk_cryptodev_create_cb(void *io_device, void *ctx_buf)
{
	struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)
			ctx_buf;

	crypto_ch->poller = SPDK_POLLER_REGISTER(accel_dpdk_cryptodev_poller, crypto_ch, 0);
	if (!accel_dpdk_cryptodev_assign_device_qps(crypto_ch)) {
		SPDK_ERRLOG("No crypto drivers assigned\n");
		spdk_poller_unregister(&crypto_ch->poller);
		return -EINVAL;
	}

	/* We use this to queue tasks when qpair is full or no resources in pools */
	TAILQ_INIT(&crypto_ch->queued_tasks);
909 TAILQ_INIT(&crypto_ch->completed_tasks); 910 911 return 0; 912 } 913 914 static struct spdk_io_channel * 915 accel_dpdk_cryptodev_get_io_channel(void) 916 { 917 return spdk_get_io_channel(&g_accel_dpdk_cryptodev_module); 918 } 919 920 static size_t 921 accel_dpdk_cryptodev_ctx_size(void) 922 { 923 return sizeof(struct accel_dpdk_cryptodev_task); 924 } 925 926 static bool 927 accel_dpdk_cryptodev_supports_opcode(enum accel_opcode opc) 928 { 929 switch (opc) { 930 case ACCEL_OPC_ENCRYPT: 931 case ACCEL_OPC_DECRYPT: 932 return true; 933 default: 934 return false; 935 } 936 } 937 938 static int 939 accel_dpdk_cryptodev_submit_tasks(struct spdk_io_channel *_ch, struct spdk_accel_task *_task) 940 { 941 struct accel_dpdk_cryptodev_task *task = SPDK_CONTAINEROF(_task, struct accel_dpdk_cryptodev_task, 942 base); 943 struct accel_dpdk_cryptodev_io_channel *ch = spdk_io_channel_get_ctx(_ch); 944 int rc; 945 946 task->cryop_completed = 0; 947 task->cryop_submitted = 0; 948 task->cryop_total = 0; 949 task->inplace = true; 950 task->is_failed = false; 951 952 /* Check if crypto operation is inplace: no destination or source == destination */ 953 if (task->base.s.iovcnt == task->base.d.iovcnt) { 954 if (memcmp(task->base.s.iovs, task->base.d.iovs, sizeof(struct iovec) * task->base.s.iovcnt) != 0) { 955 task->inplace = false; 956 } 957 } else if (task->base.d.iovcnt != 0) { 958 task->inplace = false; 959 } 960 961 rc = accel_dpdk_cryptodev_process_task(ch, task); 962 if (spdk_unlikely(rc)) { 963 if (rc == -ENOMEM) { 964 TAILQ_INSERT_TAIL(&ch->queued_tasks, task, link); 965 rc = 0; 966 } else if (rc == -EALREADY) { 967 /* -EALREADY means that a task is completed, but it might be unsafe to complete 968 * it if we are in the submission path. 
Hence put it into a dedicated queue to and
			 * process it during polling */
			TAILQ_INSERT_TAIL(&ch->completed_tasks, task, link);
			rc = 0;
		}
	}

	return rc;
}

/* Dummy function used by DPDK to free ext attached buffers to mbufs, we free them ourselves but
 * this callback has to be here. */
static void
shinfo_free_cb(void *arg1, void *arg2)
{
}

/* Configure and start cryptodev `index`, pre-creating all of its queue pairs.
 * `num_lcores` is used to verify enough unique qpairs exist per core. */
static int
accel_dpdk_cryptodev_create(uint8_t index, uint16_t num_lcores)
{
	struct rte_cryptodev_qp_conf qp_conf = {
		.mp_session = g_session_mp,
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
		.mp_session_private = g_session_mp_priv
#endif
	};
	/* Setup queue pairs. */
	struct rte_cryptodev_config conf = { .socket_id = SPDK_ENV_SOCKET_ID_ANY };
	struct accel_dpdk_cryptodev_device *device;
	uint8_t j, cdev_id, cdrv_id;
	struct accel_dpdk_cryptodev_qp *dev_qp;
	int rc;

	device = calloc(1, sizeof(*device));
	if (!device) {
		return -ENOMEM;
	}

	/* Get details about this device.
 */
	rte_cryptodev_info_get(index, &device->cdev_info);
	cdrv_id = device->cdev_info.driver_id;
	cdev_id = device->cdev_id = index;

	/* Classify the device by driver name; this also selects the qp descriptor count. */
	if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) {
		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT;
	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) {
		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) {
		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5;
		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI;
	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT_ASYM) == 0) {
		/* ACCEL_DPDK_CRYPTODEV_QAT_ASYM devices are not supported at this time. */
		rc = 0;
		goto err;
	} else {
		SPDK_ERRLOG("Failed to start device %u. Invalid driver name \"%s\"\n",
			    cdev_id, device->cdev_info.driver_name);
		rc = -EINVAL;
		goto err;
	}

	/* Before going any further, make sure we have enough resources for this
	 * device type to function. We need a unique queue pair per core across each
	 * device type to remain lockless....
	 */
	if ((rte_cryptodev_device_count_by_driver(cdrv_id) *
	     device->cdev_info.max_nb_queue_pairs) < num_lcores) {
		SPDK_ERRLOG("Insufficient unique queue pairs available for %s\n",
			    device->cdev_info.driver_name);
		SPDK_ERRLOG("Either add more crypto devices or decrease core count\n");
		rc = -EINVAL;
		goto err;
	}

	conf.nb_queue_pairs = device->cdev_info.max_nb_queue_pairs;
	rc = rte_cryptodev_configure(cdev_id, &conf);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to configure cryptodev %u: error %d\n",
			    cdev_id, rc);
		rc = -EINVAL;
		goto err;
	}

	/* Pre-setup all potential qpairs now and assign them in the channel
	 * callback. If we were to create them there, we'd have to stop the
	 * entire device affecting all other threads that might be using it
	 * even on other queue pairs.
	 */
	qp_conf.nb_descriptors = device->qp_desc_nr;
	for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
		rc = rte_cryptodev_queue_pair_setup(cdev_id, j, &qp_conf, SOCKET_ID_ANY);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to setup queue pair %u on "
				    "cryptodev %u: error %d\n", j, cdev_id, rc);
			rc = -EINVAL;
			goto err_qp_setup;
		}
	}

	rc = rte_cryptodev_start(cdev_id);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to start device %u: error %d\n", cdev_id, rc);
		rc = -EINVAL;
		goto err_dev_start;
	}

	TAILQ_INIT(&device->qpairs);
	/* Build up lists of device/qp combinations per PMD */
	for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
		dev_qp = calloc(1, sizeof(*dev_qp));
		if (!dev_qp) {
			rc = -ENOMEM;
			goto err_qp_alloc;
		}
		dev_qp->device = device;
		dev_qp->qp = j;
		dev_qp->in_use = false;
		TAILQ_INSERT_TAIL(&device->qpairs, dev_qp, link);
		if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
			/* Global index used by the QAT VF-spread qp assignment. */
			dev_qp->index = g_qat_total_qp++;
		}
	}
	/* Add to our list of
available crypto devices. */ 1093 TAILQ_INSERT_TAIL(&g_crypto_devices, device, link); 1094 1095 return 0; 1096 1097 err_qp_alloc: 1098 TAILQ_FOREACH(dev_qp, &device->qpairs, link) { 1099 if (dev_qp->device->cdev_id != device->cdev_id) { 1100 continue; 1101 } 1102 free(dev_qp); 1103 if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) { 1104 assert(g_qat_total_qp); 1105 g_qat_total_qp--; 1106 } 1107 } 1108 rte_cryptodev_stop(cdev_id); 1109 err_dev_start: 1110 err_qp_setup: 1111 rte_cryptodev_close(cdev_id); 1112 err: 1113 free(device); 1114 1115 return rc; 1116 } 1117 1118 static void 1119 accel_dpdk_cryptodev_release(struct accel_dpdk_cryptodev_device *device) 1120 { 1121 struct accel_dpdk_cryptodev_qp *dev_qp, *tmp; 1122 1123 assert(device); 1124 1125 TAILQ_FOREACH_SAFE(dev_qp, &device->qpairs, link, tmp) { 1126 free(dev_qp); 1127 } 1128 if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) { 1129 assert(g_qat_total_qp >= device->cdev_info.max_nb_queue_pairs); 1130 g_qat_total_qp -= device->cdev_info.max_nb_queue_pairs; 1131 } 1132 rte_cryptodev_stop(device->cdev_id); 1133 rte_cryptodev_close(device->cdev_id); 1134 free(device); 1135 } 1136 1137 static int 1138 accel_dpdk_cryptodev_init(void) 1139 { 1140 uint8_t cdev_count; 1141 uint8_t cdev_id; 1142 int i, rc; 1143 struct accel_dpdk_cryptodev_device *device, *tmp_dev; 1144 unsigned int max_sess_size = 0, sess_size; 1145 uint16_t num_lcores = rte_lcore_count(); 1146 char aesni_args[32]; 1147 1148 /* Only the first call via module init should init the crypto drivers. */ 1149 if (g_session_mp != NULL) { 1150 return 0; 1151 } 1152 1153 /* We always init ACCEL_DPDK_CRYPTODEV_AESNI_MB */ 1154 snprintf(aesni_args, sizeof(aesni_args), "max_nb_queue_pairs=%d", 1155 ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP); 1156 rc = rte_vdev_init(ACCEL_DPDK_CRYPTODEV_AESNI_MB, aesni_args); 1157 if (rc) { 1158 SPDK_NOTICELOG("Failed to create virtual PMD %s: error %d. " 1159 "Possibly %s is not supported by DPDK library. 
" "Keep going...\n", ACCEL_DPDK_CRYPTODEV_AESNI_MB, rc, ACCEL_DPDK_CRYPTODEV_AESNI_MB);
	}

	/* If we have no crypto devices, there's no reason to continue. */
	cdev_count = rte_cryptodev_count();
	SPDK_NOTICELOG("Found crypto devices: %d\n", (int)cdev_count);
	if (cdev_count == 0) {
		return 0;
	}

	/* Register the dynamic mbuf field this module uses per mbuf. */
	g_mbuf_offset = rte_mbuf_dynfield_register(&rte_mbuf_dynfield_io_context);
	if (g_mbuf_offset < 0) {
		SPDK_ERRLOG("error registering dynamic field with DPDK\n");
		return -EINVAL;
	}

	/* Create global mempools, shared by all devices regardless of type */
	/* First determine max session size, most pools are shared by all the devices,
	 * so we need to find the global max sessions size. */
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size) {
			max_sess_size = sess_size;
		}
	}

#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
	/* Older DPDK keeps private session data in a separate mempool. */
	g_session_mp_priv = rte_mempool_create("dpdk_crypto_ses_mp_priv",
					       ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0,
					       NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (g_session_mp_priv == NULL) {
		SPDK_ERRLOG("Cannot create private session pool max size 0x%x\n", max_sess_size);
		return -ENOMEM;
	}

	/* When session private data mempool allocated, the element size for the session mempool
	 * should be 0. */
	max_sess_size = 0;
#endif

	g_session_mp = rte_cryptodev_sym_session_pool_create("dpdk_crypto_ses_mp",
			ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0,
			SOCKET_ID_ANY);
	if (g_session_mp == NULL) {
		SPDK_ERRLOG("Cannot create session pool max size 0x%x\n", max_sess_size);
		rc = -ENOMEM;
		goto error_create_session_mp;
	}

	g_mbuf_mp = rte_pktmbuf_pool_create("dpdk_crypto_mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS,
					    ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE,
					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
	if (g_mbuf_mp == NULL) {
		SPDK_ERRLOG("Cannot create mbuf pool\n");
		rc = -ENOMEM;
		goto error_create_mbuf;
	}

	/* We use per op private data as suggested by DPDK and to store the IV and
	 * our own struct for queueing ops. */
	g_crypto_op_mp = rte_crypto_op_pool_create("dpdk_crypto_op_mp",
			 RTE_CRYPTO_OP_TYPE_SYMMETRIC, ACCEL_DPDK_CRYPTODEV_NUM_MBUFS, ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE,
			 (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_sym_xform)) +
			 ACCEL_DPDK_CRYPTODEV_IV_LENGTH, rte_socket_id());
	if (g_crypto_op_mp == NULL) {
		SPDK_ERRLOG("Cannot create op pool\n");
		rc = -ENOMEM;
		goto error_create_op;
	}

	/* Init all devices */
	for (i = 0; i < cdev_count; i++) {
		rc = accel_dpdk_cryptodev_create(i, num_lcores);
		if (rc) {
			goto err;
		}
	}

	g_shinfo.free_cb = shinfo_free_cb;

	spdk_io_device_register(&g_accel_dpdk_cryptodev_module, _accel_dpdk_cryptodev_create_cb,
				_accel_dpdk_cryptodev_destroy_cb, sizeof(struct accel_dpdk_cryptodev_io_channel),
				"accel_dpdk_cryptodev");

	return 0;

	/* Error cleanup paths.
 */
err:
	/* Unwind any devices created so far, then free the pools in reverse
	 * order of creation. */
	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp_dev) {
		TAILQ_REMOVE(&g_crypto_devices, device, link);
		accel_dpdk_cryptodev_release(device);
	}
	rte_mempool_free(g_crypto_op_mp);
	g_crypto_op_mp = NULL;
error_create_op:
	rte_mempool_free(g_mbuf_mp);
	g_mbuf_mp = NULL;
error_create_mbuf:
	rte_mempool_free(g_session_mp);
	g_session_mp = NULL;
error_create_session_mp:
	if (g_session_mp_priv != NULL) {
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}
	return rc;
}

/* io_device unregister callback: release every device, uninit the AESNI_MB
 * vdev and the global mempools, then tell the accel framework we are done. */
static void
accel_dpdk_cryptodev_fini_cb(void *io_device)
{
	struct accel_dpdk_cryptodev_device *device, *tmp;

	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp) {
		TAILQ_REMOVE(&g_crypto_devices, device, link);
		accel_dpdk_cryptodev_release(device);
	}
	rte_vdev_uninit(ACCEL_DPDK_CRYPTODEV_AESNI_MB);

	rte_mempool_free(g_crypto_op_mp);
	rte_mempool_free(g_mbuf_mp);
	rte_mempool_free(g_session_mp);
	if (g_session_mp_priv != NULL) {
		rte_mempool_free(g_session_mp_priv);
	}

	spdk_accel_module_finish();
}

/* Called when the entire module is being torn down.
 */
static void
accel_dpdk_cryptodev_fini(void *ctx)
{
	/* g_crypto_op_mp is only non-NULL once init succeeded, i.e. once the
	 * io_device was registered and must be unregistered. */
	if (g_crypto_op_mp) {
		spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, accel_dpdk_cryptodev_fini_cb);
	}
}

/* Free a DPDK symmetric session; the API signature differs across DPDK releases. */
static void
accel_dpdk_cryptodev_key_handle_session_free(struct accel_dpdk_cryptodev_device *device,
		void *session)
{
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
	assert(device != NULL);

	rte_cryptodev_sym_session_free(device->cdev_id, session);
#else
	rte_cryptodev_sym_session_free(session);
#endif
}

/* Create (and, on pre-22.11 DPDK, separately init) a symmetric session for the
 * given cipher xform.  Returns NULL on failure. */
static void *
accel_dpdk_cryptodev_key_handle_session_create(struct accel_dpdk_cryptodev_device *device,
		struct rte_crypto_sym_xform *cipher_xform)
{
	void *session;

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
	session = rte_cryptodev_sym_session_create(device->cdev_id, cipher_xform, g_session_mp);
#else
	session = rte_cryptodev_sym_session_create(g_session_mp);
	if (!session) {
		return NULL;
	}

	if (rte_cryptodev_sym_session_init(device->cdev_id, session, cipher_xform, g_session_mp_priv) < 0) {
		accel_dpdk_cryptodev_key_handle_session_free(device, session);
		return NULL;
	}
#endif

	return session;
}

/* Fill in the cipher xform for `key` and create the encrypt and decrypt
 * sessions on key_handle->device.  Returns 0 on success, -EINVAL on failure. */
static int
accel_dpdk_cryptodev_key_handle_configure(struct spdk_accel_crypto_key *key,
		struct accel_dpdk_cryptodev_key_handle *key_handle)
{
	struct accel_dpdk_cryptodev_key_priv *priv = key->priv;

	key_handle->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	/* IV lives in the op's private data area at a fixed offset. */
	key_handle->cipher_xform.cipher.iv.offset = ACCEL_DPDK_CRYPTODEV_IV_OFFSET;
	key_handle->cipher_xform.cipher.iv.length = ACCEL_DPDK_CRYPTODEV_IV_LENGTH;

	switch (priv->cipher) {
	case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC:
		key_handle->cipher_xform.cipher.key.data = key->key;
		key_handle->cipher_xform.cipher.key.length = key->key_size;
		key_handle->cipher_xform.cipher.algo = 
RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS:
		/* XTS uses the two keys concatenated into priv->xts_key. */
		key_handle->cipher_xform.cipher.key.data = priv->xts_key;
		key_handle->cipher_xform.cipher.key.length = key->key_size + key->key2_size;
		key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS;
		break;
	default:
		SPDK_ERRLOG("Invalid cipher name %s.\n", key->param.cipher);
		return -EINVAL;
	}

	key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	key_handle->session_encrypt = accel_dpdk_cryptodev_key_handle_session_create(key_handle->device,
				      &key_handle->cipher_xform);
	if (!key_handle->session_encrypt) {
		SPDK_ERRLOG("Failed to init encrypt session\n");
		return -EINVAL;
	}

	key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	key_handle->session_decrypt = accel_dpdk_cryptodev_key_handle_session_create(key_handle->device,
				      &key_handle->cipher_xform);
	if (!key_handle->session_decrypt) {
		SPDK_ERRLOG("Failed to init decrypt session:");
		/* Don't leak the already-created encrypt session. */
		accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_encrypt);
		return -EINVAL;
	}

	return 0;
}

/* Sanity-check driver/cipher/key-length combinations before any sessions are
 * created.  Returns 0 on success, -1 on any invalid parameter. */
static int
accel_dpdk_cryptodev_validate_parameters(enum accel_dpdk_cryptodev_driver_type driver,
		enum accel_dpdk_crypto_dev_cipher_type cipher, struct spdk_accel_crypto_key *key)
{
	/* Check that all required parameters exist */
	switch (cipher) {
	case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC:
		if (!key->key || !key->key_size) {
			SPDK_ERRLOG("ACCEL_DPDK_CRYPTODEV_AES_CBC requires a key\n");
			return -1;
		}
		if (key->key2 || key->key2_size) {
			SPDK_ERRLOG("ACCEL_DPDK_CRYPTODEV_AES_CBC doesn't use key2\n");
			return -1;
		}
		break;
	case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS:
		if (!key->key || !key->key_size || !key->key2 || !key->key2_size) {
			SPDK_ERRLOG("ACCEL_DPDK_CRYPTODEV_AES_XTS requires 
both key and key2\n"); 1399 return -1; 1400 } 1401 break; 1402 default: 1403 return -1; 1404 } 1405 1406 /* Check driver/cipher combinations and key lengths */ 1407 switch (cipher) { 1408 case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC: 1409 if (driver == ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) { 1410 SPDK_ERRLOG("Driver %s only supports cipher %s\n", 1411 g_driver_names[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI], 1412 g_cipher_names[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS]); 1413 return -1; 1414 } 1415 if (key->key_size != ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH) { 1416 SPDK_ERRLOG("Invalid key size %zu for cipher %s, should be %d\n", key->key_size, 1417 g_cipher_names[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC], ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH); 1418 return -1; 1419 } 1420 break; 1421 case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS: 1422 if (key->key_size != key->key2_size) { 1423 SPDK_ERRLOG("Cipher %s requires equal key and key2 sizes\n", g_cipher_names[driver]); 1424 return -1; 1425 } 1426 switch (driver) { 1427 case ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI: 1428 if (key->key_size != ACCEL_DPDK_CRYPTODEV_AES_XTS_128_BLOCK_KEY_LENGTH && 1429 key->key_size != ACCEL_DPDK_CRYPTODEV_AES_XTS_256_BLOCK_KEY_LENGTH) { 1430 SPDK_ERRLOG("Invalid key size %zu for driver %s, cipher %s, supported %d or %d\n", 1431 key->key_size, g_driver_names[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI], 1432 g_cipher_names[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS], 1433 ACCEL_DPDK_CRYPTODEV_AES_XTS_128_BLOCK_KEY_LENGTH, 1434 ACCEL_DPDK_CRYPTODEV_AES_XTS_256_BLOCK_KEY_LENGTH); 1435 return -1; 1436 } 1437 break; 1438 case ACCEL_DPDK_CRYPTODEV_DRIVER_QAT: 1439 case ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB: 1440 if (key->key_size != ACCEL_DPDK_CRYPTODEV_AES_XTS_128_BLOCK_KEY_LENGTH) { 1441 SPDK_ERRLOG("Invalid key size %zu, supported %d\n", key->key_size, 1442 ACCEL_DPDK_CRYPTODEV_AES_XTS_128_BLOCK_KEY_LENGTH); 1443 return -1; 1444 } 1445 break; 1446 default: 1447 SPDK_ERRLOG("Incorrect driver type %d\n", driver); 1448 assert(0); 1449 
			return -1;
		}
		break;
	}

	return 0;
}

/* Destroy all per-device session handles for `key` and scrub key material
 * before freeing it. */
static void
accel_dpdk_cryptodev_key_deinit(struct spdk_accel_crypto_key *key)
{
	struct accel_dpdk_cryptodev_key_handle *key_handle, *key_handle_tmp;
	struct accel_dpdk_cryptodev_key_priv *priv = key->priv;

	TAILQ_FOREACH_SAFE(key_handle, &priv->dev_keys, link, key_handle_tmp) {
		accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_encrypt);
		accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_decrypt);
		TAILQ_REMOVE(&priv->dev_keys, key_handle, link);
		/* Scrub the handle before freeing - it references key material. */
		spdk_memset_s(key_handle, sizeof(*key_handle), 0, sizeof(*key_handle));
		free(key_handle);
	}

	if (priv->xts_key) {
		/* Wipe the concatenated XTS key bytes before releasing them. */
		spdk_memset_s(priv->xts_key, key->key_size + key->key2_size, 0, key->key_size + key->key2_size);
	}
	free(priv->xts_key);
	free(priv);
}

/* Parse and validate `key`, then create a session handle on each device of
 * the configured driver type.  Returns 0, -EINVAL, -ENOMEM or -ENODEV. */
static int
accel_dpdk_cryptodev_key_init(struct spdk_accel_crypto_key *key)
{
	struct accel_dpdk_cryptodev_device *device;
	struct accel_dpdk_cryptodev_key_priv *priv;
	struct accel_dpdk_cryptodev_key_handle *key_handle;
	enum accel_dpdk_cryptodev_driver_type driver;
	enum accel_dpdk_crypto_dev_cipher_type cipher;
	int rc;

	if (!key->param.cipher) {
		SPDK_ERRLOG("Cipher is missing\n");
		return -EINVAL;
	}

	if (strcmp(key->param.cipher, ACCEL_DPDK_CRYPTODEV_AES_CBC) == 0) {
		cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC;
	} else if (strcmp(key->param.cipher, ACCEL_DPDK_CRYPTODEV_AES_XTS) == 0) {
		cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS;
	} else {
		SPDK_ERRLOG("Unsupported cipher name %s.\n", key->param.cipher);
		return -EINVAL;
	}

	driver = g_dpdk_cryptodev_driver;

	if (accel_dpdk_cryptodev_validate_parameters(driver, cipher, key)) {
		return -EINVAL;
	}

	priv = calloc(1, sizeof(*priv));
	if (!priv) {
		SPDK_ERRLOG("Memory allocation failed\n");
		return -ENOMEM;
	}
	key->priv = priv;
	priv->driver = driver;
	priv->cipher = cipher;
	TAILQ_INIT(&priv->dev_keys);

	if (cipher == ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS) {
		/* DPDK expects the keys to be concatenated together. */
		priv->xts_key = calloc(key->key_size + key->key2_size + 1, sizeof(char));
		if (!priv->xts_key) {
			SPDK_ERRLOG("Memory allocation failed\n");
			accel_dpdk_cryptodev_key_deinit(key);
			return -ENOMEM;
		}
		memcpy(priv->xts_key, key->key, key->key_size);
		memcpy(priv->xts_key + key->key_size, key->key2, key->key2_size);
	}

	pthread_mutex_lock(&g_device_lock);
	TAILQ_FOREACH(device, &g_crypto_devices, link) {
		if (device->type != driver) {
			continue;
		}
		key_handle = calloc(1, sizeof(*key_handle));
		if (!key_handle) {
			pthread_mutex_unlock(&g_device_lock);
			accel_dpdk_cryptodev_key_deinit(key);
			return -ENOMEM;
		}
		key_handle->device = device;
		TAILQ_INSERT_TAIL(&priv->dev_keys, key_handle, link);
		rc = accel_dpdk_cryptodev_key_handle_configure(key, key_handle);
		if (rc) {
			pthread_mutex_unlock(&g_device_lock);
			accel_dpdk_cryptodev_key_deinit(key);
			return rc;
		}
		if (driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) {
			/* For MLX5_PCI we need to register a key on each device since
			 * the key is bound to a specific Protection Domain,
			 * so don't break the loop */
			break;
		}
	}
	pthread_mutex_unlock(&g_device_lock);

	if (TAILQ_EMPTY(&priv->dev_keys)) {
		/* No device of the requested driver type exists. */
		free(priv);
		return -ENODEV;
	}

	return 0;
}

/* Emit the JSON-RPC calls needed to recreate this module's configuration. */
static void
accel_dpdk_cryptodev_write_config_json(struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "dpdk_cryptodev_scan_accel_module");
	spdk_json_write_object_end(w);

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "dpdk_cryptodev_set_driver");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "driver_name", g_driver_names[g_dpdk_cryptodev_driver]);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

/* Accel module descriptor: wires this module's callbacks into the accel
 * framework. */
static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module = {
	.module_init = accel_dpdk_cryptodev_init,
	.module_fini = accel_dpdk_cryptodev_fini,
	.write_config_json = accel_dpdk_cryptodev_write_config_json,
	.get_ctx_size = accel_dpdk_cryptodev_ctx_size,
	.name = "dpdk_cryptodev",
	.supports_opcode = accel_dpdk_cryptodev_supports_opcode,
	.get_io_channel = accel_dpdk_cryptodev_get_io_channel,
	.submit_tasks = accel_dpdk_cryptodev_submit_tasks,
	.crypto_key_init = accel_dpdk_cryptodev_key_init,
	.crypto_key_deinit = accel_dpdk_cryptodev_key_deinit,
};