1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2018 Intel Corporation. 3 * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES. 4 * All rights reserved. 5 */ 6 7 #include "accel_dpdk_cryptodev.h" 8 9 #include "spdk/accel.h" 10 #include "spdk/accel_module.h" 11 #include "spdk/env.h" 12 #include "spdk/likely.h" 13 #include "spdk/thread.h" 14 #include "spdk/util.h" 15 #include "spdk/log.h" 16 #include "spdk/json.h" 17 #include "spdk_internal/sgl.h" 18 19 #include <rte_bus_vdev.h> 20 #include <rte_crypto.h> 21 #include <rte_cryptodev.h> 22 #include <rte_mbuf_dyn.h> 23 #include <rte_version.h> 24 25 /* The VF spread is the number of queue pairs between virtual functions, we use this to 26 * load balance the QAT device. 27 */ 28 #define ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD 32 29 30 /* This controls how many ops will be dequeued from the crypto driver in one run 31 * of the poller. It is mainly a performance knob as it effectively determines how 32 * much work the poller has to do. However even that can vary between crypto drivers 33 * as the ACCEL_DPDK_CRYPTODEV_AESNI_MB driver for example does all the crypto work on dequeue whereas the 34 * QAT driver just dequeues what has been completed already. 35 */ 36 #define ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE 64 37 38 #define ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE (128) 39 40 /* The number of MBUFS we need must be a power of two and to support other small IOs 41 * in addition to the limits mentioned above, we go to the next power of two. It is 42 * big number because it is one mempool for source and destination mbufs. It may 43 * need to be bigger to support multiple crypto drivers at once. 44 */ 45 #define ACCEL_DPDK_CRYPTODEV_NUM_MBUFS 32768 46 #define ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE 256 47 #define ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES 128 48 #define ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS (2 * ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES) 49 #define ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE 0 50 51 /* This is the max number of IOs we can supply to any crypto device QP at one time. 52 * It can vary between drivers. 53 */ 54 #define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS 2048 55 56 /* At this moment DPDK descriptors allocation for mlx5 has some issues. We use 512 57 * as a compromise value between performance and the time spent for initialization. */ 58 #define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5 512 59 60 #define ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP 64 61 62 /* Common for supported devices. */ 63 #define ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS 2 64 #define ACCEL_DPDK_CRYPTODEV_IV_OFFSET (sizeof(struct rte_crypto_op) + \ 65 sizeof(struct rte_crypto_sym_op) + \ 66 (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * \ 67 sizeof(struct rte_crypto_sym_xform))) 68 #define ACCEL_DPDK_CRYPTODEV_IV_LENGTH 16 69 70 /* Driver names */ 71 #define ACCEL_DPDK_CRYPTODEV_AESNI_MB "crypto_aesni_mb" 72 #define ACCEL_DPDK_CRYPTODEV_QAT "crypto_qat" 73 #define ACCEL_DPDK_CRYPTODEV_QAT_ASYM "crypto_qat_asym" 74 #define ACCEL_DPDK_CRYPTODEV_MLX5 "mlx5_pci" 75 #define ACCEL_DPDK_CRYPTODEV_UADK "crypto_uadk" 76 77 /* Supported ciphers */ 78 #define ACCEL_DPDK_CRYPTODEV_AES_CBC "AES_CBC" /* QAT and ACCEL_DPDK_CRYPTODEV_AESNI_MB */ 79 #define ACCEL_DPDK_CRYPTODEV_AES_XTS "AES_XTS" /* QAT and MLX5 */ 80 81 /* Specific to AES_CBC. 
*/ 82 #define ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH 16 83 84 /* Limit of the max memory len attached to mbuf - rte_pktmbuf_attach_extbuf has uint16_t `buf_len` 85 * parameter, we use the closest aligned value 32768 for better performance */ 86 #define ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN 32768 87 88 /* Used to store IO context in mbuf */ 89 static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = { 90 .name = "context_accel_dpdk_cryptodev", 91 .size = sizeof(uint64_t), 92 .align = __alignof__(uint64_t), 93 .flags = 0, 94 }; 95 96 struct accel_dpdk_cryptodev_device; 97 98 enum accel_dpdk_cryptodev_driver_type { 99 ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB = 0, 100 ACCEL_DPDK_CRYPTODEV_DRIVER_QAT, 101 ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI, 102 ACCEL_DPDK_CRYPTODEV_DRIVER_UADK, 103 ACCEL_DPDK_CRYPTODEV_DRIVER_LAST 104 }; 105 106 struct accel_dpdk_cryptodev_qp { 107 struct accel_dpdk_cryptodev_device *device; /* ptr to crypto device */ 108 uint32_t num_enqueued_ops; /* Used to decide whether to poll the qp or not */ 109 uint8_t qp; /* queue identifier */ 110 bool in_use; /* whether this node is in use or not */ 111 uint8_t index; /* used by QAT to load balance placement of qpairs */ 112 TAILQ_ENTRY(accel_dpdk_cryptodev_qp) link; 113 }; 114 115 struct accel_dpdk_cryptodev_device { 116 enum accel_dpdk_cryptodev_driver_type type; 117 struct rte_cryptodev_info cdev_info; /* includes DPDK device friendly name */ 118 uint32_t qp_desc_nr; /* max number of qp descriptors to be enqueued in burst */ 119 uint8_t cdev_id; /* identifier for the device */ 120 TAILQ_HEAD(, accel_dpdk_cryptodev_qp) qpairs; 121 TAILQ_ENTRY(accel_dpdk_cryptodev_device) link; 122 }; 123 124 struct accel_dpdk_cryptodev_key_handle { 125 struct accel_dpdk_cryptodev_device *device; 126 TAILQ_ENTRY(accel_dpdk_cryptodev_key_handle) link; 127 void *session_encrypt; /* encryption session for this key */ 128 void *session_decrypt; /* decryption session for this key */ 129 struct rte_crypto_sym_xform cipher_xform; /* crypto control struct for this key */ 130 }; 131 132 struct accel_dpdk_cryptodev_key_priv { 133 enum accel_dpdk_cryptodev_driver_type driver; 134 enum spdk_accel_cipher cipher; 135 char *xts_key; 136 TAILQ_HEAD(, accel_dpdk_cryptodev_key_handle) dev_keys; 137 }; 138 139 /* The crypto channel struct. It is allocated and freed on our behalf by the io channel code. 140 * We store things in here that are needed on a per-thread basis, like the base_channel for this thread, 141 * and the poller for this thread. 142 */ 143 struct accel_dpdk_cryptodev_io_channel { 144 /* completion poller */ 145 struct spdk_poller *poller; 146 /* Array of qpairs for each available device.
The specific device will be selected depending on the crypto key */ 147 struct accel_dpdk_cryptodev_qp *device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_LAST]; 148 /* Used to queue tasks when qpair is full or only part of crypto ops was submitted to the PMD */ 149 TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks; 150 /* Used to queue tasks that were completed in submission path - to avoid calling cpl_cb and possibly overflow 151 * call stack */ 152 TAILQ_HEAD(, accel_dpdk_cryptodev_task) completed_tasks; 153 }; 154 155 struct accel_dpdk_cryptodev_task { 156 struct spdk_accel_task base; 157 uint32_t cryop_completed; /* The number of crypto operations completed by HW */ 158 uint32_t cryop_submitted; /* The number of crypto operations submitted to HW */ 159 uint32_t cryop_total; /* Total number of crypto operations in this task */ 160 bool is_failed; 161 bool inplace; 162 TAILQ_ENTRY(accel_dpdk_cryptodev_task) link; 163 }; 164 165 /* Shared mempools between all devices on this system */ 166 static struct rte_mempool *g_session_mp = NULL; 167 static struct rte_mempool *g_session_mp_priv = NULL; 168 static struct rte_mempool *g_mbuf_mp = NULL; /* mbuf mempool */ 169 static int g_mbuf_offset; 170 static struct rte_mempool *g_crypto_op_mp = NULL; /* crypto operations, must be rte* mempool */ 171 172 static struct rte_mbuf_ext_shared_info g_shinfo = {}; /* used by DPDK mbuf macro */ 173 174 static uint8_t g_qat_total_qp = 0; 175 static uint8_t g_next_qat_index; 176 177 static const char *g_driver_names[] = { 178 [ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = ACCEL_DPDK_CRYPTODEV_AESNI_MB, 179 [ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] = ACCEL_DPDK_CRYPTODEV_QAT, 180 [ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] = ACCEL_DPDK_CRYPTODEV_MLX5, 181 [ACCEL_DPDK_CRYPTODEV_DRIVER_UADK] = ACCEL_DPDK_CRYPTODEV_UADK 182 }; 183 static const char *g_cipher_names[] = { 184 [SPDK_ACCEL_CIPHER_AES_CBC] = ACCEL_DPDK_CRYPTODEV_AES_CBC, 185 [SPDK_ACCEL_CIPHER_AES_XTS] = ACCEL_DPDK_CRYPTODEV_AES_XTS, 186 }; 187 188 static enum accel_dpdk_cryptodev_driver_type g_dpdk_cryptodev_driver = 189 ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB; 190 191 /* Global list of all crypto devices */ 192 static TAILQ_HEAD(, accel_dpdk_cryptodev_device) g_crypto_devices = TAILQ_HEAD_INITIALIZER( 193 g_crypto_devices); 194 static pthread_mutex_t g_device_lock = PTHREAD_MUTEX_INITIALIZER; 195 196 static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module; 197 198 static int accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch, 199 struct accel_dpdk_cryptodev_task *task); 200 201 void 202 accel_dpdk_cryptodev_enable(void) 203 { 204 spdk_accel_module_list_add(&g_accel_dpdk_cryptodev_module); 205 } 206 207 int 208 accel_dpdk_cryptodev_set_driver(const char *driver_name) 209 { 210 if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) { 211 g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT; 212 } else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) { 213 g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB; 214 } else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) { 215 g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI; 216 } else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_UADK) == 0) { 217 g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_UADK; 218 } else { 219 SPDK_ERRLOG("Unsupported driver %s\n", driver_name); 220 return -EINVAL; 221 } 222 223 SPDK_NOTICELOG("Using driver %s\n", driver_name); 224 225 return 0; 226 } 227 228 const char * 229 accel_dpdk_cryptodev_get_driver(void) 
230 { 231 return g_driver_names[g_dpdk_cryptodev_driver]; 232 } 233 234 static inline uint16_t 235 accel_dpdk_cryptodev_poll_qp(struct accel_dpdk_cryptodev_qp *qp, 236 struct accel_dpdk_cryptodev_io_channel *crypto_ch) 237 { 238 struct rte_crypto_op *dequeued_ops[ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE]; 239 struct rte_mbuf *mbufs_to_free[2 * ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE]; 240 struct accel_dpdk_cryptodev_task *task; 241 uint32_t num_mbufs = 0; 242 int i; 243 uint16_t num_dequeued_ops; 244 245 /* Each run of the poller will get just what the device has available 246 * at the moment we call it; we don't check again after draining the 247 * first batch. 248 */ 249 num_dequeued_ops = rte_cryptodev_dequeue_burst(qp->device->cdev_id, qp->qp, 250 dequeued_ops, ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE); 251 /* Check if operation was processed successfully */ 252 for (i = 0; i < num_dequeued_ops; i++) { 253 254 /* We don't know the order or association of the crypto ops wrt any 255 * particular task, so we need to look at each and determine if it's 256 * the last one for its task or not. 257 */ 258 task = (struct accel_dpdk_cryptodev_task *)*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, 259 g_mbuf_offset, uint64_t *); 260 assert(task != NULL); 261 262 if (dequeued_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { 263 SPDK_ERRLOG("error with op %d status %u\n", i, dequeued_ops[i]->status); 264 /* Update the task status to error; we'll still process the 265 * rest of the crypto ops for this task though so they 266 * aren't left hanging. 267 */ 268 task->is_failed = true; 269 } 270 271 /* Return the associated src and dst mbufs by collecting them into 272 * an array that we can use the bulk API to free after the loop. 273 */ 274 *RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset, uint64_t *) = 0; 275 mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_src; 276 if (dequeued_ops[i]->sym->m_dst) { 277 mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst; 278 } 279 280 task->cryop_completed++; 281 if (task->cryop_completed == task->cryop_total) { 282 /* Complete the IO */ 283 spdk_accel_task_complete(&task->base, task->is_failed ? -EINVAL : 0); 284 } else if (task->cryop_completed == task->cryop_submitted) { 285 /* submit remaining crypto ops */ 286 int rc = accel_dpdk_cryptodev_process_task(crypto_ch, task); 287 288 if (spdk_unlikely(rc)) { 289 if (rc == -ENOMEM) { 290 TAILQ_INSERT_TAIL(&crypto_ch->queued_tasks, task, link); 291 continue; 292 } else if (rc == -EALREADY) { 293 /* -EALREADY means that a task is completed, but it might be unsafe to complete 294 * it if we are in the submission path. Since we are in the poller context, we can 295 * complete the task immediately */ 296 rc = 0; 297 } 298 spdk_accel_task_complete(&task->base, rc); 299 } 300 } 301 } 302 303 /* Now bulk free both mbufs and crypto operations. */ 304 if (num_dequeued_ops > 0) { 305 rte_mempool_put_bulk(g_crypto_op_mp, (void **)dequeued_ops, num_dequeued_ops); 306 assert(num_mbufs > 0); 307 /* This also releases chained mbufs if any. */ 308 rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs); 309 } 310 311 assert(qp->num_enqueued_ops >= num_dequeued_ops); 312 qp->num_enqueued_ops -= num_dequeued_ops; 313 314 return num_dequeued_ops; 315 } 316 317 /* This is the poller for the crypto module. It uses a single API to dequeue whatever is ready at 318 * the device.
Then we need to decide if what we've got so far (including previous poller 319 * runs) totals up to one or more complete tasks */ 320 static int 321 accel_dpdk_cryptodev_poller(void *args) 322 { 323 struct accel_dpdk_cryptodev_io_channel *crypto_ch = args; 324 struct accel_dpdk_cryptodev_qp *qp; 325 struct accel_dpdk_cryptodev_task *task, *task_tmp; 326 TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks_tmp; 327 uint32_t num_dequeued_ops = 0, num_enqueued_ops = 0, num_completed_tasks = 0; 328 int i, rc; 329 330 for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) { 331 qp = crypto_ch->device_qp[i]; 332 /* Avoid polling "idle" qps since it may affect performance */ 333 if (qp && qp->num_enqueued_ops) { 334 num_dequeued_ops += accel_dpdk_cryptodev_poll_qp(qp, crypto_ch); 335 } 336 } 337 338 if (!TAILQ_EMPTY(&crypto_ch->queued_tasks)) { 339 TAILQ_INIT(&queued_tasks_tmp); 340 341 TAILQ_FOREACH_SAFE(task, &crypto_ch->queued_tasks, link, task_tmp) { 342 TAILQ_REMOVE(&crypto_ch->queued_tasks, task, link); 343 rc = accel_dpdk_cryptodev_process_task(crypto_ch, task); 344 if (spdk_unlikely(rc)) { 345 if (rc == -ENOMEM) { 346 TAILQ_INSERT_TAIL(&queued_tasks_tmp, task, link); 347 /* Other queued tasks may belong to other qpairs, 348 * so process the whole list */ 349 continue; 350 } else if (rc == -EALREADY) { 351 /* -EALREADY means that a task is completed, but it might be unsafe to complete 352 * it if we are in the submission path. Since we are in the poller context, we can 353 * complete the task immediately */ 354 rc = 0; 355 } 356 spdk_accel_task_complete(&task->base, rc); 357 num_completed_tasks++; 358 } else { 359 num_enqueued_ops++; 360 } 361 } 362 363 TAILQ_SWAP(&crypto_ch->queued_tasks, &queued_tasks_tmp, accel_dpdk_cryptodev_task, link); 364 } 365 366 TAILQ_FOREACH_SAFE(task, &crypto_ch->completed_tasks, link, task_tmp) { 367 TAILQ_REMOVE(&crypto_ch->completed_tasks, task, link); 368 spdk_accel_task_complete(&task->base, 0); 369 num_completed_tasks++; 370 } 371 372 return !!(num_dequeued_ops + num_enqueued_ops + num_completed_tasks); 373 } 374 375 /* Allocate a new mbuf of @remainder size with data pointed to by @addr and attach 376 * it to the @orig_mbuf. */ 377 static inline int 378 accel_dpdk_cryptodev_mbuf_chain_remainder(struct accel_dpdk_cryptodev_task *task, 379 struct rte_mbuf *orig_mbuf, uint8_t *addr, uint64_t *_remainder) 380 { 381 uint64_t phys_addr, phys_len, remainder = *_remainder; 382 struct rte_mbuf *chain_mbuf; 383 int rc; 384 385 phys_len = remainder; 386 phys_addr = spdk_vtophys((void *)addr, &phys_len); 387 if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR)) { 388 return -EFAULT; 389 } 390 remainder = spdk_min(remainder, phys_len); 391 remainder = spdk_min(remainder, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN); 392 rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, (struct rte_mbuf **)&chain_mbuf, 1); 393 if (spdk_unlikely(rc)) { 394 return -ENOMEM; 395 } 396 /* Store context in every mbuf as we don't know anything about completion order */ 397 *RTE_MBUF_DYNFIELD(chain_mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task; 398 rte_pktmbuf_attach_extbuf(chain_mbuf, addr, phys_addr, remainder, &g_shinfo); 399 rte_pktmbuf_append(chain_mbuf, remainder); 400 401 /* Chained buffer is released by rte_pktmbuf_free_bulk() automagically. */ 402 rte_pktmbuf_chain(orig_mbuf, chain_mbuf); 403 *_remainder = remainder; 404 405 return 0; 406 } 407 408 /* Attach the data buffer pointed to by @addr to @mbuf. Return the utilized len of the 409 * contiguous space that was physically available.
*/ 410 static inline uint64_t 411 accel_dpdk_cryptodev_mbuf_attach_buf(struct accel_dpdk_cryptodev_task *task, struct rte_mbuf *mbuf, 412 uint8_t *addr, uint32_t len) 413 { 414 uint64_t phys_addr, phys_len; 415 416 /* Store context in every mbuf as we don't know anything about completion order */ 417 *RTE_MBUF_DYNFIELD(mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task; 418 419 phys_len = len; 420 phys_addr = spdk_vtophys((void *)addr, &phys_len); 421 if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR || phys_len == 0)) { 422 return 0; 423 } 424 assert(phys_len <= len); 425 phys_len = spdk_min(phys_len, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN); 426 427 /* Set the mbuf elements address and length. */ 428 rte_pktmbuf_attach_extbuf(mbuf, addr, phys_addr, phys_len, &g_shinfo); 429 rte_pktmbuf_append(mbuf, phys_len); 430 431 return phys_len; 432 } 433 434 static inline struct accel_dpdk_cryptodev_key_handle * 435 accel_dpdk_find_key_handle_in_channel(struct accel_dpdk_cryptodev_io_channel *crypto_ch, 436 struct accel_dpdk_cryptodev_key_priv *key) 437 { 438 struct accel_dpdk_cryptodev_key_handle *key_handle; 439 440 if (key->driver == ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) { 441 /* Crypto key is registered on all available devices while io_channel opens CQ/QP on a single device. 442 * We need to iterate a list of key entries to find a suitable device */ 443 TAILQ_FOREACH(key_handle, &key->dev_keys, link) { 444 if (key_handle->device->cdev_id == 445 crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->device->cdev_id) { 446 return key_handle; 447 } 448 } 449 return NULL; 450 } else { 451 return TAILQ_FIRST(&key->dev_keys); 452 } 453 } 454 455 static inline int 456 accel_dpdk_cryptodev_task_alloc_resources(struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs, 457 struct rte_crypto_op **crypto_ops, int count) 458 { 459 int rc; 460 461 /* Get the number of source mbufs that we need. These will always be 1:1 because we 462 * don't support chaining. The reason we don't is our decision to use 463 * LBA as IV: there can be no case where we'd need >1 mbuf per crypto op or the 464 * op would be > 1 LBA. 465 */ 466 rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, count); 467 if (rc) { 468 SPDK_ERRLOG("Failed to get src_mbufs!\n"); 469 return -ENOMEM; 470 } 471 472 /* Allocate the same number of mbufs to describe the destination. If the crypto operation is inplace, dst_mbufs is NULL and we just skip this */ 473 if (dst_mbufs) { 474 rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, dst_mbufs, count); 475 if (rc) { 476 SPDK_ERRLOG("Failed to get dst_mbufs!\n"); 477 goto err_free_src; 478 } 479 } 480 481 #ifdef __clang_analyzer__ 482 /* silence scan-build false positive */ 483 SPDK_CLANG_ANALYZER_PREINIT_PTR_ARRAY(crypto_ops, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE, 484 0x1000); 485 #endif 486 /* Allocate crypto operations. */ 487 rc = rte_crypto_op_bulk_alloc(g_crypto_op_mp, 488 RTE_CRYPTO_OP_TYPE_SYMMETRIC, 489 crypto_ops, count); 490 if (rc < count) { 491 SPDK_ERRLOG("Failed to allocate crypto ops! rc %d\n", rc); 492 goto err_free_ops; 493 } 494 495 return 0; 496 497 err_free_ops: 498 if (rc > 0) { 499 rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, rc); 500 } 501 if (dst_mbufs) { 502 /* This also releases chained mbufs if any. */ 503 rte_pktmbuf_free_bulk(dst_mbufs, count); 504 } 505 err_free_src: 506 /* This also releases chained mbufs if any.
*/ 507 rte_pktmbuf_free_bulk(src_mbufs, count); 508 509 return -ENOMEM; 510 } 511 512 static inline int 513 accel_dpdk_cryptodev_mbuf_add_single_block(struct spdk_iov_sgl *sgl, struct rte_mbuf *mbuf, 514 struct accel_dpdk_cryptodev_task *task) 515 { 516 int rc; 517 uint8_t *buf_addr; 518 uint64_t phys_len; 519 uint64_t remainder; 520 uint64_t buf_len; 521 522 assert(sgl->iov->iov_len > sgl->iov_offset); 523 buf_len = spdk_min(task->base.block_size, sgl->iov->iov_len - sgl->iov_offset); 524 buf_addr = sgl->iov->iov_base + sgl->iov_offset; 525 phys_len = accel_dpdk_cryptodev_mbuf_attach_buf(task, mbuf, buf_addr, buf_len); 526 if (spdk_unlikely(phys_len == 0)) { 527 return -EFAULT; 528 } 529 buf_len = spdk_min(buf_len, phys_len); 530 spdk_iov_sgl_advance(sgl, buf_len); 531 532 /* Handle the case of page boundary. */ 533 assert(task->base.block_size >= buf_len); 534 remainder = task->base.block_size - buf_len; 535 while (remainder) { 536 buf_len = spdk_min(remainder, sgl->iov->iov_len - sgl->iov_offset); 537 buf_addr = sgl->iov->iov_base + sgl->iov_offset; 538 rc = accel_dpdk_cryptodev_mbuf_chain_remainder(task, mbuf, buf_addr, &buf_len); 539 if (spdk_unlikely(rc)) { 540 return rc; 541 } 542 spdk_iov_sgl_advance(sgl, buf_len); 543 remainder -= buf_len; 544 } 545 546 return 0; 547 } 548 549 static inline void 550 accel_dpdk_cryptodev_op_set_iv(struct rte_crypto_op *crypto_op, uint64_t iv) 551 { 552 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(crypto_op, uint8_t *, ACCEL_DPDK_CRYPTODEV_IV_OFFSET); 553 554 /* Set the IV - we use the LBA of the crypto_op */ 555 memset(iv_ptr, 0, ACCEL_DPDK_CRYPTODEV_IV_LENGTH); 556 rte_memcpy(iv_ptr, &iv, sizeof(uint64_t)); 557 } 558 559 static inline void 560 accel_dpdk_cryptodev_update_resources_from_pools(struct rte_crypto_op **crypto_ops, 561 struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs, 562 uint32_t num_enqueued_ops, uint32_t cryop_cnt) 563 { 564 memmove(crypto_ops, &crypto_ops[num_enqueued_ops], sizeof(crypto_ops[0]) * cryop_cnt); 565 memmove(src_mbufs, &src_mbufs[num_enqueued_ops], sizeof(src_mbufs[0]) * cryop_cnt); 566 if (dst_mbufs) { 567 memmove(dst_mbufs, &dst_mbufs[num_enqueued_ops], sizeof(dst_mbufs[0]) * cryop_cnt); 568 } 569 } 570 571 static int 572 accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch, 573 struct accel_dpdk_cryptodev_task *task) 574 { 575 uint16_t num_enqueued_ops; 576 uint32_t cryop_cnt; 577 uint32_t crypto_len = task->base.block_size; 578 uint64_t dst_length, total_length; 579 uint32_t sgl_offset; 580 uint32_t qp_capacity; 581 uint64_t iv_start; 582 uint32_t i, crypto_index; 583 struct rte_crypto_op *crypto_ops[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE]; 584 struct rte_mbuf *src_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE]; 585 struct rte_mbuf *dst_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE]; 586 void *session; 587 struct accel_dpdk_cryptodev_key_priv *priv; 588 struct accel_dpdk_cryptodev_key_handle *key_handle; 589 struct accel_dpdk_cryptodev_qp *qp; 590 struct accel_dpdk_cryptodev_device *dev; 591 struct spdk_iov_sgl src, dst = {}; 592 int rc; 593 bool inplace = task->inplace; 594 595 if (spdk_unlikely(!task->base.crypto_key || 596 task->base.crypto_key->module_if != &g_accel_dpdk_cryptodev_module)) { 597 return -EINVAL; 598 } 599 600 priv = task->base.crypto_key->priv; 601 assert(priv->driver < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST); 602 603 if (task->cryop_completed) { 604 /* We continue to process remaining blocks */ 605 assert(task->cryop_submitted == 
task->cryop_completed); 606 assert(task->cryop_total > task->cryop_completed); 607 cryop_cnt = task->cryop_total - task->cryop_completed; 608 sgl_offset = task->cryop_completed * crypto_len; 609 iv_start = task->base.iv + task->cryop_completed; 610 } else { 611 /* That is a new task */ 612 total_length = 0; 613 for (i = 0; i < task->base.s.iovcnt; i++) { 614 total_length += task->base.s.iovs[i].iov_len; 615 } 616 dst_length = 0; 617 for (i = 0; i < task->base.d.iovcnt; i++) { 618 dst_length += task->base.d.iovs[i].iov_len; 619 } 620 621 if (spdk_unlikely(total_length != dst_length || !total_length)) { 622 return -ERANGE; 623 } 624 if (spdk_unlikely(total_length % task->base.block_size != 0)) { 625 return -EINVAL; 626 } 627 628 cryop_cnt = total_length / task->base.block_size; 629 task->cryop_total = cryop_cnt; 630 sgl_offset = 0; 631 iv_start = task->base.iv; 632 } 633 634 /* Limit the number of crypto ops that we can process once */ 635 cryop_cnt = spdk_min(cryop_cnt, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE); 636 637 qp = crypto_ch->device_qp[priv->driver]; 638 assert(qp); 639 dev = qp->device; 640 assert(dev); 641 assert(dev->qp_desc_nr >= qp->num_enqueued_ops); 642 643 qp_capacity = dev->qp_desc_nr - qp->num_enqueued_ops; 644 cryop_cnt = spdk_min(cryop_cnt, qp_capacity); 645 if (spdk_unlikely(cryop_cnt == 0)) { 646 /* QP is full */ 647 return -ENOMEM; 648 } 649 650 key_handle = accel_dpdk_find_key_handle_in_channel(crypto_ch, priv); 651 if (spdk_unlikely(!key_handle)) { 652 SPDK_ERRLOG("Failed to find a key handle, driver %s, cipher %s\n", g_driver_names[priv->driver], 653 g_cipher_names[priv->cipher]); 654 return -EINVAL; 655 } 656 /* mlx5_pci binds keys to a specific device, we can't use a key with any device */ 657 assert(dev == key_handle->device || priv->driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI); 658 659 if (task->base.op_code == SPDK_ACCEL_OPC_ENCRYPT) { 660 session = key_handle->session_encrypt; 661 } else if (task->base.op_code == SPDK_ACCEL_OPC_DECRYPT) { 662 session = key_handle->session_decrypt; 663 } else { 664 return -EINVAL; 665 } 666 667 rc = accel_dpdk_cryptodev_task_alloc_resources(src_mbufs, inplace ? NULL : dst_mbufs, 668 crypto_ops, cryop_cnt); 669 if (rc) { 670 return rc; 671 } 672 673 /* As we don't support chaining because of a decision to use LBA as IV, construction 674 * of crypto operations is straightforward. We build both the op, the mbuf and the 675 * dst_mbuf in our local arrays by looping through the length of the accel task and 676 * picking off LBA sized blocks of memory from the IOVs as we walk through them. Each 677 * LBA sized chunk of memory will correspond 1:1 to a crypto operation and a single 678 * mbuf per crypto operation. 
679 */ 680 spdk_iov_sgl_init(&src, task->base.s.iovs, task->base.s.iovcnt, 0); 681 spdk_iov_sgl_advance(&src, sgl_offset); 682 if (!inplace) { 683 spdk_iov_sgl_init(&dst, task->base.d.iovs, task->base.d.iovcnt, 0); 684 spdk_iov_sgl_advance(&dst, sgl_offset); 685 } 686 687 for (crypto_index = 0; crypto_index < cryop_cnt; crypto_index++) { 688 rc = accel_dpdk_cryptodev_mbuf_add_single_block(&src, src_mbufs[crypto_index], task); 689 if (spdk_unlikely(rc)) { 690 goto free_ops; 691 } 692 accel_dpdk_cryptodev_op_set_iv(crypto_ops[crypto_index], iv_start); 693 iv_start++; 694 695 /* Set the data to encrypt/decrypt length */ 696 crypto_ops[crypto_index]->sym->cipher.data.length = crypto_len; 697 crypto_ops[crypto_index]->sym->cipher.data.offset = 0; 698 rte_crypto_op_attach_sym_session(crypto_ops[crypto_index], session); 699 700 /* link the mbuf to the crypto op. */ 701 crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index]; 702 703 if (inplace) { 704 crypto_ops[crypto_index]->sym->m_dst = NULL; 705 } else { 706 #ifndef __clang_analyzer__ 707 /* scan-build thinks that dst_mbufs is not initialized */ 708 rc = accel_dpdk_cryptodev_mbuf_add_single_block(&dst, dst_mbufs[crypto_index], task); 709 if (spdk_unlikely(rc)) { 710 goto free_ops; 711 } 712 crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index]; 713 #endif 714 } 715 } 716 717 /* Enqueue everything we've got but limit by the max number of descriptors we 718 * configured the crypto device for. 719 */ 720 num_enqueued_ops = rte_cryptodev_enqueue_burst(dev->cdev_id, qp->qp, crypto_ops, cryop_cnt); 721 /* This value is used in the completion callback to determine when the accel task is complete. */ 722 task->cryop_submitted += num_enqueued_ops; 723 qp->num_enqueued_ops += num_enqueued_ops; 724 /* We were unable to enqueue everything but did get some, so need to decide what 725 * to do based on the status of the last op. 726 */ 727 if (num_enqueued_ops < cryop_cnt) { 728 switch (crypto_ops[num_enqueued_ops]->status) { 729 case RTE_CRYPTO_OP_STATUS_SUCCESS: 730 /* Crypto operation might be completed successfully but enqueuing to a completion ring might fail. 731 * That might happen with SW PMDs like openssl 732 * We can't retry such operation on next turn since if crypto operation was inplace, we can encrypt/ 733 * decrypt already processed buffer. See github issue #2907 for more details. 734 * Handle this case as the crypto op was completed successfully - increment cryop_submitted and 735 * cryop_completed. 736 * We won't receive a completion for such operation, so we need to cleanup mbufs and crypto_ops */ 737 assert(task->cryop_total > task->cryop_completed); 738 task->cryop_completed++; 739 task->cryop_submitted++; 740 if (task->cryop_completed == task->cryop_total) { 741 assert(num_enqueued_ops == 0); 742 /* All crypto ops are completed. We can't complete the task immediately since this function might be 743 * called in scope of spdk_accel_submit_* function and user's logic in the completion callback 744 * might lead to stack overflow */ 745 cryop_cnt -= num_enqueued_ops; 746 accel_dpdk_cryptodev_update_resources_from_pools(crypto_ops, src_mbufs, inplace ? NULL : dst_mbufs, 747 num_enqueued_ops, cryop_cnt); 748 rc = -EALREADY; 749 goto free_ops; 750 } 751 /* fallthrough */ 752 case RTE_CRYPTO_OP_STATUS_NOT_PROCESSED: 753 if (num_enqueued_ops == 0) { 754 /* Nothing was submitted. 
Free crypto ops and mbufs, treat this case as NOMEM */ 755 rc = -ENOMEM; 756 goto free_ops; 757 } 758 /* Part of the crypto operations were not submitted; release their mbufs and crypto ops. 759 * The remaining crypto ops will be submitted again once the current batch is completed */ 760 cryop_cnt -= num_enqueued_ops; 761 accel_dpdk_cryptodev_update_resources_from_pools(crypto_ops, src_mbufs, inplace ? NULL : dst_mbufs, 762 num_enqueued_ops, cryop_cnt); 763 rc = 0; 764 goto free_ops; 765 default: 766 /* For all other statuses, mark task as failed so that the poller will pick 767 * the failure up for the overall task status. 768 */ 769 task->is_failed = true; 770 if (num_enqueued_ops == 0) { 771 /* If nothing was enqueued and the failure wasn't due to the device being 772 * busy, fail the task now as the poller won't know anything about it. 773 */ 774 rc = -EINVAL; 775 goto free_ops; 776 } 777 break; 778 } 779 } 780 781 return 0; 782 783 /* Error cleanup paths. */ 784 free_ops: 785 if (!inplace) { 786 /* This also releases chained mbufs if any. */ 787 rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt); 788 } 789 rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, cryop_cnt); 790 /* This also releases chained mbufs if any. */ 791 rte_pktmbuf_free_bulk(src_mbufs, cryop_cnt); 792 return rc; 793 } 794 795 static inline struct accel_dpdk_cryptodev_qp * 796 accel_dpdk_cryptodev_get_next_device_qpair(enum accel_dpdk_cryptodev_driver_type type) 797 { 798 struct accel_dpdk_cryptodev_device *device, *device_tmp; 799 struct accel_dpdk_cryptodev_qp *qpair; 800 801 TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, device_tmp) { 802 if (device->type != type) { 803 continue; 804 } 805 TAILQ_FOREACH(qpair, &device->qpairs, link) { 806 if (!qpair->in_use) { 807 qpair->in_use = true; 808 return qpair; 809 } 810 } 811 } 812 813 return NULL; 814 } 815 816 /* Helper function for the channel creation callback. 817 * Returns the number of drivers assigned to the channel */ 818 static uint32_t 819 accel_dpdk_cryptodev_assign_device_qps(struct accel_dpdk_cryptodev_io_channel *crypto_ch) 820 { 821 struct accel_dpdk_cryptodev_device *device; 822 struct accel_dpdk_cryptodev_qp *device_qp; 823 uint32_t num_drivers = 0; 824 bool qat_found = false; 825 826 pthread_mutex_lock(&g_device_lock); 827 828 TAILQ_FOREACH(device, &g_crypto_devices, link) { 829 if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT && !qat_found) { 830 /* For some QAT devices, the optimal qp to use is every 32nd as this spreads the 831 * workload out over the multiple virtual functions in the device. For the devices 832 * where this isn't the case, it doesn't hurt. 833 */ 834 TAILQ_FOREACH(device_qp, &device->qpairs, link) { 835 if (device_qp->index != g_next_qat_index) { 836 continue; 837 } 838 if (device_qp->in_use == false) { 839 assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] == NULL); 840 crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] = device_qp; 841 device_qp->in_use = true; 842 g_next_qat_index = (g_next_qat_index + ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD) % g_qat_total_qp; 843 qat_found = true; 844 num_drivers++; 845 break; 846 } else { 847 /* If the preferred index is used, skip to the next one in this set.
*/ 848 g_next_qat_index = (g_next_qat_index + 1) % g_qat_total_qp; 849 } 850 } 851 } 852 } 853 854 /* For ACCEL_DPDK_CRYPTODEV_AESNI_MB and MLX5_PCI select devices in round-robin manner */ 855 device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB); 856 if (device_qp) { 857 assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] == NULL); 858 crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = device_qp; 859 num_drivers++; 860 } 861 862 device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI); 863 if (device_qp) { 864 assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] == NULL); 865 crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] = device_qp; 866 num_drivers++; 867 } 868 869 device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_UADK); 870 if (device_qp) { 871 assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_UADK] == NULL); 872 crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_UADK] = device_qp; 873 num_drivers++; 874 } 875 pthread_mutex_unlock(&g_device_lock); 876 877 return num_drivers; 878 } 879 880 static void 881 _accel_dpdk_cryptodev_destroy_cb(void *io_device, void *ctx_buf) 882 { 883 struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *) 884 ctx_buf; 885 int i; 886 887 pthread_mutex_lock(&g_device_lock); 888 for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) { 889 if (crypto_ch->device_qp[i]) { 890 crypto_ch->device_qp[i]->in_use = false; 891 } 892 } 893 pthread_mutex_unlock(&g_device_lock); 894 895 spdk_poller_unregister(&crypto_ch->poller); 896 } 897 898 static int 899 _accel_dpdk_cryptodev_create_cb(void *io_device, void *ctx_buf) 900 { 901 struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *) 902 ctx_buf; 903 904 crypto_ch->poller = SPDK_POLLER_REGISTER(accel_dpdk_cryptodev_poller, crypto_ch, 0); 905 if (!accel_dpdk_cryptodev_assign_device_qps(crypto_ch)) { 906 SPDK_ERRLOG("No crypto drivers assigned\n"); 907 spdk_poller_unregister(&crypto_ch->poller); 908 return -EINVAL; 909 } 910 911 /* We use this to queue tasks when qpair is full or no resources in pools */ 912 TAILQ_INIT(&crypto_ch->queued_tasks); 913 TAILQ_INIT(&crypto_ch->completed_tasks); 914 915 return 0; 916 } 917 918 static struct spdk_io_channel * 919 accel_dpdk_cryptodev_get_io_channel(void) 920 { 921 return spdk_get_io_channel(&g_accel_dpdk_cryptodev_module); 922 } 923 924 static size_t 925 accel_dpdk_cryptodev_ctx_size(void) 926 { 927 return sizeof(struct accel_dpdk_cryptodev_task); 928 } 929 930 static bool 931 accel_dpdk_cryptodev_supports_opcode(enum spdk_accel_opcode opc) 932 { 933 switch (opc) { 934 case SPDK_ACCEL_OPC_ENCRYPT: 935 case SPDK_ACCEL_OPC_DECRYPT: 936 return true; 937 default: 938 return false; 939 } 940 } 941 942 static int 943 accel_dpdk_cryptodev_submit_tasks(struct spdk_io_channel *_ch, struct spdk_accel_task *_task) 944 { 945 struct accel_dpdk_cryptodev_task *task = SPDK_CONTAINEROF(_task, struct accel_dpdk_cryptodev_task, 946 base); 947 struct accel_dpdk_cryptodev_io_channel *ch = spdk_io_channel_get_ctx(_ch); 948 int rc; 949 950 task->cryop_completed = 0; 951 task->cryop_submitted = 0; 952 task->cryop_total = 0; 953 task->inplace = true; 954 task->is_failed = false; 955 956 /* Check if crypto operation is inplace: no destination or source == destination */ 957 if (task->base.s.iovcnt == task->base.d.iovcnt) { 958 if (memcmp(task->base.s.iovs, 
task->base.d.iovs, sizeof(struct iovec) * task->base.s.iovcnt) != 0) { 959 task->inplace = false; 960 } 961 } else if (task->base.d.iovcnt != 0) { 962 task->inplace = false; 963 } 964 965 rc = accel_dpdk_cryptodev_process_task(ch, task); 966 if (spdk_unlikely(rc)) { 967 if (rc == -ENOMEM) { 968 TAILQ_INSERT_TAIL(&ch->queued_tasks, task, link); 969 rc = 0; 970 } else if (rc == -EALREADY) { 971 /* -EALREADY means that a task is completed, but it might be unsafe to complete 972 * it if we are in the submission path. Hence put it into a dedicated queue and 973 * process it during polling */ 974 TAILQ_INSERT_TAIL(&ch->completed_tasks, task, link); 975 rc = 0; 976 } 977 } 978 979 return rc; 980 } 981 982 /* Dummy function used by DPDK to free ext attached buffers to mbufs; we free them ourselves, but 983 * this callback has to be here. */ 984 static void 985 shinfo_free_cb(void *arg1, void *arg2) 986 { 987 } 988 989 static int 990 accel_dpdk_cryptodev_create(uint8_t index, uint16_t num_lcores) 991 { 992 struct rte_cryptodev_qp_conf qp_conf = { 993 .mp_session = g_session_mp, 994 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0) 995 .mp_session_private = g_session_mp_priv 996 #endif 997 }; 998 /* Setup queue pairs. */ 999 struct rte_cryptodev_config conf = { .socket_id = SPDK_ENV_SOCKET_ID_ANY }; 1000 struct accel_dpdk_cryptodev_device *device; 1001 uint8_t j, cdev_id, cdrv_id; 1002 struct accel_dpdk_cryptodev_qp *dev_qp; 1003 int rc; 1004 1005 device = calloc(1, sizeof(*device)); 1006 if (!device) { 1007 return -ENOMEM; 1008 } 1009 1010 /* Get details about this device. */ 1011 rte_cryptodev_info_get(index, &device->cdev_info); 1012 cdrv_id = device->cdev_info.driver_id; 1013 cdev_id = device->cdev_id = index; 1014 1015 if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) { 1016 device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS; 1017 device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT; 1018 } else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) { 1019 device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS; 1020 device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB; 1021 } else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) { 1022 device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5; 1023 device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI; 1024 } else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT_ASYM) == 0) { 1025 /* ACCEL_DPDK_CRYPTODEV_QAT_ASYM devices are not supported at this time. */ 1026 rc = 0; 1027 goto err; 1028 } else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_UADK) == 0) { 1029 device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS; 1030 device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_UADK; 1031 } else { 1032 SPDK_ERRLOG("Failed to start device %u. Invalid driver name \"%s\"\n", 1033 cdev_id, device->cdev_info.driver_name); 1034 rc = -EINVAL; 1035 goto err; 1036 } 1037 1038 /* Before going any further, make sure we have enough resources for this 1039 * device type to function. We need a unique queue pair per core across each 1040 * device type to remain lockless....
1041 */ 1042 if ((rte_cryptodev_device_count_by_driver(cdrv_id) * 1043 device->cdev_info.max_nb_queue_pairs) < num_lcores) { 1044 SPDK_ERRLOG("Insufficient unique queue pairs available for %s\n", 1045 device->cdev_info.driver_name); 1046 SPDK_ERRLOG("Either add more crypto devices or decrease core count\n"); 1047 rc = -EINVAL; 1048 goto err; 1049 } 1050 1051 conf.nb_queue_pairs = device->cdev_info.max_nb_queue_pairs; 1052 rc = rte_cryptodev_configure(cdev_id, &conf); 1053 if (rc < 0) { 1054 SPDK_ERRLOG("Failed to configure cryptodev %u: error %d\n", 1055 cdev_id, rc); 1056 rc = -EINVAL; 1057 goto err; 1058 } 1059 1060 /* Pre-setup all potential qpairs now and assign them in the channel 1061 * callback. If we were to create them there, we'd have to stop the 1062 * entire device affecting all other threads that might be using it 1063 * even on other queue pairs. 1064 */ 1065 qp_conf.nb_descriptors = device->qp_desc_nr; 1066 for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) { 1067 rc = rte_cryptodev_queue_pair_setup(cdev_id, j, &qp_conf, SOCKET_ID_ANY); 1068 if (rc < 0) { 1069 SPDK_ERRLOG("Failed to setup queue pair %u on " 1070 "cryptodev %u: error %d\n", j, cdev_id, rc); 1071 rc = -EINVAL; 1072 goto err_qp_setup; 1073 } 1074 } 1075 1076 rc = rte_cryptodev_start(cdev_id); 1077 if (rc < 0) { 1078 SPDK_ERRLOG("Failed to start device %u: error %d\n", cdev_id, rc); 1079 rc = -EINVAL; 1080 goto err_dev_start; 1081 } 1082 1083 TAILQ_INIT(&device->qpairs); 1084 /* Build up lists of device/qp combinations per PMD */ 1085 for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) { 1086 dev_qp = calloc(1, sizeof(*dev_qp)); 1087 if (!dev_qp) { 1088 rc = -ENOMEM; 1089 goto err_qp_alloc; 1090 } 1091 dev_qp->device = device; 1092 dev_qp->qp = j; 1093 dev_qp->in_use = false; 1094 TAILQ_INSERT_TAIL(&device->qpairs, dev_qp, link); 1095 if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) { 1096 dev_qp->index = g_qat_total_qp++; 1097 } 1098 } 1099 /* Add to our list of available crypto devices. 
*/ 1100 TAILQ_INSERT_TAIL(&g_crypto_devices, device, link); 1101 1102 return 0; 1103 1104 err_qp_alloc: 1105 TAILQ_FOREACH(dev_qp, &device->qpairs, link) { 1106 if (dev_qp->device->cdev_id != device->cdev_id) { 1107 continue; 1108 } 1109 free(dev_qp); 1110 if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) { 1111 assert(g_qat_total_qp); 1112 g_qat_total_qp--; 1113 } 1114 } 1115 rte_cryptodev_stop(cdev_id); 1116 err_dev_start: 1117 err_qp_setup: 1118 rte_cryptodev_close(cdev_id); 1119 err: 1120 free(device); 1121 1122 return rc; 1123 } 1124 1125 static void 1126 accel_dpdk_cryptodev_release(struct accel_dpdk_cryptodev_device *device) 1127 { 1128 struct accel_dpdk_cryptodev_qp *dev_qp, *tmp; 1129 1130 assert(device); 1131 1132 TAILQ_FOREACH_SAFE(dev_qp, &device->qpairs, link, tmp) { 1133 free(dev_qp); 1134 } 1135 if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) { 1136 assert(g_qat_total_qp >= device->cdev_info.max_nb_queue_pairs); 1137 g_qat_total_qp -= device->cdev_info.max_nb_queue_pairs; 1138 } 1139 rte_cryptodev_stop(device->cdev_id); 1140 rte_cryptodev_close(device->cdev_id); 1141 free(device); 1142 } 1143 1144 static int 1145 accel_dpdk_cryptodev_init(void) 1146 { 1147 uint8_t cdev_count; 1148 uint8_t cdev_id; 1149 int i, rc; 1150 const char *driver_name = g_driver_names[g_dpdk_cryptodev_driver]; 1151 struct accel_dpdk_cryptodev_device *device, *tmp_dev; 1152 unsigned int max_sess_size = 0, sess_size; 1153 uint16_t num_lcores = rte_lcore_count(); 1154 char init_args[32]; 1155 1156 /* Only the first call via module init should init the crypto drivers. */ 1157 if (g_session_mp != NULL) { 1158 return 0; 1159 } 1160 1161 if (g_dpdk_cryptodev_driver == ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB || 1162 g_dpdk_cryptodev_driver == ACCEL_DPDK_CRYPTODEV_DRIVER_UADK) { 1163 snprintf(init_args, sizeof(init_args), "max_nb_queue_pairs=%d", 1164 ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP); 1165 rc = rte_vdev_init(driver_name, init_args); 1166 if (rc) { 1167 SPDK_NOTICELOG("Failed to create virtual PMD %s: error %d. " 1168 "Possibly %s is not supported by DPDK library. " 1169 "Keep going...\n", driver_name, rc, driver_name); 1170 } 1171 } 1172 1173 /* If we have no crypto devices, report error to fallback on other modules. */ 1174 cdev_count = rte_cryptodev_count(); 1175 if (cdev_count == 0) { 1176 return -ENODEV; 1177 } 1178 SPDK_NOTICELOG("Found crypto devices: %d\n", (int)cdev_count); 1179 1180 g_mbuf_offset = rte_mbuf_dynfield_register(&rte_mbuf_dynfield_io_context); 1181 if (g_mbuf_offset < 0) { 1182 SPDK_ERRLOG("error registering dynamic field with DPDK\n"); 1183 return -EINVAL; 1184 } 1185 1186 /* Create global mempools, shared by all devices regardless of type */ 1187 /* First determine max session size, most pools are shared by all the devices, 1188 * so we need to find the global max sessions size. 
*/ 1189 for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) { 1190 sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id); 1191 if (sess_size > max_sess_size) { 1192 max_sess_size = sess_size; 1193 } 1194 } 1195 1196 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0) 1197 g_session_mp_priv = rte_mempool_create("dpdk_crypto_ses_mp_priv", 1198 ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0, 1199 NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0); 1200 if (g_session_mp_priv == NULL) { 1201 SPDK_ERRLOG("Cannot create private session pool max size 0x%x\n", max_sess_size); 1202 return -ENOMEM; 1203 } 1204 1205 /* When session private data mempool allocated, the element size for the session mempool 1206 * should be 0. */ 1207 max_sess_size = 0; 1208 #endif 1209 1210 g_session_mp = rte_cryptodev_sym_session_pool_create("dpdk_crypto_ses_mp", 1211 ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0, 1212 SOCKET_ID_ANY); 1213 if (g_session_mp == NULL) { 1214 SPDK_ERRLOG("Cannot create session pool max size 0x%x\n", max_sess_size); 1215 rc = -ENOMEM; 1216 goto error_create_session_mp; 1217 } 1218 1219 g_mbuf_mp = rte_pktmbuf_pool_create("dpdk_crypto_mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS, 1220 ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE, 1221 0, 0, SPDK_ENV_SOCKET_ID_ANY); 1222 if (g_mbuf_mp == NULL) { 1223 SPDK_ERRLOG("Cannot create mbuf pool\n"); 1224 rc = -ENOMEM; 1225 goto error_create_mbuf; 1226 } 1227 1228 /* We use per op private data as suggested by DPDK and to store the IV and 1229 * our own struct for queueing ops. */ 1230 g_crypto_op_mp = rte_crypto_op_pool_create("dpdk_crypto_op_mp", 1231 RTE_CRYPTO_OP_TYPE_SYMMETRIC, ACCEL_DPDK_CRYPTODEV_NUM_MBUFS, ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE, 1232 (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_sym_xform)) + 1233 ACCEL_DPDK_CRYPTODEV_IV_LENGTH, rte_socket_id()); 1234 if (g_crypto_op_mp == NULL) { 1235 SPDK_ERRLOG("Cannot create op pool\n"); 1236 rc = -ENOMEM; 1237 goto error_create_op; 1238 } 1239 1240 /* Init all devices */ 1241 for (i = 0; i < cdev_count; i++) { 1242 rc = accel_dpdk_cryptodev_create(i, num_lcores); 1243 if (rc) { 1244 goto err; 1245 } 1246 } 1247 1248 g_shinfo.free_cb = shinfo_free_cb; 1249 1250 spdk_io_device_register(&g_accel_dpdk_cryptodev_module, _accel_dpdk_cryptodev_create_cb, 1251 _accel_dpdk_cryptodev_destroy_cb, sizeof(struct accel_dpdk_cryptodev_io_channel), 1252 "accel_dpdk_cryptodev"); 1253 1254 return 0; 1255 1256 /* Error cleanup paths. 
*/ 1257 err: 1258 TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp_dev) { 1259 TAILQ_REMOVE(&g_crypto_devices, device, link); 1260 accel_dpdk_cryptodev_release(device); 1261 } 1262 rte_mempool_free(g_crypto_op_mp); 1263 g_crypto_op_mp = NULL; 1264 error_create_op: 1265 rte_mempool_free(g_mbuf_mp); 1266 g_mbuf_mp = NULL; 1267 error_create_mbuf: 1268 rte_mempool_free(g_session_mp); 1269 g_session_mp = NULL; 1270 error_create_session_mp: 1271 if (g_session_mp_priv != NULL) { 1272 rte_mempool_free(g_session_mp_priv); 1273 g_session_mp_priv = NULL; 1274 } 1275 return rc; 1276 } 1277 1278 static void 1279 accel_dpdk_cryptodev_fini_cb(void *io_device) 1280 { 1281 struct accel_dpdk_cryptodev_device *device, *tmp; 1282 1283 TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp) { 1284 TAILQ_REMOVE(&g_crypto_devices, device, link); 1285 accel_dpdk_cryptodev_release(device); 1286 } 1287 1288 if (g_dpdk_cryptodev_driver == ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB || 1289 g_dpdk_cryptodev_driver == ACCEL_DPDK_CRYPTODEV_DRIVER_UADK) { 1290 rte_vdev_uninit(g_driver_names[g_dpdk_cryptodev_driver]); 1291 } 1292 1293 rte_mempool_free(g_crypto_op_mp); 1294 rte_mempool_free(g_mbuf_mp); 1295 rte_mempool_free(g_session_mp); 1296 if (g_session_mp_priv != NULL) { 1297 rte_mempool_free(g_session_mp_priv); 1298 } 1299 1300 spdk_accel_module_finish(); 1301 } 1302 1303 /* Called when the entire module is being torn down. */ 1304 static void 1305 accel_dpdk_cryptodev_fini(void *ctx) 1306 { 1307 if (g_crypto_op_mp) { 1308 spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, accel_dpdk_cryptodev_fini_cb); 1309 } 1310 } 1311 1312 static void 1313 accel_dpdk_cryptodev_key_handle_session_free(struct accel_dpdk_cryptodev_device *device, 1314 void *session) 1315 { 1316 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) 1317 assert(device != NULL); 1318 1319 rte_cryptodev_sym_session_free(device->cdev_id, session); 1320 #else 1321 rte_cryptodev_sym_session_free(session); 1322 #endif 1323 } 1324 1325 static void * 1326 accel_dpdk_cryptodev_key_handle_session_create(struct accel_dpdk_cryptodev_device *device, 1327 struct rte_crypto_sym_xform *cipher_xform) 1328 { 1329 void *session; 1330 1331 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) 1332 session = rte_cryptodev_sym_session_create(device->cdev_id, cipher_xform, g_session_mp); 1333 #else 1334 session = rte_cryptodev_sym_session_create(g_session_mp); 1335 if (!session) { 1336 return NULL; 1337 } 1338 1339 if (rte_cryptodev_sym_session_init(device->cdev_id, session, cipher_xform, g_session_mp_priv) < 0) { 1340 accel_dpdk_cryptodev_key_handle_session_free(device, session); 1341 return NULL; 1342 } 1343 #endif 1344 1345 return session; 1346 } 1347 1348 static int 1349 accel_dpdk_cryptodev_key_handle_configure(struct spdk_accel_crypto_key *key, 1350 struct accel_dpdk_cryptodev_key_handle *key_handle) 1351 { 1352 struct accel_dpdk_cryptodev_key_priv *priv = key->priv; 1353 1354 key_handle->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; 1355 key_handle->cipher_xform.cipher.iv.offset = ACCEL_DPDK_CRYPTODEV_IV_OFFSET; 1356 key_handle->cipher_xform.cipher.iv.length = ACCEL_DPDK_CRYPTODEV_IV_LENGTH; 1357 1358 switch (priv->cipher) { 1359 case SPDK_ACCEL_CIPHER_AES_CBC: 1360 key_handle->cipher_xform.cipher.key.data = key->key; 1361 key_handle->cipher_xform.cipher.key.length = key->key_size; 1362 key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; 1363 break; 1364 case SPDK_ACCEL_CIPHER_AES_XTS: 1365 key_handle->cipher_xform.cipher.key.data = priv->xts_key; 
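/* Note: for AES_XTS, priv->xts_key holds key1 and key2 concatenated into a single buffer
 * (built in accel_dpdk_cryptodev_key_init() below), which is the layout DPDK expects for
 * this cipher; its total length is key_size + key2_size, as set on the next line. */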
1366 key_handle->cipher_xform.cipher.key.length = key->key_size + key->key2_size; 1367 key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS; 1368 break; 1369 default: 1370 SPDK_ERRLOG("Invalid cipher name %s.\n", key->param.cipher); 1371 return -EINVAL; 1372 } 1373 1374 key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; 1375 key_handle->session_encrypt = accel_dpdk_cryptodev_key_handle_session_create(key_handle->device, 1376 &key_handle->cipher_xform); 1377 if (!key_handle->session_encrypt) { 1378 SPDK_ERRLOG("Failed to init encrypt session\n"); 1379 return -EINVAL; 1380 } 1381 1382 key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT; 1383 key_handle->session_decrypt = accel_dpdk_cryptodev_key_handle_session_create(key_handle->device, 1384 &key_handle->cipher_xform); 1385 if (!key_handle->session_decrypt) { 1386 SPDK_ERRLOG("Failed to init decrypt session:"); 1387 accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_encrypt); 1388 return -EINVAL; 1389 } 1390 1391 return 0; 1392 } 1393 1394 static void 1395 accel_dpdk_cryptodev_key_deinit(struct spdk_accel_crypto_key *key) 1396 { 1397 struct accel_dpdk_cryptodev_key_handle *key_handle, *key_handle_tmp; 1398 struct accel_dpdk_cryptodev_key_priv *priv = key->priv; 1399 1400 TAILQ_FOREACH_SAFE(key_handle, &priv->dev_keys, link, key_handle_tmp) { 1401 accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_encrypt); 1402 accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_decrypt); 1403 TAILQ_REMOVE(&priv->dev_keys, key_handle, link); 1404 spdk_memset_s(key_handle, sizeof(*key_handle), 0, sizeof(*key_handle)); 1405 free(key_handle); 1406 } 1407 1408 if (priv->xts_key) { 1409 spdk_memset_s(priv->xts_key, key->key_size + key->key2_size, 0, key->key_size + key->key2_size); 1410 } 1411 free(priv->xts_key); 1412 free(priv); 1413 } 1414 1415 static bool 1416 accel_dpdk_cryptodev_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size) 1417 { 1418 switch (g_dpdk_cryptodev_driver) { 1419 case ACCEL_DPDK_CRYPTODEV_DRIVER_QAT: 1420 case ACCEL_DPDK_CRYPTODEV_DRIVER_UADK: 1421 case ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB: 1422 switch (cipher) { 1423 case SPDK_ACCEL_CIPHER_AES_XTS: 1424 return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE; 1425 case SPDK_ACCEL_CIPHER_AES_CBC: 1426 return key_size == ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH; 1427 default: 1428 return false; 1429 } 1430 case ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI: 1431 switch (cipher) { 1432 case SPDK_ACCEL_CIPHER_AES_XTS: 1433 return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE || key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE; 1434 default: 1435 return false; 1436 } 1437 default: 1438 return false; 1439 } 1440 } 1441 1442 static int 1443 accel_dpdk_cryptodev_key_init(struct spdk_accel_crypto_key *key) 1444 { 1445 struct accel_dpdk_cryptodev_device *device; 1446 struct accel_dpdk_cryptodev_key_priv *priv; 1447 struct accel_dpdk_cryptodev_key_handle *key_handle; 1448 enum accel_dpdk_cryptodev_driver_type driver; 1449 int rc; 1450 1451 driver = g_dpdk_cryptodev_driver; 1452 1453 priv = calloc(1, sizeof(*priv)); 1454 if (!priv) { 1455 SPDK_ERRLOG("Memory allocation failed\n"); 1456 return -ENOMEM; 1457 } 1458 key->priv = priv; 1459 priv->driver = driver; 1460 priv->cipher = key->cipher; 1461 TAILQ_INIT(&priv->dev_keys); 1462 1463 if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) { 1464 /* DPDK expects the keys to be concatenated together. 
*/ 1465 priv->xts_key = calloc(key->key_size + key->key2_size + 1, sizeof(char)); 1466 if (!priv->xts_key) { 1467 SPDK_ERRLOG("Memory allocation failed\n"); 1468 accel_dpdk_cryptodev_key_deinit(key); 1469 return -ENOMEM; 1470 } 1471 memcpy(priv->xts_key, key->key, key->key_size); 1472 memcpy(priv->xts_key + key->key_size, key->key2, key->key2_size); 1473 } 1474 1475 pthread_mutex_lock(&g_device_lock); 1476 TAILQ_FOREACH(device, &g_crypto_devices, link) { 1477 if (device->type != driver) { 1478 continue; 1479 } 1480 key_handle = calloc(1, sizeof(*key_handle)); 1481 if (!key_handle) { 1482 pthread_mutex_unlock(&g_device_lock); 1483 accel_dpdk_cryptodev_key_deinit(key); 1484 return -ENOMEM; 1485 } 1486 key_handle->device = device; 1487 TAILQ_INSERT_TAIL(&priv->dev_keys, key_handle, link); 1488 rc = accel_dpdk_cryptodev_key_handle_configure(key, key_handle); 1489 if (rc) { 1490 pthread_mutex_unlock(&g_device_lock); 1491 accel_dpdk_cryptodev_key_deinit(key); 1492 return rc; 1493 } 1494 if (driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) { 1495 /* For MLX5_PCI we need to register a key on each device since 1496 * the key is bound to a specific Protection Domain, 1497 * so don't break the loop */ 1498 break; 1499 } 1500 } 1501 pthread_mutex_unlock(&g_device_lock); 1502 1503 if (TAILQ_EMPTY(&priv->dev_keys)) { 1504 free(priv); 1505 return -ENODEV; 1506 } 1507 1508 return 0; 1509 } 1510 1511 static void 1512 accel_dpdk_cryptodev_write_config_json(struct spdk_json_write_ctx *w) 1513 { 1514 spdk_json_write_object_begin(w); 1515 spdk_json_write_named_string(w, "method", "dpdk_cryptodev_scan_accel_module"); 1516 spdk_json_write_object_end(w); 1517 1518 spdk_json_write_object_begin(w); 1519 spdk_json_write_named_string(w, "method", "dpdk_cryptodev_set_driver"); 1520 spdk_json_write_named_object_begin(w, "params"); 1521 spdk_json_write_named_string(w, "driver_name", g_driver_names[g_dpdk_cryptodev_driver]); 1522 spdk_json_write_object_end(w); 1523 spdk_json_write_object_end(w); 1524 } 1525 1526 static int 1527 accel_dpdk_cryptodev_get_operation_info(enum spdk_accel_opcode opcode, 1528 const struct spdk_accel_operation_exec_ctx *ctx, 1529 struct spdk_accel_opcode_info *info) 1530 { 1531 if (!accel_dpdk_cryptodev_supports_opcode(opcode)) { 1532 SPDK_ERRLOG("Received unexpected opcode: %d", opcode); 1533 assert(false); 1534 return -EINVAL; 1535 } 1536 1537 switch (g_dpdk_cryptodev_driver) { 1538 case ACCEL_DPDK_CRYPTODEV_DRIVER_QAT: 1539 info->required_alignment = spdk_u32log2(ctx->block_size); 1540 break; 1541 default: 1542 info->required_alignment = 0; 1543 break; 1544 } 1545 1546 return 0; 1547 } 1548 1549 static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module = { 1550 .module_init = accel_dpdk_cryptodev_init, 1551 .module_fini = accel_dpdk_cryptodev_fini, 1552 .write_config_json = accel_dpdk_cryptodev_write_config_json, 1553 .get_ctx_size = accel_dpdk_cryptodev_ctx_size, 1554 .name = "dpdk_cryptodev", 1555 .supports_opcode = accel_dpdk_cryptodev_supports_opcode, 1556 .get_io_channel = accel_dpdk_cryptodev_get_io_channel, 1557 .submit_tasks = accel_dpdk_cryptodev_submit_tasks, 1558 .crypto_key_init = accel_dpdk_cryptodev_key_init, 1559 .crypto_key_deinit = accel_dpdk_cryptodev_key_deinit, 1560 .crypto_supports_cipher = accel_dpdk_cryptodev_supports_cipher, 1561 .get_operation_info = accel_dpdk_cryptodev_get_operation_info, 1562 }; 1563
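/* For reference, a minimal sketch of the JSON emitted by accel_dpdk_cryptodev_write_config_json()
 * above. The driver_name value follows g_dpdk_cryptodev_driver; crypto_aesni_mb is shown here only
 * because it is the default:
 *
 *   { "method": "dpdk_cryptodev_scan_accel_module" }
 *   { "method": "dpdk_cryptodev_set_driver", "params": { "driver_name": "crypto_aesni_mb" } }
 */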