/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <unistd.h>

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_cryptodev_pmd.h>

#include "ccp_dev.h"
#include "ccp_crypto.h"
#include "ccp_pci.h"
#include "ccp_pmd_private.h"

static enum ccp_cmd_order
ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;

	if (xform == NULL)
		return res;
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return CCP_CMD_AUTH;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return CCP_CMD_HASH_CIPHER;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return CCP_CMD_CIPHER;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return CCP_CMD_CIPHER_HASH;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		return CCP_CMD_COMBINED;
	return res;
}

/* configure session */
static int
ccp_configure_session_cipher(struct ccp_session *sess,
			     const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
	size_t i, j, x;

	cipher_xform = &xform->cipher;

	/* set cipher direction */
	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
	else
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;

	/* set cipher key */
	sess->cipher.key_length = cipher_xform->key.length;
	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
		   cipher_xform->key.length);

	/* set iv parameters */
	sess->iv.offset = cipher_xform->iv.offset;
	sess->iv.length = cipher_xform->iv.length;

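	/*
	 * Map the requested rte_crypto cipher algorithm onto a CCP engine
	 * (AES or 3DES) and the engine-specific mode; anything outside
	 * this list is rejected.
	 */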
	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
		sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
		sess->cipher.engine = CCP_ENGINE_3DES;
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo");
		return -1;
	}

	switch (sess->cipher.engine) {
	case CCP_ENGINE_AES:
		if (sess->cipher.key_length == 16)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
		else if (sess->cipher.key_length == 24)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
		else if (sess->cipher.key_length == 32)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
		else {
			CCP_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		for (i = 0; i < sess->cipher.key_length; i++)
			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
				sess->cipher.key[i];
		break;
	case CCP_ENGINE_3DES:
		if (sess->cipher.key_length == 16)
			sess->cipher.ut.des_type = CCP_DES_TYPE_128;
		else if (sess->cipher.key_length == 24)
			sess->cipher.ut.des_type = CCP_DES_TYPE_192;
		else {
			CCP_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		for (j = 0, x = 0; j < sess->cipher.key_length / 8; j++, x += 8)
			for (i = 0; i < 8; i++)
				sess->cipher.key_ccp[(8 + x) - i - 1] =
					sess->cipher.key[i + x];
		break;
	default:
		CCP_LOG_ERR("Invalid CCP Engine");
		return -ENOTSUP;
	}
	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
	return 0;
}

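/*
 * Configure the authentication half of a session. Only the digest length
 * and the generate/verify direction are recorded; no hash algorithms are
 * wired up in this file yet, so the algo switch below rejects everything.
 */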
static int
ccp_configure_session_auth(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_auth_xform *auth_xform = NULL;

	auth_xform = &xform->auth;

	sess->auth.digest_length = auth_xform->digest_length;
	if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	else
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	switch (auth_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported hash algo");
		return -ENOTSUP;
	}
	return 0;
}

static int
ccp_configure_session_aead(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_aead_xform *aead_xform = NULL;

	aead_xform = &xform->aead;

	sess->cipher.key_length = aead_xform->key.length;
	rte_memcpy(sess->cipher.key, aead_xform->key.data,
		   aead_xform->key.length);

	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	} else {
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	}
	sess->auth.aad_length = aead_xform->aad_length;
	sess->auth.digest_length = aead_xform->digest_length;

	/* set iv parameters */
	sess->iv.offset = aead_xform->iv.offset;
	sess->iv.length = aead_xform->iv.length;

	switch (aead_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo");
		return -ENOTSUP;
	}
	return 0;
}

int
ccp_set_session_parameters(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	int ret = 0;

	sess->cmd_id = ccp_get_cmd_id(xform);

	switch (sess->cmd_id) {
	case CCP_CMD_CIPHER:
		cipher_xform = xform;
		break;
	case CCP_CMD_AUTH:
		auth_xform = xform;
		break;
	case CCP_CMD_CIPHER_HASH:
		cipher_xform = xform;
		auth_xform = xform->next;
		break;
	case CCP_CMD_HASH_CIPHER:
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case CCP_CMD_COMBINED:
		aead_xform = xform;
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
		return -1;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;
	if (cipher_xform) {
		ret = ccp_configure_session_cipher(sess, cipher_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
			return ret;
		}
	}
	if (auth_xform) {
		ret = ccp_configure_session_auth(sess, auth_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported auth parameters");
			return ret;
		}
	}
	if (aead_xform) {
		ret = ccp_configure_session_aead(sess, aead_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported aead parameters");
			return ret;
		}
	}
	return ret;
}

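/*
 * A "slot" below is one descriptor in the CCP command queue ring; ops
 * whose IV has to be staged through the local storage block consume an
 * extra passthrough descriptor on top of the cipher descriptor.
 */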
/* calculate CCP descriptors requirement */
static inline int
ccp_cipher_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->cipher.algo) {
	case CCP_CIPHER_ALGO_AES_CBC:
		count = 2;
		/**< op + passthrough for iv */
		break;
	case CCP_CIPHER_ALGO_AES_ECB:
		count = 1;
		/**< only op */
		break;
	case CCP_CIPHER_ALGO_AES_CTR:
		count = 2;
		/**< op + passthrough for iv */
		break;
	case CCP_CIPHER_ALGO_3DES_CBC:
		count = 2;
		/**< op + passthrough for iv */
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
	}
	return count;
}

static inline int
ccp_auth_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->auth.algo) {
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
	}

	return count;
}

static int
ccp_aead_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
	}
	return count;
}

int
ccp_compute_slot_count(struct ccp_session *session)
{
	int count = 0;

	switch (session->cmd_id) {
	case CCP_CMD_CIPHER:
		count = ccp_cipher_slot(session);
		break;
	case CCP_CMD_AUTH:
		count = ccp_auth_slot(session);
		break;
	case CCP_CMD_CIPHER_HASH:
	case CCP_CMD_HASH_CIPHER:
		count = ccp_cipher_slot(session);
		count += ccp_auth_slot(session);
		break;
	case CCP_CMD_COMBINED:
		count = ccp_aead_slot(session);
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
	}

	return count;
}

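/*
 * Append a PASSTHRU descriptor at the current queue index. pst->dir
 * selects the copy direction: system memory into the queue's local
 * storage block (dir = 1) or back out to system memory. Only the queue
 * index is advanced here; ringing the doorbell is left to the caller.
 */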
static void
ccp_perform_passthru(struct ccp_passthru *pst,
		     struct ccp_queue *cmd_q)
{
	struct ccp_desc *desc;
	union ccp_function function;

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;

	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 0;
	CCP_CMD_EOM(desc) = 0;
	CCP_CMD_PROT(desc) = 0;

	function.raw = 0;
	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
	CCP_PT_BITWISE(&function) = pst->bit_mod;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = pst->len;

	if (pst->dir) {
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = 0;
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;

		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
	} else {
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = 0;
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	}

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
}

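/*
 * Build the descriptor(s) for one AES operation. For CBC and CTR the IV
 * (for CTR, the nonce plus IV) is first staged into the queue's LSB with
 * a passthrough descriptor; the AES descriptor then references the
 * source/destination mbuf data and the byte-reversed session key. The
 * queue doorbell is not rung here; process_ops_to_enqueue() does that
 * once per batch.
 */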
static int
ccp_perform_aes(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q,
		struct ccp_batch_info *b_info)
{
	struct ccp_session *session;
	union ccp_function function;
	uint8_t *lsb_buf;
	struct ccp_passthru pst = {0};
	struct ccp_desc *desc;
	phys_addr_t src_addr, dest_addr, key_addr;
	uint8_t *iv;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);
	function.raw = 0;

	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
				   iv, session->iv.length);
			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
			CCP_AES_SIZE(&function) = 0x1F;
		} else {
			lsb_buf =
			&(b_info->lsb_buf[b_info->lsb_buf_idx * CCP_SB_BYTES]);
			rte_memcpy(lsb_buf +
				   (CCP_SB_BYTES - session->iv.length),
				   iv, session->iv.length);
			pst.src_addr = b_info->lsb_buf_phys +
				(b_info->lsb_buf_idx * CCP_SB_BYTES);
			b_info->lsb_buf_idx++;
		}

		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
	}

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	if (likely(op->sym->m_dst != NULL))
		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						op->sym->cipher.data.offset);
	else
		dest_addr = src_addr;
	key_addr = session->cipher.key_phys;

	/* prepare desc for aes command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;

	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}

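/*
 * 3DES-CBC counterpart of ccp_perform_aes(): stage the IV into the LSB
 * via a passthrough descriptor, then build the 3DES descriptor.
 */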
static int
ccp_perform_3des(struct rte_crypto_op *op,
		 struct ccp_queue *cmd_q,
		 struct ccp_batch_info *b_info)
{
	struct ccp_session *session;
	union ccp_function function;
	unsigned char *lsb_buf;
	struct ccp_passthru pst;
	struct ccp_desc *desc;
	uint32_t tail;
	uint8_t *iv;
	phys_addr_t src_addr, dest_addr, key_addr;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	switch (session->cipher.um.des_mode) {
	case CCP_DES_MODE_CBC:
		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx * CCP_SB_BYTES]);
		b_info->lsb_buf_idx++;

		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
			   iv, session->iv.length);

		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)lsb_buf);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
		break;
	case CCP_DES_MODE_CFB:
	case CCP_DES_MODE_ECB:
		CCP_LOG_ERR("Unsupported DES cipher mode");
		return -ENOTSUP;
	}

	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	if (unlikely(op->sym->m_dst != NULL))
		dest_addr =
			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						   op->sym->cipher.data.offset);
	else
		dest_addr = src_addr;

	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	memset(desc, 0, Q_DESC_SIZE);

	/* prepare desc for des command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;

	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;
	CCP_CMD_PROT(desc) = 0;

	function.raw = 0;
	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	if (session->cipher.um.des_mode)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

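	/*
	 * Unlike the AES path, which leaves the doorbell to
	 * process_ops_to_enqueue(), the 3DES path kicks the queue
	 * immediately for this descriptor.
	 */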
	rte_wmb();

	/* Write the new tail address back to the queue register */
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}

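/*
 * Dispatch a cipher op to the engine-specific descriptor builder and
 * account for the number of queue descriptors it consumes (one extra
 * when an IV passthrough is required).
 */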
static inline int
ccp_crypto_cipher(struct rte_crypto_op *op,
		  struct ccp_queue *cmd_q,
		  struct ccp_batch_info *b_info)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->cipher.algo) {
	case CCP_CIPHER_ALGO_AES_CBC:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	case CCP_CIPHER_ALGO_AES_CTR:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	case CCP_CIPHER_ALGO_AES_ECB:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 1;
		break;
	case CCP_CIPHER_ALGO_3DES_CBC:
		result = ccp_perform_3des(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
		return -ENOTSUP;
	}
	return result;
}

static inline int
ccp_crypto_auth(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q __rte_unused,
		struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->auth.algo) {
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
		return -ENOTSUP;
	}

	return result;
}

static inline int
ccp_crypto_aead(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q __rte_unused,
		struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
		return -ENOTSUP;
	}
	return result;
}

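/*
 * Build descriptors for up to nb_ops operations on cmd_q. The caller has
 * already computed slots_req; the reservation is taken from free_slots up
 * front and partially returned if an op fails. The queue tail register is
 * written once for the whole batch, and the batch info is pushed to the
 * processed_pkts ring for the dequeue side to poll on.
 */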
int
process_ops_to_enqueue(const struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       struct ccp_queue *cmd_q,
		       uint16_t nb_ops,
		       int slots_req)
{
	int i, result = 0;
	struct ccp_batch_info *b_info;
	struct ccp_session *session;

	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
		CCP_LOG_ERR("batch info allocation failed");
		return 0;
	}
	/* populate batch info necessary for dequeue */
	b_info->op_idx = 0;
	b_info->lsb_buf_idx = 0;
	b_info->desccnt = 0;
	b_info->cmd_q = cmd_q;
	b_info->lsb_buf_phys =
		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);

	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);
	for (i = 0; i < nb_ops; i++) {
		session = (struct ccp_session *)get_session_private_data(
						 op[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_AUTH:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_CIPHER_HASH:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_HASH_CIPHER:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_COMBINED:
			result = ccp_crypto_aead(op[i], cmd_q, b_info);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
			result = -1;
		}
		if (unlikely(result < 0)) {
			rte_atomic64_add(&b_info->cmd_q->free_slots,
					 (slots_req - b_info->desccnt));
			break;
		}
		b_info->op[i] = op[i];
	}

	b_info->opcnt = i;
	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);

	rte_wmb();
	/* Write the new tail address back to the queue register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
		      b_info->tail_offset);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);

	return i;
}

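/*
 * Digest post-processing on dequeue: read the digest from the ctx_len
 * area at the end of the source mbuf, byte-swap it on the host for SHA
 * types other than SHA1/224/256, then either compare it against the
 * op's digest (verify) or copy it out (generate), and finally trim the
 * ctx area off the mbuf.
 */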
static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
{
	struct ccp_session *session;
	uint8_t *digest_data, *addr;
	struct rte_mbuf *m_last;
	int offset, digest_offset;
	uint8_t digest_le[64];

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	if (session->cmd_id == CCP_CMD_COMBINED) {
		digest_data = op->sym->aead.digest.data;
		digest_offset = op->sym->aead.data.offset +
					op->sym->aead.data.length;
	} else {
		digest_data = op->sym->auth.digest.data;
		digest_offset = op->sym->auth.data.offset +
					op->sym->auth.data.length;
	}
	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
			   m_last->data_len - session->auth.ctx_len);

	rte_mb();
	offset = session->auth.offset;

	if (session->auth.engine == CCP_ENGINE_SHA)
		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
			/* All other algorithms require byte
			 * swap done by host
			 */
			unsigned int i;

			offset = session->auth.ctx_len -
				session->auth.offset - 1;
			for (i = 0; i < session->auth.digest_length; i++)
				digest_le[i] = addr[offset - i];
			offset = 0;
			addr = digest_le;
		}

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
		if (memcmp(addr + offset, digest_data,
			   session->auth.digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

	} else {
		if (unlikely(digest_data == 0))
			digest_data = rte_pktmbuf_mtod_offset(
					op->sym->m_dst, uint8_t *,
					digest_offset);
		rte_memcpy(digest_data, addr + offset,
			   session->auth.digest_length);
	}
	/* Trim area used for digest from mbuf. */
	rte_pktmbuf_trim(op->sym->m_src,
			 session->auth.ctx_len);
}

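/*
 * Copy up to nb_ops finished ops out of a batch: cipher-only ops are
 * simply marked successful, anything with an auth or AEAD stage goes
 * through digest post-processing first.
 */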
static int
ccp_prepare_ops(struct rte_crypto_op **op_d,
		struct ccp_batch_info *b_info,
		uint16_t nb_ops)
{
	int i, min_ops;
	struct ccp_session *session;

	min_ops = RTE_MIN(nb_ops, b_info->opcnt);

	for (i = 0; i < min_ops; i++) {
		op_d[i] = b_info->op[b_info->op_idx++];
		session = (struct ccp_session *)get_session_private_data(
						 op_d[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case CCP_CMD_AUTH:
		case CCP_CMD_CIPHER_HASH:
		case CCP_CMD_HASH_CIPHER:
		case CCP_CMD_COMBINED:
			ccp_auth_dq_prepare(op_d[i]);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
		}
	}

	b_info->opcnt -= min_ops;
	return min_ops;
}

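/*
 * Dequeue completed ops. Completion of a batch is inferred by comparing
 * the hardware head pointer with the head/tail offsets recorded at
 * enqueue time (with ring wrap-around handled); if the head still lies
 * inside the batch, the batch is parked on the qp and nothing is
 * returned yet. Freed descriptors are returned to free_slots and the
 * batch info goes back to its mempool once all its ops are handed out.
 */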
int
process_ops_to_dequeue(struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       uint16_t nb_ops)
{
	struct ccp_batch_info *b_info;
	uint32_t cur_head_offset;

	if (qp->b_info != NULL) {
		b_info = qp->b_info;
		if (unlikely(b_info->op_idx > 0))
			goto success;
	} else if (rte_ring_dequeue(qp->processed_pkts,
				    (void **)&b_info))
		return 0;
	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
				       CMD_Q_HEAD_LO_BASE);

	if (b_info->head_offset < b_info->tail_offset) {
		if ((cur_head_offset >= b_info->head_offset) &&
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	} else {
		if ((cur_head_offset >= b_info->head_offset) ||
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	}

success:
	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
	b_info->desccnt = 0;
	if (b_info->opcnt > 0) {
		qp->b_info = b_info;
	} else {
		rte_mempool_put(qp->batch_mp, (void *)b_info);
		qp->b_info = NULL;
	}

	return nb_ops;
}