/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2024 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

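/*
 * Symmetric crypto capabilities advertised by the GEN LCE PMD: only
 * AES-256 GCM AEAD (32B key, 16B digest, 12B IV, up to 240B AAD) is exposed.
 */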
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen_lce[] = {
        QAT_SYM_AEAD_CAP(AES_GCM,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 32, 32, 0), CAP_RNG(digest_size, 16, 16, 0),
                CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
        RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

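/*
 * Append a single flat buffer (IOVA + length) to a QAT SGL.
 * Returns -EINVAL when the SGL already holds QAT_SYM_SGL_MAX_NUMBER entries.
 */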
static int
qat_sgl_add_buffer_gen_lce(void *list_in, uint64_t addr, uint32_t len)
{
        struct qat_sgl *list = (struct qat_sgl *)list_in;
        uint32_t nr;

        nr = list->num_bufs;

        if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
                QAT_DP_LOG(ERR, "Adding entry %d failed, no empty SGL buffer", nr);
                return -EINVAL;
        }

        list->buffers[nr].len = len;
        list->buffers[nr].resrvd = 0;
        list->buffers[nr].addr = addr;

        list->num_bufs++;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
        QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
                        nr, list->buffers[nr].len, list->buffers[nr].addr);
#endif
        return 0;
}

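/*
 * Walk an mbuf chain and append up to data_len bytes, starting at offset,
 * as SGL entries. Returns 0 on success, -EINVAL if the chain is shorter
 * than data_len or the SGL runs out of entries.
 */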
static int
qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
                void *list_in, uint32_t data_len)
{
        struct qat_sgl *list = (struct qat_sgl *)list_in;
        uint32_t nr, buf_len;
        int res = -EINVAL;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        uint32_t start_idx = list->num_bufs;
#endif

        /* Append to the existing list */
        nr = list->num_bufs;

        for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
                if (offset >= rte_pktmbuf_data_len(buf)) {
                        offset -= rte_pktmbuf_data_len(buf);
                        /* Jump to next mbuf */
                        continue;
                }

                list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
                list->buffers[nr].resrvd = 0;
                list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);

                offset = 0;
                buf_len += list->buffers[nr].len;

                if (buf_len >= data_len) {
                        list->buffers[nr].len -= buf_len - data_len;
                        res = 0;
                        break;
                }
                ++nr;
        }

        if (unlikely(res != 0)) {
                if (nr == QAT_SYM_SGL_MAX_NUMBER)
                        QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
                                        QAT_SYM_SGL_MAX_NUMBER);
                else
                        QAT_DP_LOG(ERR, "Mbuf chain is too short");
        } else {
                list->num_bufs = ++nr;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
                QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
                for (nr = start_idx; nr < list->num_bufs; nr++) {
                        QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
                                        nr, list->buffers[nr].len,
                                        list->buffers[nr].addr);
                }
#endif
        }

        return res;
}

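/*
 * Build a GEN LCE AEAD (AES-256 GCM) request descriptor for a single
 * rte_crypto_op. Source and destination are always described by SGLs and
 * the operation is always in-place; the digest gets its own SGL entry only
 * when it is not already contiguous with the cipher text.
 */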
static int
qat_sym_build_op_aead_gen_lce(void *in_op, struct qat_sym_session *ctx,
                uint8_t *out_msg, void *op_cookie)
{
        struct qat_sym_op_cookie *cookie = op_cookie;
        struct rte_crypto_op *op = in_op;
        uint64_t digest_phys_addr, aad_phys_addr;
        uint16_t iv_len, aad_len, digest_len, key_len;
        uint32_t cipher_ofs, iv_offset, cipher_len;
        register struct icp_qat_fw_la_bulk_req *qat_req;
        struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
        enum icp_qat_hw_cipher_dir dir;
        bool is_digest_adjacent = false;

        if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
                        ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
                        ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {

                QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
                                "GEN_LCE PMD only supports AES-256 AEAD mode",
                                ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
                return -EINVAL;
        }

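        /* Start from the session's pre-built request template and attach
         * the op as opaque data for completion handling.
         */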
        qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
        rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
        qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
        cipher_param = (void *)&qat_req->serv_specif_rqpars;

        dir = ctx->qat_dir;

        aad_phys_addr = op->sym->aead.aad.phys_addr;
        aad_len = ctx->aad_len;

        iv_offset = ctx->cipher_iv.offset;
        iv_len = ctx->cipher_iv.length;

        cipher_ofs = op->sym->aead.data.offset;
        cipher_len = op->sym->aead.data.length;

        digest_phys_addr = op->sym->aead.digest.phys_addr;
        digest_len = ctx->digest_length;

        /* Up to 16B of IV can be embedded directly in the descriptor,
         * but GCM on GEN LCE supports only a 12B IV.
         */
        if (iv_len != GCM_IV_LENGTH_GEN_LCE) {
                QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.", iv_len);
                return -EINVAL;
        }

        rte_memcpy(cipher_param->u.cipher_IV_array,
                        rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset), iv_len);

        /* Always SGL */
        RTE_ASSERT((qat_req->comn_hdr.comn_req_flags & ICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);
        /* Always in-place */
        RTE_ASSERT(op->sym->m_dst == NULL);

        /* Key buffer address is already programmed by reusing the
         * content-descriptor buffer
         */
        key_len = ctx->auth_key_length;

        cipher_param->spc_aad_sz = aad_len;
        cipher_param->cipher_length = key_len;
        cipher_param->spc_auth_res_sz = digest_len;

        /* Knowing the digest is contiguous with the cipher text helps optimize the SGL */
        if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len) == digest_phys_addr)
                is_digest_adjacent = true;

        /* SRC-SGL: 3 entries:
         * a) AAD
         * b) cipher
         * c) digest (decrypt only, and only when the digest buffer is not adjacent)
         */
        cookie->qat_sgl_src.num_bufs = 0;
        if (aad_len)
                qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src, aad_phys_addr, aad_len);

        if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
                qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_src,
                                cipher_len + digest_len);
        } else {
                qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_src,
                                cipher_len);

                /* Digest buffer in decrypt job */
                if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
                        qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src,
                                        digest_phys_addr, digest_len);
        }

        /* (in-place) DST-SGL: 2 entries:
         * a) cipher
         * b) digest (encrypt only, and only when the digest buffer is not adjacent)
         */
        cookie->qat_sgl_dst.num_bufs = 0;

        if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
                qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_dst,
                                cipher_len + digest_len);
        } else {
                qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_dst,
                                cipher_len);

                /* Digest buffer in encrypt job */
                if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
                        qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_dst,
                                        digest_phys_addr, digest_len);
        }

        /* Length values in the 128B descriptor */
        qat_req->comn_mid.src_length = cipher_len;
        qat_req->comn_mid.dst_length = cipher_len;

        if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in encrypt job */
                qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;

        /* src & dst SGL addresses in the 128B descriptor */
        qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
        qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, sizeof(struct icp_qat_fw_la_bulk_req));
        QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", rte_pktmbuf_mtod(op->sym->m_src, uint8_t *),
                        rte_pktmbuf_data_len(op->sym->m_src));
        QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data, digest_len);
        QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
#endif
        return 0;
}

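/*
 * Session configuration hook for GEN LCE: only AES-256 GCM AEAD sessions
 * get a build_request handler; other sessions are left without one.
 */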
static int
qat_sym_crypto_set_session_gen_lce(void *cdev __rte_unused, void *session)
{
        struct qat_sym_session *ctx = session;
        qat_sym_build_request_t build_request = NULL;
        enum rte_proc_type_t proc_type = rte_eal_process_type();

        if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
                return -EINVAL;

        /* Build request for AEAD */
        if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
                        ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
                build_request = qat_sym_build_op_aead_gen_lce;
                ctx->build_request[proc_type] = build_request;
        }
        return 0;
}

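/*
 * Copy the GEN LCE capability table into a memzone (looked up first, then
 * reserved if absent) and point internals->qat_dev_capabilities at it.
 */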
static int
qat_sym_crypto_cap_get_gen_lce(struct qat_cryptodev_private *internals,
                const char *capa_memz_name,
                const uint16_t __rte_unused slice_map)
{
        const uint32_t size = sizeof(qat_sym_crypto_caps_gen_lce);
        uint32_t i;

        internals->capa_mz = rte_memzone_lookup(capa_memz_name);
        if (internals->capa_mz == NULL) {
                internals->capa_mz = rte_memzone_reserve(capa_memz_name, size, rte_socket_id(), 0);
                if (internals->capa_mz == NULL) {
                        QAT_LOG(DEBUG, "Error allocating memzone for capabilities");
                        return -1;
                }
        }

        struct rte_cryptodev_capabilities *addr =
                        (struct rte_cryptodev_capabilities *)
                        internals->capa_mz->addr;
        const struct rte_cryptodev_capabilities *capabilities =
                        qat_sym_crypto_caps_gen_lce;
        const uint32_t capa_num = size / sizeof(struct rte_cryptodev_capabilities);
        uint32_t curr_capa = 0;

        for (i = 0; i < capa_num; i++) {
                memcpy(addr + curr_capa, capabilities + i,
                                sizeof(struct rte_cryptodev_capabilities));
                curr_capa++;
        }
        internals->qat_dev_capabilities = internals->capa_mz->addr;

        return 0;
}

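/* Register GEN LCE symmetric crypto ops; GEN1 cryptodev ops and feature
 * flags are reused, and the raw data-path context is not supported.
 */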
RTE_INIT(qat_sym_crypto_gen_lce_init)
{
        qat_sym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = &qat_sym_crypto_ops_gen1;
        qat_sym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = qat_sym_crypto_cap_get_gen_lce;
        qat_sym_gen_dev_ops[QAT_GEN_LCE].set_session = qat_sym_crypto_set_session_gen_lce;
        qat_sym_gen_dev_ops[QAT_GEN_LCE].set_raw_dp_ctx = NULL;
        qat_sym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1;
}

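/* GEN LCE exposes no asymmetric crypto; all asym ops stay NULL. */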
RTE_INIT(qat_asym_crypto_gen_lce_init)
{
        qat_asym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = NULL;
        qat_asym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = NULL;
        qat_asym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = NULL;
        qat_asym_gen_dev_ops[QAT_GEN_LCE].set_session = NULL;
}