xref: /dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

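/* Legacy algorithm capabilities, exposed only when qat_legacy_capa is set. */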
static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen4[] = {
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(SM4_ECB,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 0, 0, 0)),
};

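/* Capabilities always advertised by GEN4 symmetric crypto devices. */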
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = {
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 32, 32, 0),
		CAP_RNG(digest_size, 16, 16, 0),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SM3,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_AUTH_CAP(SM3_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 16, 64, 4), CAP_RNG(digest_size, 32, 32, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

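/*
 * Populate the device capability array in a named memzone. When the
 * qat_legacy_capa flag is set, the legacy capabilities are copied in
 * ahead of the default GEN4 set.
 */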
static int
qat_sym_crypto_cap_get_gen4(struct qat_cryptodev_private *internals,
			const char *capa_memz_name,
			const uint16_t __rte_unused slice_map)
{
	uint32_t legacy_capa_num;
	uint32_t size = sizeof(qat_sym_crypto_caps_gen4);
	uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen4);

	legacy_capa_num = legacy_size / sizeof(struct rte_cryptodev_capabilities);

	if (unlikely(qat_legacy_capa))
		size = size + legacy_size;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
			(struct rte_cryptodev_capabilities *)
				internals->capa_mz->addr;

	struct rte_cryptodev_capabilities *capabilities;

	if (unlikely(qat_legacy_capa)) {
		capabilities = qat_sym_crypto_legacy_caps_gen4;
		memcpy(addr, capabilities, legacy_size);
		addr += legacy_capa_num;
	}
	capabilities = qat_sym_crypto_caps_gen4;
	memcpy(addr, capabilities, sizeof(qat_sym_crypto_caps_gen4));
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}

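/*
 * Fill one AEAD request. Single-pass UCS sessions use the GEN4 cipher
 * request parameter layout directly; all other sessions fall back to
 * the GEN1 path.
 */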
static __rte_always_inline void
enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	if (ctx->is_single_pass && ctx->is_ucs) {
		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
			(void *)&req->serv_specif_rqpars;
		struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *)&req->serv_specif_rqpars;

		/* QAT GEN4 uses single-pass mode to treat the AEAD job as
		 * a cipher operation.
		 */
		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
				req);
		cipher_param->cipher_offset = ofs.ofs.cipher.head;
		cipher_param->cipher_length = data_len -
				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

		cipher_param_20->spc_aad_addr = aad->iova;
		cipher_param_20->spc_auth_res_addr = digest->iova;

		return;
	}

	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
}

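/*
 * Build a GEN4 AEAD request from a crypto op: convert the op into
 * vector form, set the request data pointers, then fill in the
 * single-pass AEAD parameters.
 */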
static int
qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *qat_req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct rte_crypto_va_iova_ptr aad;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &aad, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
		total_len);

	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			NULL, &aad, &digest);

	return 0;
}

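/*
 * GEN4 session setup: reuse the GEN1 path, then switch single-pass UCS
 * AEAD sessions to the GEN4 build routine and recover the mixed
 * hash/cipher combinations that GEN1 rejects with -ENOTSUP.
 */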
int
qat_sym_crypto_set_session_gen4(void *cdev, void *session)
{
	struct qat_sym_session *ctx = session;
	enum rte_proc_type_t proc_type = rte_eal_process_type();
	int ret;

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	ret = qat_sym_crypto_set_session_gen1(cdev, session);
	/* Special single-pass build request for GEN4 */
	if (ctx->is_single_pass && ctx->is_ucs)
		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;

	if (ret == -ENOTSUP) {
		/* GEN1 returns -ENOTSUP for mixed algorithm combinations it
		 * cannot handle; GEN4 supports them, so set the extended hash
		 * flags and clear the error here.
		 */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
		} else if ((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		}

		ret = 0;
	}

	return ret;
}

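/*
 * Raw data-path API: enqueue a single AEAD job at the current TX ring
 * tail position.
 */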
static int
qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;

	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
			NULL, aad, digest);

	return 0;
}

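/*
 * Raw data-path API: enqueue a burst of AEAD jobs, bounded by the free
 * space in the TX ring. Returns the number of jobs queued.
 */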
static uint32_t
qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
				&vec->digest[i], &vec->aad[i], ofs,
				(uint32_t)data_len);

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i], NULL,
				&vec->aad[i], &vec->digest[i]);
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

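/*
 * Configure the raw data-path context: start from the GEN1 setup and
 * override the enqueue handlers for single-pass UCS AEAD sessions.
 */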
int
qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
{
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
	struct qat_sym_session *ctx = _ctx;
	int ret;

	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
	if (ret < 0)
		return ret;

	if (ctx->is_single_pass && ctx->is_ucs) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
	}

	return 0;
}

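/* Register symmetric crypto handlers for GEN4 and VQAT devices. */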
RTE_INIT(qat_sym_crypto_gen4_init)
{
	qat_sym_gen_dev_ops[QAT_VQAT].cryptodev_ops =
		qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
	qat_sym_gen_dev_ops[QAT_VQAT].get_capabilities =
		qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
			qat_sym_crypto_cap_get_gen4;
	qat_sym_gen_dev_ops[QAT_VQAT].set_session =
		qat_sym_gen_dev_ops[QAT_GEN4].set_session =
			qat_sym_crypto_set_session_gen4;
	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen4;
	qat_sym_gen_dev_ops[QAT_VQAT].get_feature_flags =
		qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN4].create_security_ctx =
			qat_sym_create_security_gen1;
}

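/*
 * Register asymmetric crypto handlers for GEN4 and VQAT devices; these
 * reuse the GEN1 implementations.
 */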
RTE_INIT(qat_asym_crypto_gen4_init)
{
	qat_asym_gen_dev_ops[QAT_VQAT].cryptodev_ops =
		qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops =
			&qat_asym_crypto_ops_gen1;
	qat_asym_gen_dev_ops[QAT_VQAT].get_capabilities =
		qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities =
			qat_asym_crypto_cap_get_gen1;
	qat_asym_gen_dev_ops[QAT_VQAT].get_feature_flags =
		qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags =
			qat_asym_crypto_feature_flags_get_gen1;
	qat_asym_gen_dev_ops[QAT_VQAT].set_session =
		qat_asym_gen_dev_ops[QAT_GEN4].set_session =
			qat_asym_crypto_set_session_gen1;
}