/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

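/*
 * Minimum firmware version that supports "mixed crypto", i.e. a wireless
 * cipher (SNOW 3G/ZUC) paired with a hash algorithm from a different suite
 * in a single session. The version is packed one byte per component, so
 * 0x04090000 is firmware 4.9.0.
 */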
#define MIXED_CRYPTO_MIN_FW_VER 0x04090000

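/*
 * Symmetric crypto capabilities advertised by GEN2 devices. Each
 * CAP_RNG(field, min, max, increment) entry describes the supported range
 * of sizes in bytes; CAP_RNG_ZERO(field) means the field is unused (fixed
 * at zero) for that algorithm.
 */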
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen2[] = {
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_256,
		CAP_SET(block_size, 136),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(MD5_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(KASUMI_F9,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_XTS,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(KASUMI_F8,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(3DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CTR,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(ZUC_EEA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(ZUC_EIA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

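/*
 * GEN2 queue pair setup: run the generic QAT qp setup, then query the
 * firmware version over the new queue pair and record whether the
 * firmware is recent enough for mixed crypto sessions.
 */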
static int
qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct qat_cryptodev_private *qat_sym_private = dev->data->dev_private;
	struct qat_qp *qp;
	int ret;

	if (qat_cryptodev_qp_setup(dev, qp_id, qp_conf, socket_id)) {
		QAT_LOG(DEBUG, "QAT qp setup failed");
		return -1;
	}

	qp = qat_sym_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id];
	ret = qat_cq_get_fw_version(qp);
	if (ret < 0) {
		qat_cryptodev_qp_release(dev, qp_id);
		return ret;
	}

	if (ret != 0)
		QAT_LOG(DEBUG, "QAT firmware version: %d.%d.%d",
				(ret >> 24) & 0xff,
				(ret >> 16) & 0xff,
				(ret >> 8) & 0xff);
	else
		QAT_LOG(DEBUG, "unknown QAT firmware version");

	/* set capabilities based on the fw version */
	qat_sym_private->internal_capabilities = QAT_SYM_CAP_VALID |
			((ret >= MIXED_CRYPTO_MIN_FW_VER) ?
					QAT_SYM_CAP_MIXED_CRYPTO : 0);
	return 0;
}

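/*
 * Enable the extended protocol flags on a session and program the
 * per-algorithm hash flags, so the firmware can authenticate with a
 * wireless algorithm (SNOW 3G UIA2/ZUC EIA3) that differs from the
 * configured cipher.
 */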
void
qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
		uint8_t hash_flag)
{
	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
			session->fw_req.cd_ctrl.content_desc_ctrl_lw;

	/* Set the Use Extended Protocol Flags bit in LW 1 */
	QAT_FIELD_SET(header->comn_req_flags,
			QAT_COMN_EXT_FLAGS_USED,
			QAT_COMN_EXT_FLAGS_BITPOS,
			QAT_COMN_EXT_FLAGS_MASK);

	/* Set Hash Flags in LW 28 */
	cd_ctrl->hash_flags |= hash_flag;

	/* Set proto flags in LW 1 */
	switch (session->qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_SNOW_3G_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	default:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	}
}

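/*
 * GEN2 session setup: reuse the GEN1 path, and when it rejects a mixed
 * algorithm combination with -ENOTSUP, accept it here if the firmware
 * advertises mixed crypto support.
 */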
static int
qat_sym_crypto_set_session_gen2(void *cdev, void *session)
{
	struct rte_cryptodev *dev = cdev;
	struct qat_sym_session *ctx = session;
	const struct qat_cryptodev_private *qat_private =
			dev->data->dev_private;
	int ret;

	ret = qat_sym_crypto_set_session_gen1(cdev, session);
	if (ret == -ENOTSUP) {
		/* GEN1 returns -ENOTSUP for mixed algorithm combinations
		 * it cannot handle. GEN2 supports them only when the
		 * firmware advertises mixed crypto, so check that here.
		 */
		if ((qat_private->internal_capabilities &
				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
			return -ENOTSUP;

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
		} else if ((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		}

		ret = 0;
	}

	return ret;
}

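/*
 * GEN2 reuses the generic device and session ops; only queue pair setup
 * is overridden, in order to probe the firmware version.
 */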
struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {

	/* Device related operations */
	.dev_configure		= qat_cryptodev_config,
	.dev_start		= qat_cryptodev_start,
	.dev_stop		= qat_cryptodev_stop,
	.dev_close		= qat_cryptodev_close,
	.dev_infos_get		= qat_cryptodev_info_get,

	.stats_get		= qat_cryptodev_stats_get,
	.stats_reset		= qat_cryptodev_stats_reset,
	.queue_pair_setup	= qat_sym_crypto_qp_setup_gen2,
	.queue_pair_release	= qat_cryptodev_qp_release,

	/* Crypto related operations */
	.sym_session_get_size	= qat_sym_session_get_private_size,
	.sym_session_configure	= qat_sym_session_configure,
	.sym_session_clear	= qat_sym_session_clear,

	/* Raw data-path API related operations */
	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
};

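/*
 * Publish the GEN2 capability table through a memzone. All entries are
 * copied unfiltered; the slice_map argument is unused on this generation.
 */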
static int
qat_sym_crypto_cap_get_gen2(struct qat_cryptodev_private *internals,
			const char *capa_memz_name,
			const uint16_t __rte_unused slice_map)
{
	const uint32_t size = sizeof(qat_sym_crypto_caps_gen2);
	uint32_t i;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
			(struct rte_cryptodev_capabilities *)
				internals->capa_mz->addr;
	const struct rte_cryptodev_capabilities *capabilities =
		qat_sym_crypto_caps_gen2;
	const uint32_t capa_num =
		size / sizeof(struct rte_cryptodev_capabilities);
	uint32_t curr_capa = 0;

	for (i = 0; i < capa_num; i++) {
		memcpy(addr + curr_capa, capabilities + i,
			sizeof(struct rte_cryptodev_capabilities));
		curr_capa++;
	}
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}

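/*
 * Constructor hooks: register the GEN2 symmetric and asymmetric ops with
 * the generation dispatch tables at shared object load time.
 */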
RTE_INIT(qat_sym_crypto_gen2_init)
{
	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
			qat_sym_crypto_cap_get_gen2;
	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
			qat_sym_crypto_set_session_gen2;
	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen1;
	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;

#ifdef RTE_LIB_SECURITY
	qat_sym_gen_dev_ops[QAT_GEN2].create_security_ctx =
			qat_sym_create_security_gen1;
#endif
}

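/* Asymmetric crypto on GEN2 reuses the GEN1 implementation unchanged. */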
RTE_INIT(qat_asym_crypto_gen2_init)
{
	qat_asym_gen_dev_ops[QAT_GEN2].cryptodev_ops =
			&qat_asym_crypto_ops_gen1;
	qat_asym_gen_dev_ops[QAT_GEN2].get_capabilities =
			qat_asym_crypto_cap_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
			qat_asym_crypto_feature_flags_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
			qat_asym_crypto_set_session_gen1;
}