/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

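/*
 * Minimum firmware version supporting mixed crypto sessions (e.g. a wireless
 * hash such as SNOW3G/ZUC paired with a non-wireless cipher). Encoded as
 * major.minor.patch in the top three bytes, i.e. 0x04090000 is 4.9.0.
 */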
#define MIXED_CRYPTO_MIN_FW_VER 0x04090000

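/*
 * Legacy algorithms, advertised only when the device is probed with the
 * legacy_alg option enabled (see qat_sym_crypto_cap_get_gen2() below).
 */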
static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen2[] = {
	QAT_SYM_CIPHER_CAP(DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CTR,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(MD5_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
};

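/* Algorithms always advertised by GEN2 symmetric crypto devices. */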
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen2[] = {
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_256,
		CAP_SET(block_size, 136),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(KASUMI_F9,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_XTS,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(KASUMI_F8,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(ZUC_EEA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(ZUC_EIA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

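/*
 * GEN2-specific queue pair setup: after the generic setup succeeds, probe
 * the firmware version on the new qp and record whether mixed crypto
 * sessions are supported.
 */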
static int
qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct qat_cryptodev_private *qat_sym_private = dev->data->dev_private;
	struct qat_qp *qp;
	int ret;

	if (qat_cryptodev_qp_setup(dev, qp_id, qp_conf, socket_id)) {
		QAT_LOG(DEBUG, "QAT qp setup failed");
		return -1;
	}

	qp = qat_sym_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id];
	ret = qat_cq_get_fw_version(qp);
	if (ret < 0) {
		qat_cryptodev_qp_release(dev, qp_id);
		return ret;
	}

	if (ret != 0)
		QAT_LOG(DEBUG, "QAT firmware version: %d.%d.%d",
				(ret >> 24) & 0xff,
				(ret >> 16) & 0xff,
				(ret >> 8) & 0xff);
	else
		QAT_LOG(DEBUG, "unknown QAT firmware version");

	/* set capabilities based on the fw version */
	qat_sym_private->internal_capabilities |= QAT_SYM_CAP_VALID |
			((ret >= MIXED_CRYPTO_MIN_FW_VER) ?
					QAT_SYM_CAP_MIXED_CRYPTO : 0);
	return 0;
}

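/*
 * Mark a session as using the extended protocol flags required for mixed
 * wireless algorithms: set the extended-flags bit in the request header,
 * OR the supplied hash flag into the content descriptor control block,
 * and fix up the SNOW3G/ZUC proto flags to match the cipher algorithm.
 */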
void
qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
		uint8_t hash_flag)
{
	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
			session->fw_req.cd_ctrl.content_desc_ctrl_lw;

	/* Set the Use Extended Protocol Flags bit in LW 1 */
	ICP_QAT_FW_USE_EXTENDED_PROTOCOL_FLAGS_SET(
			header->ext_flags, QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS);

	/* Set Hash Flags in LW 28 */
	cd_ctrl->hash_flags |= hash_flag;

	/* Set proto flags in LW 1 */
	switch (session->qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_SNOW_3G_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
	case ICP_QAT_HW_CIPHER_ALGO_ZUC_256:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	default:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	}
}

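/*
 * GEN2 session setup: delegate to the GEN1 path first; if it rejects the
 * session as an unsupported mixed-algorithm combination, retry it here
 * using the extended hash flags, provided the firmware advertised
 * QAT_SYM_CAP_MIXED_CRYPTO at queue pair setup time.
 */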
static int
qat_sym_crypto_set_session_gen2(void *cdev, void *session)
{
	struct rte_cryptodev *dev = cdev;
	struct qat_sym_session *ctx = session;
	const struct qat_cryptodev_private *qat_private =
			dev->data->dev_private;
	int ret;

	ret = qat_sym_crypto_set_session_gen1(cdev, session);
	if (ret == -ENOTSUP) {
		/* GEN1 returns -ENOTSUP for mixed algorithm combinations it
		 * cannot handle; some of those are also unsupported on GEN2,
		 * so check the mixed crypto capability here.
		 */
		if ((qat_private->internal_capabilities &
				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
			return -ENOTSUP;

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
		} else if ((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		}

		ret = 0;
	}

	return ret;
}

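/*
 * GEN2 cryptodev ops table: identical to the generic helpers except for
 * queue_pair_setup, which additionally probes the firmware version.
 */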
struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {

	/* Device related operations */
	.dev_configure		= qat_cryptodev_config,
	.dev_start		= qat_cryptodev_start,
	.dev_stop		= qat_cryptodev_stop,
	.dev_close		= qat_cryptodev_close,
	.dev_infos_get		= qat_cryptodev_info_get,

	.stats_get		= qat_cryptodev_stats_get,
	.stats_reset		= qat_cryptodev_stats_reset,
	.queue_pair_setup	= qat_sym_crypto_qp_setup_gen2,
	.queue_pair_release	= qat_cryptodev_qp_release,

	/* Crypto related operations */
	.sym_session_get_size	= qat_sym_session_get_private_size,
	.sym_session_configure	= qat_sym_session_configure,
	.sym_session_clear	= qat_sym_session_clear,

	/* Raw data-path API related operations */
	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
};

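/*
 * Build the capability array in a shared memzone: legacy algorithms are
 * prepended only when the legacy_alg device option is set.
 */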
static int
qat_sym_crypto_cap_get_gen2(struct qat_cryptodev_private *internals,
			const char *capa_memz_name,
			const uint16_t __rte_unused slice_map)
{
	uint32_t legacy_capa_num;
	uint32_t size = sizeof(qat_sym_crypto_caps_gen2);
	uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen2);
	legacy_capa_num = legacy_size / sizeof(struct rte_cryptodev_capabilities);

	if (unlikely(internals->qat_dev->options.legacy_alg))
		size = size + legacy_size;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
			(struct rte_cryptodev_capabilities *)
				internals->capa_mz->addr;
	struct rte_cryptodev_capabilities *capabilities;

	if (unlikely(internals->qat_dev->options.legacy_alg)) {
		capabilities = qat_sym_crypto_legacy_caps_gen2;
		memcpy(addr, capabilities, legacy_size);
		addr += legacy_capa_num;
	}
	capabilities = qat_sym_crypto_caps_gen2;
	memcpy(addr, capabilities, sizeof(qat_sym_crypto_caps_gen2));
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}

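/*
 * Constructors run at load time and register the GEN2 symmetric and
 * asymmetric handlers in the per-generation dispatch tables; raw data-path,
 * feature flag and security context handling reuse the GEN1 code.
 */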
RTE_INIT(qat_sym_crypto_gen2_init)
{
	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
			qat_sym_crypto_cap_get_gen2;
	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
			qat_sym_crypto_set_session_gen2;
	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen1;
	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN2].create_security_ctx =
			qat_sym_create_security_gen1;
}

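/* Asymmetric crypto on GEN2 reuses the GEN1 implementation unchanged. */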
RTE_INIT(qat_asym_crypto_gen2_init)
{
	qat_asym_gen_dev_ops[QAT_GEN2].cryptodev_ops =
			&qat_asym_crypto_ops_gen1;
	qat_asym_gen_dev_ops[QAT_GEN2].get_capabilities =
			qat_asym_crypto_cap_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
			qat_asym_crypto_feature_flags_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
			qat_asym_crypto_set_session_gen1;
}