xref: /dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c (revision 97b914f4e715565d53d38ac6e04815b9be5e58a9)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017-2022 Intel Corporation
3  */
4 
5 #include <rte_cryptodev.h>
6 #include <cryptodev_pmd.h>
7 #include "qat_sym_session.h"
8 #include "qat_sym.h"
9 #include "qat_asym.h"
10 #include "qat_crypto.h"
11 #include "qat_crypto_pmd_gens.h"
12 
13 #define MIXED_CRYPTO_MIN_FW_VER 0x04090000
14 
/*
 * Symmetric crypto capabilities advertised by QAT GEN2 devices.
 *
 * Each entry is built from the QAT_SYM_*_CAP() helper macros:
 *   - CAP_SET(block_size, n) fixes the algorithm block size in bytes;
 *   - CAP_RNG(field, min, max, inc) advertises the supported range for
 *     key/digest/aad/iv sizes (inc == 0 means only a single value);
 *   - CAP_RNG_ZERO(field) advertises a fixed size of zero (unused field).
 * The table is terminated by RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() and
 * returned verbatim by qat_sym_crypto_cap_get_gen2().
 */
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen2[] = {
	/* Plain (non-HMAC) hash */
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	/* AEAD */
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	/* Authentication-only algorithms */
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
			CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(MD5_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(KASUMI_F9,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	/* Cipher-only algorithms */
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_XTS,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(KASUMI_F8,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(3DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CTR,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(ZUC_EEA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(ZUC_EIA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
133 
134 static int
135 qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
136 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
137 {
138 	struct qat_cryptodev_private *qat_sym_private = dev->data->dev_private;
139 	struct qat_qp *qp;
140 	int ret;
141 
142 	if (qat_cryptodev_qp_setup(dev, qp_id, qp_conf, socket_id)) {
143 		QAT_LOG(DEBUG, "QAT qp setup failed");
144 		return -1;
145 	}
146 
147 	qp = qat_sym_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id];
148 	ret = qat_cq_get_fw_version(qp);
149 	if (ret < 0) {
150 		qat_cryptodev_qp_release(dev, qp_id);
151 		return ret;
152 	}
153 
154 	if (ret != 0)
155 		QAT_LOG(DEBUG, "QAT firmware version: %d.%d.%d",
156 				(ret >> 24) & 0xff,
157 				(ret >> 16) & 0xff,
158 				(ret >> 8) & 0xff);
159 	else
160 		QAT_LOG(DEBUG, "unknown QAT firmware version");
161 
162 	/* set capabilities based on the fw version */
163 	qat_sym_private->internal_capabilities = QAT_SYM_CAP_VALID |
164 			((ret >= MIXED_CRYPTO_MIN_FW_VER) ?
165 					QAT_SYM_CAP_MIXED_CRYPTO : 0);
166 	return 0;
167 }
168 
169 void
170 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
171 		uint8_t hash_flag)
172 {
173 	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
174 	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
175 			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
176 			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
177 
178 	/* Set the Use Extended Protocol Flags bit in LW 1 */
179 	QAT_FIELD_SET(header->comn_req_flags,
180 			QAT_COMN_EXT_FLAGS_USED,
181 			QAT_COMN_EXT_FLAGS_BITPOS,
182 			QAT_COMN_EXT_FLAGS_MASK);
183 
184 	/* Set Hash Flags in LW 28 */
185 	cd_ctrl->hash_flags |= hash_flag;
186 
187 	/* Set proto flags in LW 1 */
188 	switch (session->qat_cipher_alg) {
189 	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
190 		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
191 				ICP_QAT_FW_LA_SNOW_3G_PROTO);
192 		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
193 				header->serv_specif_flags, 0);
194 		break;
195 	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
196 		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
197 				ICP_QAT_FW_LA_NO_PROTO);
198 		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
199 				header->serv_specif_flags,
200 				ICP_QAT_FW_LA_ZUC_3G_PROTO);
201 		break;
202 	default:
203 		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
204 				ICP_QAT_FW_LA_NO_PROTO);
205 		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
206 				header->serv_specif_flags, 0);
207 		break;
208 	}
209 }
210 
211 static int
212 qat_sym_crypto_set_session_gen2(void *cdev, void *session)
213 {
214 	struct rte_cryptodev *dev = cdev;
215 	struct qat_sym_session *ctx = session;
216 	const struct qat_cryptodev_private *qat_private =
217 			dev->data->dev_private;
218 	int ret;
219 
220 	ret = qat_sym_crypto_set_session_gen1(cdev, session);
221 	if (ret == -ENOTSUP) {
222 		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
223 		 * but some are not supported by GEN2, so checking here
224 		 */
225 		if ((qat_private->internal_capabilities &
226 				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
227 			return -ENOTSUP;
228 
229 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
230 				ctx->qat_cipher_alg !=
231 				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
232 			qat_sym_session_set_ext_hash_flags_gen2(ctx,
233 				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
234 		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
235 				ctx->qat_cipher_alg !=
236 				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
237 			qat_sym_session_set_ext_hash_flags_gen2(ctx,
238 				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
239 		} else if ((ctx->aes_cmac ||
240 				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
241 				(ctx->qat_cipher_alg ==
242 				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
243 				ctx->qat_cipher_alg ==
244 				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
245 			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
246 		}
247 
248 		ret = 0;
249 	}
250 
251 	return ret;
252 }
253 
/*
 * GEN2 symmetric cryptodev operations table.  All entries are the generic
 * QAT handlers except queue_pair_setup, which uses the GEN2-specific
 * version that also probes the firmware for mixed crypto support.
 */
struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {

	/* Device related operations */
	.dev_configure		= qat_cryptodev_config,
	.dev_start		= qat_cryptodev_start,
	.dev_stop		= qat_cryptodev_stop,
	.dev_close		= qat_cryptodev_close,
	.dev_infos_get		= qat_cryptodev_info_get,

	.stats_get		= qat_cryptodev_stats_get,
	.stats_reset		= qat_cryptodev_stats_reset,
	.queue_pair_setup	= qat_sym_crypto_qp_setup_gen2,
	.queue_pair_release	= qat_cryptodev_qp_release,

	/* Crypto related operations */
	.sym_session_get_size	= qat_sym_session_get_private_size,
	.sym_session_configure	= qat_sym_session_configure,
	.sym_session_clear	= qat_sym_session_clear,

	/* Raw data-path API related operations */
	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
};
277 
278 static struct qat_capabilities_info
279 qat_sym_crypto_cap_get_gen2(struct qat_pci_device *qat_dev __rte_unused)
280 {
281 	struct qat_capabilities_info capa_info;
282 	capa_info.data = qat_sym_crypto_caps_gen2;
283 	capa_info.size = sizeof(qat_sym_crypto_caps_gen2);
284 	return capa_info;
285 }
286 
/*
 * Constructor: register the GEN2 symmetric crypto handlers in the
 * per-generation dispatch table.  Raw data-path setup and feature-flag
 * discovery are shared with GEN1; ops, capabilities and session setup
 * are GEN2-specific.
 */
RTE_INIT(qat_sym_crypto_gen2_init)
{
	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
			qat_sym_crypto_cap_get_gen2;
	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
			qat_sym_crypto_set_session_gen2;
	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen1;
	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;

#ifdef RTE_LIB_SECURITY
	qat_sym_gen_dev_ops[QAT_GEN2].create_security_ctx =
			qat_sym_create_security_gen1;
#endif
}
304 
/*
 * Constructor: register the GEN2 asymmetric crypto handlers.  GEN2 asym
 * support is identical to GEN1, so all entries reuse the GEN1 handlers.
 */
RTE_INIT(qat_asym_crypto_gen2_init)
{
	qat_asym_gen_dev_ops[QAT_GEN2].cryptodev_ops =
			&qat_asym_crypto_ops_gen1;
	qat_asym_gen_dev_ops[QAT_GEN2].get_capabilities =
			qat_asym_crypto_cap_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
			qat_asym_crypto_feature_flags_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
			qat_asym_crypto_set_session_gen1;
}
316