/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#ifndef _QAT_SYM_H_
#define _QAT_SYM_H_

#include <rte_cryptodev_pmd.h>
#ifdef RTE_LIB_SECURITY
#include <rte_net_crc.h>
#endif

#ifdef BUILD_QAT_SYM
#include <openssl/evp.h>

#include "qat_common.h"
#include "qat_sym_session.h"
#include "qat_sym_pmd.h"
#include "qat_logs.h"

#define BYTE_LENGTH    8
/* BPI is only used for partial blocks of DES and AES,
 * so the AES block length can be taken as the maximum length
 * for IV, src and dst.
 */
#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ

/*
 * Maximum number of SGL entries
 */
#define QAT_SYM_SGL_MAX_NUMBER	16

struct qat_sym_session;

struct qat_sym_sgl {
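	/* common SGL header fields, expanded from the macro in qat_common.h */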
	qat_sgl_hdr;
	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
} __rte_packed __rte_cache_aligned;

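/*
 * Per-operation cookie, pre-allocated for each queue-pair descriptor slot:
 * holds the source/destination SGL tables and their physical addresses, so
 * no per-op allocation is needed at enqueue time.
 */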
struct qat_sym_op_cookie {
	struct qat_sym_sgl qat_sgl_src;
	struct qat_sym_sgl qat_sgl_dst;
	phys_addr_t qat_sgl_src_phys_addr;
	phys_addr_t qat_sgl_dst_phys_addr;
};

int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
		void *op_cookie, enum qat_device_gen qat_dev_gen);

/** Encrypt a single partial block.
 *  Depends on OpenSSL libcrypto.
 *  Uses ECB mode plus an XOR to perform CFB encryption: the result is
 *  identical, but a single-block ECB call is faster.
 */
static inline int
bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
	uint8_t *encr = encrypted_iv;

	/* ECB method: encrypt the IV, then XOR this with plaintext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_encrypt_err;

	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
		*dst = *src ^ *encr;

	return 0;

cipher_encrypt_err:
	QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
	return -EINVAL;
}
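
/*
 * A minimal sketch (illustrative only, not part of this header) of how the
 * bpi_ctx passed above can be created with libcrypto.  The real setup lives
 * in the session code; the AES-128 choice and the variable names here are
 * assumptions for the example.
 *
 *	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
 *
 *	if (ctx == NULL)
 *		return NULL;
 *	// ECB with padding disabled: bpi_cipher_encrypt() performs the
 *	// CFB XOR against the plaintext itself
 *	if (EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL) != 1 ||
 *			EVP_CIPHER_CTX_set_padding(ctx, 0) != 1) {
 *		EVP_CIPHER_CTX_free(ctx);
 *		return NULL;
 *	}
 *	return ctx;
 */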

static inline uint32_t
qat_bpicipher_postprocess(struct qat_sym_session *ctx,
				struct rte_crypto_op *op)
{
	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len > 0 &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {

		/* Encrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset;

		last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
						uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = dst - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
			last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG,
				"BPI: dst before post-process:",
				dst, last_block_len);
#endif
		bpi_cipher_encrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
				last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG,
				"BPI: dst after post-process:",
				dst, last_block_len);
#endif
	}
	return sym_op->cipher.data.length - last_block_len;
}
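
/*
 * Worked example with illustrative numbers: for DES (block_len = 8) and
 * cipher.data.length = 20, last_block_len = 20 % 8 = 4.  The hardware has
 * already ciphered bytes 0..15 of the region, and the 4 trailing bytes are
 * CFB-encrypted above in software, using the previous ciphertext block
 * (bytes 8..15) as the IV; 16, the hardware-processed length, is returned.
 * Were the whole region shorter than one block (a runt), the session IV
 * would be used instead.
 */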

#ifdef RTE_LIB_SECURITY
static inline void
qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint32_t crc_data_ofs, crc_data_len, crc;
	uint8_t *crc_data;

	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
			sym_op->auth.data.length != 0) {

		crc_data_ofs = sym_op->auth.data.offset;
		crc_data_len = sym_op->auth.data.length;
		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
				crc_data_ofs);

		crc = rte_net_crc_calc(crc_data, crc_data_len,
				RTE_NET_CRC32_ETH);

		if (crc != *(uint32_t *)(crc_data + crc_data_len))
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	}
}
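
/*
 * Frame layout assumed by the CRC helpers: the Ethernet CRC-32 occupies the
 * 4 bytes immediately after the auth region, i.e.
 *
 *	| auth.data.offset bytes | auth.data.length bytes | 4-byte CRC-32 |
 *
 * so qat_crc_verify() above and qat_crc_generate() below both locate it at
 * crc_data + crc_data_len.
 */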

static inline void
qat_crc_generate(struct qat_sym_session *ctx,
			struct rte_crypto_op *op)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint32_t *crc, crc_data_len;
	uint8_t *crc_data;

	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
			sym_op->auth.data.length != 0 &&
			sym_op->m_src->nb_segs == 1) {

		crc_data_len = sym_op->auth.data.length;
		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
				sym_op->auth.data.offset);
		crc = (uint32_t *)(crc_data + crc_data_len);
		*crc = rte_net_crc_calc(crc_data, crc_data_len,
				RTE_NET_CRC32_ETH);
	}
}

static inline void
qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op;
	struct qat_sym_session *ctx;
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		op = (struct rte_crypto_op *)ops[i];

		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			ctx = (struct qat_sym_session *)
				get_sec_session_private_data(
					op->sym->sec_session);

			if (ctx == NULL || ctx->bpi_ctx == NULL)
				continue;

			qat_crc_generate(ctx, op);
		}
	}
}
#else

static inline void
qat_sym_preprocess_requests(void **ops __rte_unused,
				uint16_t nb_ops __rte_unused)
{
}
#endif

static inline void
qat_sym_process_response(void **op, uint8_t *resp)
{
	struct icp_qat_fw_comn_resp *resp_msg =
			(struct icp_qat_fw_comn_resp *)resp;
	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_sym_session *sess;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comn_resp));
#endif

	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
			resp_msg->comn_hdr.comn_status)) {

		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
#ifdef RTE_LIB_SECURITY
		uint8_t is_docsis_sec = 0;

		if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			/*
			 * Assume that any security op reaching this
			 * point is a DOCSIS op.
			 */
			sess = (struct qat_sym_session *)
					get_sec_session_private_data(
					rx_op->sym->sec_session);
			is_docsis_sec = 1;
		} else
#endif
		{
			sess = (struct qat_sym_session *)
					get_sym_session_private_data(
					rx_op->sym->session,
					qat_sym_driver_id);
		}

		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

		if (sess->bpi_ctx) {
			qat_bpicipher_postprocess(sess, rx_op);
#ifdef RTE_LIB_SECURITY
			if (is_docsis_sec)
				qat_crc_verify(sess, rx_op);
#endif
		}
	}
	*op = (void *)rx_op;
}
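
/*
 * A minimal sketch (illustrative only; the real consumer is the generic
 * queue-pair dequeue path) of how qat_sym_process_response() is used: for
 * each response descriptor read from the ring, the enqueued crypto op is
 * recovered from the opaque data and handed back to the caller.
 *
 *	void *op;
 *
 *	qat_sym_process_response(&op, resp_msg);
 *	ops[nb_dequeued++] = op;
 */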

int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);

int
qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);

#else

static inline void
qat_sym_preprocess_requests(void **ops __rte_unused,
				uint16_t nb_ops __rte_unused)
{
}

static inline void
qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
{
}

#endif
#endif /* _QAT_SYM_H_ */