/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <rte_security_driver.h>

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_session.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

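/*
 * Capability tables advertised by GEN1 devices. The legacy set below is
 * only exposed when the legacy-algorithm device option is enabled
 * (see qat_sym_crypto_cap_get_gen1()).
 */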
static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen1[] = {
	QAT_SYM_CIPHER_CAP(DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CTR,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(MD5_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
};

static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen1[] = {
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(KASUMI_F9,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_XTS,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(KASUMI_F8,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {

	/* Device related operations */
	.dev_configure		= qat_cryptodev_config,
	.dev_start		= qat_cryptodev_start,
	.dev_stop		= qat_cryptodev_stop,
	.dev_close		= qat_cryptodev_close,
	.dev_infos_get		= qat_cryptodev_info_get,

	.stats_get		= qat_cryptodev_stats_get,
	.stats_reset		= qat_cryptodev_stats_reset,
	.queue_pair_setup	= qat_cryptodev_qp_setup,
	.queue_pair_release	= qat_cryptodev_qp_release,

	/* Crypto related operations */
	.sym_session_get_size	= qat_sym_session_get_private_size,
	.sym_session_configure	= qat_sym_session_configure,
	.sym_session_clear	= qat_sym_session_clear,

	/* Raw data-path API related operations */
	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
};

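/*
 * Publish the GEN1 capability array through a shared memzone so that all
 * processes see the same table. When the legacy-algorithm option is set,
 * the legacy capabilities are copied in front of the regular GEN1 set.
 */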
static int
qat_sym_crypto_cap_get_gen1(struct qat_cryptodev_private *internals,
			const char *capa_memz_name,
			const uint16_t __rte_unused slice_map)
{
	uint32_t legacy_capa_num;
	uint32_t size = sizeof(qat_sym_crypto_caps_gen1);
	uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen1);

	legacy_capa_num = legacy_size / sizeof(struct rte_cryptodev_capabilities);

	if (unlikely(internals->qat_dev->options.legacy_alg))
		size = size + legacy_size;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
			(struct rte_cryptodev_capabilities *)
				internals->capa_mz->addr;

	struct rte_cryptodev_capabilities *capabilities;

	if (unlikely(internals->qat_dev->options.legacy_alg)) {
		capabilities = qat_sym_crypto_legacy_caps_gen1;
		memcpy(addr, capabilities, legacy_size);
		addr += legacy_capa_num;
	}
	capabilities = qat_sym_crypto_caps_gen1;
	memcpy(addr, capabilities, sizeof(qat_sym_crypto_caps_gen1));
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}

uint64_t
qat_sym_crypto_feature_flags_get_gen1(
	struct qat_pci_device *qat_dev __rte_unused)
{
	uint64_t feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
			RTE_CRYPTODEV_FF_SYM_RAW_DP;

	return feature_flags;
}

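/*
 * Request builders: each one copies the session's firmware request template
 * into the message buffer (rte_mov128), converts the crypto op into SGL,
 * IV, AAD and digest vectors, fills in the request data pointers and then
 * completes the job-specific fields via the enqueue_one_*_job_gen1 helpers.
 */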
int
qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, NULL, NULL);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	if (ctx->is_zuc256)
		zuc256_modify_iv(cipher_iv.va);

	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len, op_cookie);

	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			NULL, NULL, NULL);

	return 0;
}

int
qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr auth_iv;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;
	struct rte_cryptodev *cdev;
	struct qat_cryptodev_private *internals;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
			NULL, &auth_iv, &digest, op_cookie);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
	internals = cdev->data->dev_private;

	if (internals->qat_dev->options.has_wireless_slice && !ctx->is_gmac)
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				req->comn_hdr.serv_specif_flags, 0);

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	if (ctx->is_zuc256)
		zuc256_modify_iv(auth_iv.va);

	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
			total_len);

	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
			&auth_iv, NULL, &digest);

	return 0;
}

int
qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct rte_crypto_va_iova_ptr aad;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &aad, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
			total_len);

	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			NULL, &aad, &digest);

	return 0;
}

int
qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct rte_crypto_va_iova_ptr auth_iv;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &auth_iv, &digest, cookie);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	if (ctx->is_zuc256) {
		zuc256_modify_iv(cipher_iv.va);
		zuc256_modify_iv(auth_iv.va);
	}

	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
		out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
		ofs, total_len, cookie);

	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			&auth_iv, NULL, &digest);

	return 0;
}

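/*
 * rte_security (DOCSIS lookaside protocol) capabilities: AES-DOCSISBPI
 * cipher, advertised for both uplink and downlink directions.
 */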
#define QAT_SECURITY_SYM_CAPABILITIES \
	{	/* AES DOCSIS BPI */ \
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
		{.sym = { \
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
			{.cipher = { \
				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI, \
				.block_size = 16, \
				.key_size = { \
					.min = 16, \
					.max = 32, \
					.increment = 16 \
				}, \
				.iv_size = { \
					.min = 16, \
					.max = 16, \
					.increment = 0 \
				} \
			}, } \
		}, } \
	}

#define QAT_SECURITY_CAPABILITIES(sym) \
	[0] = { /* DOCSIS Uplink */ \
		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, \
		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS, \
		.docsis = { \
			.direction = RTE_SECURITY_DOCSIS_UPLINK \
		}, \
		.crypto_capabilities = (sym) \
	}, \
	[1] = { /* DOCSIS Downlink */ \
		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, \
		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS, \
		.docsis = { \
			.direction = RTE_SECURITY_DOCSIS_DOWNLINK \
		}, \
		.crypto_capabilities = (sym) \
	}

static const struct rte_cryptodev_capabilities
					qat_security_sym_capabilities[] = {
	QAT_SECURITY_SYM_CAPABILITIES,
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static const struct rte_security_capability qat_security_capabilities_gen1[] = {
	QAT_SECURITY_CAPABILITIES(qat_security_sym_capabilities),
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};

static const struct rte_security_capability *
qat_security_cap_get_gen1(void *dev __rte_unused)
{
	return qat_security_capabilities_gen1;
}

struct rte_security_ops security_qat_ops_gen1 = {
	.session_create = qat_security_session_create,
	.session_update = NULL,
	.session_get_size = qat_security_session_get_size,
	.session_stats_get = NULL,
	.session_destroy = qat_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = qat_security_cap_get_gen1
};

void *
qat_sym_create_security_gen1(void *cryptodev)
{
	struct rte_security_ctx *security_instance;

	security_instance = rte_malloc(NULL, sizeof(struct rte_security_ctx),
			RTE_CACHE_LINE_SIZE);
	if (security_instance == NULL)
		return NULL;

	security_instance->device = cryptodev;
	security_instance->ops = &security_qat_ops_gen1;
	security_instance->sess_cnt = 0;

	return (void *)security_instance;
}

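/*
 * Raw data-path enqueue helpers. Each call reserves the next TX ring slot
 * at dp_ctx->tail, pairs it with the op cookie for that slot, copies the
 * session's request template and fills in the job. The ring tail is only
 * committed to hardware later, in qat_sym_dp_enqueue_done_gen1().
 */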
int
qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest __rte_unused,
	struct rte_crypto_va_iova_ptr *aad __rte_unused,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct qat_sym_op_cookie *cookie;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);

	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	if (ctx->is_zuc256)
		zuc256_modify_iv(iv->va);

	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len, cookie);

	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
			NULL, NULL, NULL);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

uint32_t
qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		if (ctx->is_zuc256)
			zuc256_modify_iv(vec->iv[i].va);

		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
			(uint32_t)data_len, cookie);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i],
				NULL, NULL, NULL);
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

int
qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;
	struct rte_crypto_va_iova_ptr null_digest;
	struct rte_crypto_va_iova_ptr *job_digest = digest;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	if (ctx->is_zuc256)
		zuc256_modify_iv(auth_iv->va);

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
		null_digest.iova = cookie->digest_null_phys_addr;
		job_digest = &null_digest;
	}

	enqueue_one_auth_job_gen1(ctx, req, job_digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
			auth_iv, NULL, digest);

	return 0;
}

uint32_t
qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	struct rte_crypto_va_iova_ptr null_digest;
	struct rte_crypto_va_iova_ptr *job_digest = NULL;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		if (ctx->is_zuc256)
			zuc256_modify_iv(vec->auth_iv[i].va);

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
			null_digest.iova = cookie->digest_null_phys_addr;
			job_digest = &null_digest;
		} else
			job_digest = &vec->digest[i];

		enqueue_one_auth_job_gen1(ctx, req, job_digest,
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
				NULL, &vec->digest[i]);
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

int
qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;
	struct rte_crypto_va_iova_ptr null_digest;
	struct rte_crypto_va_iova_ptr *job_digest = digest;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	if (ctx->is_zuc256) {
		zuc256_modify_iv(cipher_iv->va);
		zuc256_modify_iv(auth_iv->va);
	}

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
		null_digest.iova = cookie->digest_null_phys_addr;
		job_digest = &null_digest;
	}

	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
			NULL, 0, cipher_iv, job_digest, auth_iv, ofs,
			(uint32_t)data_len, cookie)))
		return -1;

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
			auth_iv, NULL, digest);

	return 0;
}

uint32_t
qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	struct rte_crypto_va_iova_ptr null_digest;
	struct rte_crypto_va_iova_ptr *job_digest;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		if (ctx->is_zuc256) {
			zuc256_modify_iv(vec->iv[i].va);
			zuc256_modify_iv(vec->auth_iv[i].va);
		}

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
			null_digest.iova = cookie->digest_null_phys_addr;
			job_digest = &null_digest;
		} else
			job_digest = &vec->digest[i];

		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
			vec->src_sgl[i].vec, vec->src_sgl[i].num,
			NULL, 0,
			&vec->iv[i], job_digest,
			&vec->auth_iv[i], ofs, (uint32_t)data_len, cookie)))
			break;

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i],
				&vec->auth_iv[i],
				NULL, &vec->digest[i]);
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

int
qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
			NULL, aad, digest);

	return 0;
}

uint32_t
qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
				&vec->digest[i], &vec->aad[i], ofs,
				(uint32_t)data_len);

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i], NULL,
				&vec->aad[i], &vec->digest[i]);
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

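/*
 * Raw data-path dequeue: walk the RX ring from dp_ctx->head until an
 * empty-signature descriptor is met or the requested count is reached,
 * reporting per-job status through the post_dequeue callback.
 */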
uint32_t
qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *return_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct icp_qat_fw_comn_resp *resp;
	void *resp_opaque;
	uint32_t i, n, inflight;
	uint32_t head;
	uint8_t status;

	*n_success_jobs = 0;
	*return_status = 0;
	head = dp_ctx->head;

	inflight = qp->enqueued - qp->dequeued;
	if (unlikely(inflight == 0))
		return 0;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			head);
	/* no operation ready */
	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return 0;

	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
	/* get the dequeue count */
	if (get_dequeue_count) {
		n = get_dequeue_count(resp_opaque);
		if (unlikely(n == 0))
			return 0;
	} else {
		if (unlikely(max_nb_to_dequeue == 0))
			return 0;
		n = max_nb_to_dequeue;
	}

	out_user_data[0] = resp_opaque;
	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
	post_dequeue(resp_opaque, 0, status);
	*n_success_jobs += status;

	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

	/* we already finished dequeue when n == 1 */
	if (unlikely(n == 1)) {
		i = 1;
		goto end_deq;
	}

	if (is_user_data_array) {
		for (i = 1; i < n; i++) {
			resp = (struct icp_qat_fw_comn_resp *)(
				(uint8_t *)rx_queue->base_addr + head);
			if (unlikely(*(uint32_t *)resp ==
					ADF_RING_EMPTY_SIG))
				goto end_deq;
			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
			*n_success_jobs += status;
			post_dequeue(out_user_data[i], i, status);
			head = (head + rx_queue->msg_size) &
					rx_queue->modulo_mask;
		}

		goto end_deq;
	}

	/* opaque is not array */
	for (i = 1; i < n; i++) {
		resp = (struct icp_qat_fw_comn_resp *)(
			(uint8_t *)rx_queue->base_addr + head);
		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
			goto end_deq;
		head = (head + rx_queue->msg_size) &
				rx_queue->modulo_mask;
		post_dequeue(resp_opaque, i, status);
		*n_success_jobs += status;
	}

end_deq:
	dp_ctx->head = head;
	dp_ctx->cached_dequeue += i;
	return i;
}

void *
qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
		int *dequeue_status, enum rte_crypto_op_status *op_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	register struct icp_qat_fw_comn_resp *resp;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			dp_ctx->head);

	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return NULL;

	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
			rx_queue->modulo_mask;
	dp_ctx->cached_dequeue++;

	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
			RTE_CRYPTO_OP_STATUS_SUCCESS :
			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	*dequeue_status = 0;
	return (void *)(uintptr_t)resp->opaque_data;
}

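/*
 * Commit 'n' previously built requests: advance the TX tail CSR so the
 * hardware starts processing them. 'n' must match the number of jobs
 * cached since the last commit.
 */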
int
qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_enqueue != n))
		return -1;

	qp->enqueued += n;
	qp->stats.enqueued_count += n;

	tx_queue->tail = dp_ctx->tail;

	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
			tx_queue->hw_bundle_number,
			tx_queue->hw_queue_number, tx_queue->tail);
	tx_queue->csr_tail = tx_queue->tail;
	dp_ctx->cached_enqueue = 0;

	return 0;
}

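/*
 * Acknowledge 'n' dequeued responses: clear the consumed descriptors back
 * to the empty signature and update the RX head CSR once the threshold of
 * processed responses is crossed.
 */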
int
qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_dequeue != n))
		return -1;

	rx_queue->head = dp_ctx->head;
	rx_queue->nb_processed_responses += n;
	qp->dequeued += n;
	qp->stats.dequeued_count += n;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
		uint32_t old_head, new_head;
		uint32_t max_head;

		old_head = rx_queue->csr_head;
		new_head = rx_queue->head;
		max_head = qp->nb_descriptors * rx_queue->msg_size;

		/* write out free descriptors */
		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

		if (new_head < old_head) {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
					max_head - old_head);
			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
					new_head);
		} else {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
					old_head);
		}
		rx_queue->nb_processed_responses = 0;
		rx_queue->csr_head = new_head;

		/* write current head to CSR */
		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
			new_head);
	}

	dp_ctx->cached_dequeue = 0;
	return 0;
}

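/*
 * Populate a struct rte_crypto_raw_dp_ctx with the GEN1 enqueue/dequeue
 * callbacks, choosing the enqueue pair (cipher, auth, chain or AEAD) from
 * the session's firmware command and algorithms. A typical caller flow is
 * roughly (sketch only, not tied to a specific application):
 *
 *   enqueue/enqueue_burst ...  -> enqueue_done(n)   commit jobs to HW
 *   dequeue_burst/dequeue ...  -> dequeue_done(n)   release ring space
 */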
int
qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
{
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
	struct qat_sym_session *ctx = _ctx;

	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;

	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
			!ctx->is_gmac) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_aead_gen1;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_chain_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_chain_gen1;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
			ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_aead_gen1;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_cipher_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_cipher_gen1;
		}
	} else
		return -1;

	return 0;
}

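/*
 * Select the request-builder callback for this session based on the
 * firmware command and algorithms, and reject mixed hash/cipher
 * combinations that GEN1 hardware does not support.
 */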
int
qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
{
	struct qat_sym_session *ctx = session;
	qat_sym_build_request_t build_request = NULL;
	enum rte_proc_type_t proc_type = rte_eal_process_type();
	int handle_mixed = 0;

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
			!ctx->is_gmac) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			/* do_aead = 1; */
			build_request = qat_sym_build_op_aead_gen1;
		} else {
			/* do_auth = 1; do_cipher = 1; */
			build_request = qat_sym_build_op_chain_gen1;
			handle_mixed = 1;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
		/* do_auth = 1; do_cipher = 0; */
		build_request = qat_sym_build_op_auth_gen1;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		/* do_auth = 0; do_cipher = 1; */
		build_request = qat_sym_build_op_cipher_gen1;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_CRC) {
		/* do_auth = 1; do_cipher = 1; */
		build_request = qat_sym_build_op_chain_gen1;
		handle_mixed = 1;
	}

	if (build_request)
		ctx->build_request[proc_type] = build_request;
	else
		return -EINVAL;

	/* no more work if not a mixed op */
	if (!handle_mixed)
		return 0;

	/* Check for unsupported algorithm combinations if mixed */
	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
			ctx->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		return -ENOTSUP;
	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
			ctx->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		return -ENOTSUP;
	} else if ((ctx->aes_cmac ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
			(ctx->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			ctx->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
		return -ENOTSUP;
	}

	return 0;
}

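/* Register the GEN1 ops in the per-generation dispatch table at startup. */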
RTE_INIT(qat_sym_crypto_gen1_init)
{
	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
			qat_sym_crypto_cap_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
			qat_sym_crypto_set_session_gen1;
	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen1;
	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN1].create_security_ctx =
			qat_sym_create_security_gen1;
}