xref: /dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"
#include "adf_transport_access_macros_gen4vf.h"

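/*
 * Symmetric crypto capabilities for legacy algorithms (SHA1, SHA224,
 * SM4-ECB, ...). These are advertised only when the device is configured
 * with the legacy-algorithm option (see qat_sym_crypto_cap_get_gen4()).
 */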
static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen4[] = {
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(SM4_ECB,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 0, 0, 0)),
};

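/* Symmetric crypto capabilities always advertised by GEN4 devices. */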
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = {
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 32, 32, 0),
		CAP_RNG(digest_size, 16, 16, 0),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SM3,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_AUTH_CAP(SM3_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 16, 64, 4), CAP_RNG(digest_size, 32, 32, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

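/*
 * Build the capability array for a GEN4 device in a shared memzone.
 * When the legacy-algorithm option is enabled, the legacy capabilities
 * are copied in front of the default GEN4 capabilities.
 */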
static int
qat_sym_crypto_cap_get_gen4(struct qat_cryptodev_private *internals,
			const char *capa_memz_name,
			const uint16_t __rte_unused slice_map)
{
	uint32_t legacy_capa_num;
	uint32_t size = sizeof(qat_sym_crypto_caps_gen4);
	uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen4);

	legacy_capa_num = legacy_size / sizeof(struct rte_cryptodev_capabilities);

	if (unlikely(internals->qat_dev->options.legacy_alg))
		size = size + legacy_size;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
			(struct rte_cryptodev_capabilities *)
				internals->capa_mz->addr;

	struct rte_cryptodev_capabilities *capabilities;

	if (unlikely(internals->qat_dev->options.legacy_alg)) {
		capabilities = qat_sym_crypto_legacy_caps_gen4;
		memcpy(addr, capabilities, legacy_size);
		addr += legacy_capa_num;
	}
	capabilities = qat_sym_crypto_caps_gen4;
	memcpy(addr, capabilities, sizeof(qat_sym_crypto_caps_gen4));
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}

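/*
 * Fill one AEAD request descriptor. Sessions with both is_single_pass and
 * is_ucs set are programmed through the GEN4 cipher request parameters
 * (SPC AAD and auth-result addresses); all other sessions fall back to
 * the GEN1 path.
 */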
static __rte_always_inline void
enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	if (ctx->is_single_pass && ctx->is_ucs) {
		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
			(void *)&req->serv_specif_rqpars;
		struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *)&req->serv_specif_rqpars;

		/* QAT GEN4 uses a single pass to treat the AEAD operation
		 * as a cipher operation.
		 */
		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
				req);
		cipher_param->cipher_offset = ofs.ofs.cipher.head;
		cipher_param->cipher_length = data_len -
				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

		cipher_param_20->spc_aad_addr = aad->iova;
		cipher_param_20->spc_auth_res_addr = digest->iova;

		return;
	}

	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
}

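/*
 * Build a firmware bulk request for an AEAD crypto op: convert the op
 * into SGL/IV/AAD/digest vectors, populate the request data pointers and
 * fill the AEAD job fields via enqueue_one_aead_job_gen4().
 */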
static int
qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *qat_req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct rte_crypto_va_iova_ptr aad;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &aad, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
		total_len);

	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			NULL, &aad, &digest);

	return 0;
}

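/*
 * Raw data-path enqueue_done callback: advance the TX ring tail by the
 * number of requests cached since the last doorbell and write it to the
 * GEN4 VF ring-tail CSR.
 */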
int
qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_enqueue != n))
		return -1;

	qp->enqueued += n;
	qp->stats.enqueued_count += n;

	tx_queue->tail = dp_ctx->tail;

	WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr,
		tx_queue->hw_bundle_number,
		tx_queue->hw_queue_number, tx_queue->tail);

	tx_queue->csr_tail = tx_queue->tail;
	dp_ctx->cached_enqueue = 0;

	return 0;
}

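/*
 * Raw data-path dequeue_done callback: release processed response
 * descriptors and, once enough responses have accumulated, clear them to
 * the empty-ring signature and update the GEN4 VF ring-head CSR.
 */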
int
qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_dequeue != n))
		return -1;

	rx_queue->head = dp_ctx->head;
	rx_queue->nb_processed_responses += n;
	qp->dequeued += n;
	qp->stats.dequeued_count += n;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
		uint32_t old_head, new_head;
		uint32_t max_head;

		old_head = rx_queue->csr_head;
		new_head = rx_queue->head;
		max_head = qp->nb_descriptors * rx_queue->msg_size;

		/* write out free descriptors */
		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

		if (new_head < old_head) {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
					max_head - old_head);
			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
					new_head);
		} else {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
					old_head);
		}
		rx_queue->nb_processed_responses = 0;
		rx_queue->csr_head = new_head;

		/* write current head to CSR */
		WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr,
			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
			new_head);
	}

	dp_ctx->cached_dequeue = 0;
	return 0;
}

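/*
 * GEN4 session setup: reuse the GEN1 session setup, then override the
 * build_request handler for single-pass sessions and enable the extended
 * hash flags needed by the mixed algorithm combinations that GEN1
 * rejects with -ENOTSUP.
 */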
int
qat_sym_crypto_set_session_gen4(void *cdev, void *session)
{
	struct qat_sym_session *ctx = session;
	enum rte_proc_type_t proc_type = rte_eal_process_type();
	int ret;

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	ret = qat_sym_crypto_set_session_gen1(cdev, session);
	/* Use the GEN4-specific single-pass build request */
	if (ctx->is_single_pass && ctx->is_ucs)
		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;

	if (ret == -ENOTSUP) {
		/* GEN1 returns -ENOTSUP because it cannot handle some mixed
		 * algorithm combinations; these are handled here for GEN4.
		 */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
		} else if ((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		}

		ret = 0;
	}

	return ret;
}

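/*
 * Raw data-path enqueue of a single AEAD job through
 * enqueue_one_aead_job_gen4().
 */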
static int
qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;

	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
			NULL, aad, digest);

	return 0;
}

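/*
 * Raw data-path burst enqueue of AEAD jobs through
 * enqueue_one_aead_job_gen4(); returns the number of jobs actually
 * queued.
 */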
static uint32_t
qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
				&vec->digest[i], &vec->aad[i], ofs,
				(uint32_t)data_len);

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i], NULL,
				&vec->aad[i], &vec->digest[i]);
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

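/*
 * Populate the raw data-path context. Dequeue and most enqueue handlers
 * are shared with GEN1; the GEN4 enqueue/dequeue done callbacks and the
 * single-pass AEAD enqueue handlers are installed on top.
 */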
int
qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
{
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
	struct qat_sym_session *ctx = _ctx;

	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen4;
	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen4;

	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
			!ctx->is_gmac) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_aead_gen1;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_chain_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_chain_gen1;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
			ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_aead_gen1;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_cipher_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_cipher_gen1;
		}
	} else
		return -1;

	if (ctx->is_single_pass && ctx->is_ucs) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
	}

	return 0;
}

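/* Register the GEN4 (and VQAT) symmetric crypto device operations. */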
RTE_INIT(qat_sym_crypto_gen4_init)
{
	qat_sym_gen_dev_ops[QAT_VQAT].cryptodev_ops =
		qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
	qat_sym_gen_dev_ops[QAT_VQAT].get_capabilities =
		qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
			qat_sym_crypto_cap_get_gen4;
	qat_sym_gen_dev_ops[QAT_VQAT].set_session =
		qat_sym_gen_dev_ops[QAT_GEN4].set_session =
			qat_sym_crypto_set_session_gen4;
	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen4;
	qat_sym_gen_dev_ops[QAT_VQAT].get_feature_flags =
		qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN4].create_security_ctx =
			qat_sym_create_security_gen1;
}

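/* Register the GEN4 (and VQAT) asymmetric crypto device operations. */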
RTE_INIT(qat_asym_crypto_gen4_init)
{
	qat_asym_gen_dev_ops[QAT_VQAT].cryptodev_ops =
		qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops =
			&qat_asym_crypto_ops_gen1;
	qat_asym_gen_dev_ops[QAT_VQAT].get_capabilities =
		qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities =
			qat_asym_crypto_cap_get_gen1;
	qat_asym_gen_dev_ops[QAT_VQAT].get_feature_flags =
		qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags =
			qat_asym_crypto_feature_flags_get_gen1;
	qat_asym_gen_dev_ops[QAT_VQAT].set_session =
		qat_asym_gen_dev_ops[QAT_GEN4].set_session =
			qat_asym_crypto_set_session_gen1;
}
554