/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2024 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

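/* GEN LCE exposes a single symmetric capability: AES-256-GCM with a 16B
 * digest, a 12B IV, and up to 240B of AAD.
 */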
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen_lce[] = {
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 32, 32, 0), CAP_RNG(digest_size, 16, 16, 0),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

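/*
 * Append a single flat buffer (IOVA + length) to a QAT SGL. Fails with
 * -EINVAL once QAT_SYM_SGL_MAX_NUMBER entries are already in use.
 */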
static int
qat_sgl_add_buffer_gen_lce(void *list_in, uint64_t addr, uint32_t len)
{
	struct qat_sgl *list = (struct qat_sgl *)list_in;
	uint32_t nr;

	nr = list->num_bufs;

	if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
		QAT_DP_LOG(ERR, "Adding entry %d failed, no empty SGL buffer", nr);
		return -EINVAL;
	}

	list->buffers[nr].len = len;
	list->buffers[nr].resrvd = 0;
	list->buffers[nr].addr = addr;

	list->num_bufs++;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
	QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
		nr, list->buffers[nr].len, list->buffers[nr].addr);
#endif
	return 0;
}

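/*
 * Walk an mbuf chain starting at byte 'offset' and append segments covering
 * 'data_len' bytes to the SGL in 'list_in'. Returns 0 on success, -EINVAL
 * if the SGL runs out of entries or the chain is shorter than 'data_len'.
 */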
static int
qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
		void *list_in, uint32_t data_len)
{
	struct qat_sgl *list = (struct qat_sgl *)list_in;
	uint32_t nr, buf_len;
	int res = -EINVAL;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	uint32_t start_idx = list->num_bufs;
#endif

	/* Append to the existing list */
	nr = list->num_bufs;

	for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
		if (offset >= rte_pktmbuf_data_len(buf)) {
			offset -= rte_pktmbuf_data_len(buf);
			/* Jump to next mbuf */
			continue;
		}

		list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
		list->buffers[nr].resrvd = 0;
		list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);

		offset = 0;
		buf_len += list->buffers[nr].len;

		if (buf_len >= data_len) {
			list->buffers[nr].len -= buf_len - data_len;
			res = 0;
			break;
		}
		++nr;
	}

	if (unlikely(res != 0)) {
		if (nr == QAT_SYM_SGL_MAX_NUMBER)
			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
					QAT_SYM_SGL_MAX_NUMBER);
		else
			QAT_DP_LOG(ERR, "Mbuf chain is too short");
	} else {
		list->num_bufs = ++nr;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
		for (nr = start_idx; nr < list->num_bufs; nr++) {
			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
					nr, list->buffers[nr].len,
					list->buffers[nr].addr);
		}
#endif
	}

	return res;
}

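/*
 * Build a GEN LCE AEAD (AES-256-GCM) request: copy the session's request
 * template, embed the 12B IV in the descriptor, and describe the data with
 * source and destination SGLs built in the op cookie. Only in-place SGL
 * operations are expected here.
 */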
static int
qat_sym_build_op_aead_gen_lce(void *in_op, struct qat_sym_session *ctx,
	uint8_t *out_msg, void *op_cookie)
{
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_op *op = in_op;
	uint64_t digest_phys_addr, aad_phys_addr;
	uint16_t iv_len, aad_len, digest_len, key_len;
	uint32_t cipher_ofs, iv_offset, cipher_len;
	register struct icp_qat_fw_la_bulk_req *qat_req;
	struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
	enum icp_qat_hw_cipher_dir dir;
	bool is_digest_adjacent = false;

	if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
		ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
		ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {

		QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
			"GEN_LCE PMD only supports AES-256 AEAD mode",
			ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
		return -EINVAL;
	}

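	/* Start from the 128B request template prebuilt in the session
	 * (ctx->fw_req); only per-operation fields are updated below.
	 */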
	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
	cipher_param = (void *)&qat_req->serv_specif_rqpars;

	dir = ctx->qat_dir;

	aad_phys_addr = op->sym->aead.aad.phys_addr;
	aad_len = ctx->aad_len;

	iv_offset = ctx->cipher_iv.offset;
	iv_len = ctx->cipher_iv.length;

	cipher_ofs = op->sym->aead.data.offset;
	cipher_len = op->sym->aead.data.length;

	digest_phys_addr = op->sym->aead.digest.phys_addr;
	digest_len = ctx->digest_length;

	/* Up to 16B of IV can be directly embedded in the descriptor.
	 * GCM supports only a 12B IV on GEN LCE.
	 */
	if (iv_len != GCM_IV_LENGTH_GEN_LCE) {
		QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.", iv_len);
		return -EINVAL;
	}

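	/* Copy the IV into the IV array embedded in the request descriptor */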
	rte_memcpy(cipher_param->u.cipher_IV_array,
			rte_crypto_op_ctod_offset(op, uint8_t*, iv_offset), iv_len);

	/* Always SGL */
	RTE_ASSERT((qat_req->comn_hdr.comn_req_flags & ICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);
	/* Always in-place */
	RTE_ASSERT(op->sym->m_dst == NULL);

	/* Key buffer address is already programmed by reusing the
	 * content-descriptor buffer
	 */
	key_len = ctx->auth_key_length;

	cipher_param->spc_aad_sz = aad_len;
	cipher_param->cipher_length = key_len;
	cipher_param->spc_auth_res_sz = digest_len;

	/* Knowing the digest is contiguous with the cipher-text helps optimize the SGL */
	if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len) == digest_phys_addr)
		is_digest_adjacent = true;

	/* SRC-SGL: up to 3 entries:
	 * a) AAD
	 * b) cipher text
	 * c) digest (only for decrypt, when the digest buffer is not adjacent)
	 */
	cookie->qat_sgl_src.num_bufs = 0;
	if (aad_len)
		qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src, aad_phys_addr, aad_len);

	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_src,
				cipher_len + digest_len);
	} else {
		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_src,
				cipher_len);

		/* Digest buffer in decrypt job */
		if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src,
					digest_phys_addr, digest_len);
	}

	/* (in-place) DST-SGL: up to 2 entries:
	 * a) cipher text
	 * b) digest (only for encrypt, when the digest buffer is not adjacent)
	 */
	cookie->qat_sgl_dst.num_bufs = 0;

	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_dst,
				cipher_len + digest_len);
	} else {
		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_dst,
				cipher_len);

		/* Digest buffer in encrypt job */
		if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_dst,
					digest_phys_addr, digest_len);
	}

	/* Length values in the 128B descriptor */
	qat_req->comn_mid.src_length = cipher_len;
	qat_req->comn_mid.dst_length = cipher_len;

	if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest is appended in encrypt jobs */
		qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;

	/* src & dst SGL addresses in the 128B descriptor */
	qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
	qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, sizeof(struct icp_qat_fw_la_bulk_req));
	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
			rte_pktmbuf_data_len(op->sym->m_src));
	QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data, digest_len);
	QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
#endif
	return 0;
}

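/*
 * Session configuration hook for GEN LCE: install the AEAD request builder
 * for AES-256-GCM sessions, keyed by the current process type.
 */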
static int
qat_sym_crypto_set_session_gen_lce(void *cdev __rte_unused, void *session)
{
	struct qat_sym_session *ctx = session;
	qat_sym_build_request_t build_request = NULL;
	enum rte_proc_type_t proc_type = rte_eal_process_type();

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	/* Build request for AEAD */
	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
		build_request = qat_sym_build_op_aead_gen_lce;
		ctx->build_request[proc_type] = build_request;
	}
	return 0;
}

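/*
 * Copy the GEN LCE capability table into a memzone so it can be shared
 * across processes; slice_map based filtering is not used on this
 * generation.
 */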
static int
qat_sym_crypto_cap_get_gen_lce(struct qat_cryptodev_private *internals,
	const char *capa_memz_name,
	const uint16_t __rte_unused slice_map)
{
	const uint32_t size = sizeof(qat_sym_crypto_caps_gen_lce);
	uint32_t i;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name, size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG, "Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
		(struct rte_cryptodev_capabilities *)
		internals->capa_mz->addr;
	const struct rte_cryptodev_capabilities *capabilities =
		qat_sym_crypto_caps_gen_lce;
	const uint32_t capa_num = size / sizeof(struct rte_cryptodev_capabilities);
	uint32_t curr_capa = 0;

	for (i = 0; i < capa_num; i++) {
		memcpy(addr + curr_capa, capabilities + i,
				sizeof(struct rte_cryptodev_capabilities));
		curr_capa++;
	}
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}

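/* Register GEN LCE symmetric crypto ops; the GEN1 cryptodev ops and feature
 * flags are reused, and the raw datapath API is not supported.
 */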
RTE_INIT(qat_sym_crypto_gen_lce_init)
{
	qat_sym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = &qat_sym_crypto_ops_gen1;
	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = qat_sym_crypto_cap_get_gen_lce;
	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_session = qat_sym_crypto_set_session_gen_lce;
	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_raw_dp_ctx = NULL;
	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1;
}

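/* Asymmetric crypto is not supported on GEN LCE */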
RTE_INIT(qat_asym_crypto_gen_lce_init)
{
	qat_asym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = NULL;
	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = NULL;
	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = NULL;
	qat_asym_gen_dev_ops[QAT_GEN_LCE].set_session = NULL;
}