/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>

#include "rte_vhost_crypto.h"
#include "vhost.h"
#include "vhost_user.h"
#include "virtio_crypto.h"

#define INHDR_LEN		(sizeof(struct virtio_crypto_inhdr))
#define IV_OFFSET		(sizeof(struct rte_crypto_op) + \
				sizeof(struct rte_crypto_sym_op))

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VC_LOG_ERR(fmt, args...)				\
	RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto",	__func__, __LINE__, ## args)
#define VC_LOG_INFO(fmt, args...)				\
	RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto",	__func__, __LINE__, ## args)

#define VC_LOG_DBG(fmt, args...)				\
	RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto",	__func__, __LINE__, ## args)
#else
#define VC_LOG_ERR(fmt, args...)				\
	RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_INFO(fmt, args...)				\
	RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_DBG(fmt, args...)
#endif

#define VIRTIO_CRYPTO_FEATURES ((1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |	\
		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |			\
		(1ULL << VIRTIO_RING_F_EVENT_IDX) |			\
		(1ULL << VIRTIO_NET_F_CTRL_VQ) |			\
		(1ULL << VIRTIO_F_VERSION_1) |				\
		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))

#define IOVA_TO_VVA(t, r, a, l, p)					\
	((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))

/*
 * vhost_crypto_desc is used to copy the original vring_desc entries into a
 * local buffer before processing (except the next index). The result is an
 * array of vhost_crypto_desc elements laid out in the order in which the
 * original vring_desc.next chain is traversed.
 */
#define vhost_crypto_desc vring_desc

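/* Translate a virtio-crypto cipher algorithm ID into the DPDK cryptodev
 * enum, flagging unsupported or unknown algorithms.
 */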
static int
cipher_algo_transform(uint32_t virtio_cipher_algo,
		enum rte_crypto_cipher_algorithm *algo)
{
	switch (virtio_cipher_algo) {
	case VIRTIO_CRYPTO_CIPHER_AES_CBC:
		*algo = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_CTR:
		*algo = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_DES_ECB:
		return -VIRTIO_CRYPTO_NOTSUPP;
	case VIRTIO_CRYPTO_CIPHER_DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
		*algo = RTE_CRYPTO_CIPHER_3DES_ECB;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
		*algo = RTE_CRYPTO_CIPHER_3DES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
		*algo = RTE_CRYPTO_CIPHER_KASUMI_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
		*algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_F8:
		*algo = RTE_CRYPTO_CIPHER_AES_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_XTS:
		*algo = RTE_CRYPTO_CIPHER_AES_XTS;
		break;
	case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
		*algo = RTE_CRYPTO_CIPHER_ZUC_EEA3;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}

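/* Translate a virtio-crypto MAC algorithm ID into the DPDK cryptodev
 * auth enum, flagging unsupported or unknown algorithms.
 */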
static int
auth_algo_transform(uint32_t virtio_auth_algo,
		enum rte_crypto_auth_algorithm *algo)
{
	switch (virtio_auth_algo) {
	case VIRTIO_CRYPTO_NO_MAC:
		*algo = RTE_CRYPTO_AUTH_NULL;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_MD5:
		*algo = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
		*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
		*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
		*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
		*algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
		*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case VIRTIO_CRYPTO_MAC_KASUMI_F9:
		*algo = RTE_CRYPTO_AUTH_KASUMI_F9;
		break;
	case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
		*algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		break;
	case VIRTIO_CRYPTO_MAC_GMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_GMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_XCBC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_3DES:
	case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
	case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
		return -VIRTIO_CRYPTO_NOTSUPP;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}

static int
get_iv_len(enum rte_crypto_cipher_algorithm algo)
{
	int len;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_3DES_CBC:
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		len = 16;
		break;

	/* TODO: add common algos */

	default:
		len = -1;
		break;
	}

	return len;
}

/**
 * vhost_crypto is used to maintain a number of virtio_crypto devices and
 * the one DPDK cryptodev that deals with all their crypto workloads.
 */
struct vhost_crypto {
	/** Used to look up a DPDK cryptodev session based on the VIRTIO
	 *  crypto session ID.
	 */
	struct rte_hash *session_map;
	struct rte_mempool *mbuf_pool;
	struct rte_mempool *sess_pool;
	struct rte_mempool *sess_priv_pool;
	struct rte_mempool *wb_pool;

	/** DPDK cryptodev ID */
	uint8_t cid;
	uint16_t nb_qps;

	uint64_t last_session_id;

	uint64_t cache_session_id;
	struct rte_cryptodev_sym_session *cache_session;
	/** socket id for the device */
	int socket_id;

	struct virtio_net *dev;

	uint8_t option;
} __rte_cache_aligned;

struct vhost_crypto_writeback_data {
	uint8_t *src;
	uint8_t *dst;
	uint64_t len;
	struct vhost_crypto_writeback_data *next;
};

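/* Per-request context kept in the source mbuf private data area. It tracks
 * the descriptor chain being processed, the inhdr to update on completion
 * and the pending write-back segments for non-zero-copy mode.
 */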
struct vhost_crypto_data_req {
	struct vring_desc *head;
	struct virtio_net *dev;
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_virtqueue *vq;
	struct vhost_crypto_writeback_data *wb;
	struct rte_mempool *wb_pool;
	uint16_t desc_idx;
	uint16_t len;
	uint16_t zero_copy;
};

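/* Build a DPDK cipher transform from the session parameters received in a
 * vhost-user crypto session message.
 */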
static int
transform_cipher_param(struct rte_crypto_sym_xform *xform,
		VhostUserCryptoSessionParam *param)
{
	int ret;

	ret = cipher_algo_transform(param->cipher_algo, &xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
		VC_LOG_DBG("Invalid cipher key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.key.length = param->cipher_key_len;
	if (xform->cipher.key.length > 0)
		xform->cipher.key.data = param->cipher_key_buf;
	if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	else {
		VC_LOG_DBG("Bad operation type");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	ret = get_iv_len(xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform->cipher.iv.length = (uint16_t)ret;
	xform->cipher.iv.offset = IV_OFFSET;
	return 0;
}

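/* Build a cipher and auth transform chain from the session parameters
 * received in a vhost-user crypto session message. The chaining direction
 * decides the transform order and the encrypt/decrypt and generate/verify
 * operations.
 */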
static int
transform_chain_param(struct rte_crypto_sym_xform *xforms,
		VhostUserCryptoSessionParam *param)
{
	struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
	int ret;

	switch (param->chaining_dir) {
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
		xform_auth = xforms;
		xform_cipher = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
		xform_cipher = xforms;
		xform_auth = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	/* cipher */
	ret = cipher_algo_transform(param->cipher_algo,
			&xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
		VC_LOG_DBG("Invalid cipher key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform_cipher->cipher.key.length = param->cipher_key_len;
	xform_cipher->cipher.key.data = param->cipher_key_buf;
	ret = get_iv_len(xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_cipher->cipher.iv.length = (uint16_t)ret;
	xform_cipher->cipher.iv.offset = IV_OFFSET;

	/* auth */
	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->auth_key_len > VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH) {
		VC_LOG_DBG("Invalid auth key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform_auth->auth.digest_length = param->digest_len;
	xform_auth->auth.key.length = param->auth_key_len;
	xform_auth->auth.key.data = param->auth_key_buf;

	return 0;
}

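/* Create and initialize a cryptodev session for the guest-requested
 * transform(s) and register it in the session map. The resulting session
 * ID (or a negative virtio-crypto status) is returned to the guest through
 * sess_param->session_id.
 */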
static void
vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
		VhostUserCryptoSessionParam *sess_param)
{
	struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
	struct rte_cryptodev_sym_session *session;
	int ret;

	switch (sess_param->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_NONE:
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		ret = transform_cipher_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transforming session message (%i)",
					ret);
			sess_param->session_id = ret;
			return;
		}
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		if (unlikely(sess_param->hash_mode !=
				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
			sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
			VC_LOG_ERR("Error transforming session message (%i)",
					-VIRTIO_CRYPTO_NOTSUPP);
			return;
		}

		xform1.next = &xform2;

		ret = transform_chain_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transforming session message (%i)",
					ret);
			sess_param->session_id = ret;
			return;
		}

		break;
	default:
		VC_LOG_ERR("Algorithm not yet supported");
		sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
		return;
	}

	session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
	if (!session) {
		VC_LOG_ERR("Failed to create session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
			vcrypto->sess_priv_pool) < 0) {
		VC_LOG_ERR("Failed to initialize session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	/* insert hash to map */
	if (rte_hash_add_key_data(vcrypto->session_map,
			&vcrypto->last_session_id, session) < 0) {
		VC_LOG_ERR("Failed to insert session to hash table");

		if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
			VC_LOG_ERR("Failed to clear session");
		else {
			if (rte_cryptodev_sym_session_free(session) < 0)
				VC_LOG_ERR("Failed to free session");
		}
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
			vcrypto->last_session_id, vcrypto->dev->vid);

	sess_param->session_id = vcrypto->last_session_id;
	vcrypto->last_session_id++;
}

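/* Look up a session by ID, release its cryptodev resources and remove it
 * from the session map.
 */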
static int
vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
{
	struct rte_cryptodev_sym_session *session;
	uint64_t sess_id = session_id;
	int ret;

	ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
			(void **)&session);

	if (unlikely(ret < 0)) {
		VC_LOG_ERR("Failed to find session %"PRIu64".", session_id);
		return -VIRTIO_CRYPTO_INVSESS;
	}

	if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
		VC_LOG_DBG("Failed to clear session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_cryptodev_sym_session_free(session) < 0) {
		VC_LOG_DBG("Failed to free session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
		VC_LOG_DBG("Failed to delete session from hash table.");
		return -VIRTIO_CRYPTO_ERR;
	}

	VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
			vcrypto->dev->vid);

	return 0;
}

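/* vhost-user post-message handler: intercepts crypto session create/close
 * requests after the generic message handling.
 */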
static enum rte_vhost_msg_result
vhost_crypto_msg_post_handler(int vid, void *msg)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	struct vhu_msg_context *ctx = msg;
	enum rte_vhost_msg_result ret = RTE_VHOST_MSG_RESULT_OK;

	if (dev == NULL) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	vcrypto = dev->extern_data;
	if (vcrypto == NULL) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	switch (ctx->msg.request.master) {
	case VHOST_USER_CRYPTO_CREATE_SESS:
		vhost_crypto_create_sess(vcrypto,
				&ctx->msg.payload.crypto_session);
		ctx->fd_num = 0;
		ret = RTE_VHOST_MSG_RESULT_REPLY;
		break;
	case VHOST_USER_CRYPTO_CLOSE_SESS:
		if (vhost_crypto_close_sess(vcrypto, ctx->msg.payload.u64))
			ret = RTE_VHOST_MSG_RESULT_ERR;
		break;
	default:
		ret = RTE_VHOST_MSG_RESULT_NOT_HANDLED;
		break;
	}

	return ret;
}

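/* Return the first device-writable descriptor at or after desc, or NULL if
 * none is found within the chain bounds.
 */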
static __rte_always_inline struct vhost_crypto_desc *
find_write_desc(struct vhost_crypto_desc *head, struct vhost_crypto_desc *desc,
		uint32_t max_n_descs)
{
	if (desc < head)
		return NULL;

	while (desc - head < (int)max_n_descs) {
		if (desc->flags & VRING_DESC_F_WRITE)
			return desc;
		desc++;
	}

	return NULL;
}

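/* Map the last descriptor of the chain, which must hold the
 * virtio_crypto_inhdr the host writes the request status into.
 */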
static __rte_always_inline struct virtio_crypto_inhdr *
reach_inhdr(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
{
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_crypto_desc *last = head + (max_n_descs - 1);
	uint64_t dlen = last->len;

	if (unlikely(dlen != sizeof(*inhdr)))
		return NULL;

	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, last->addr,
			&dlen, VHOST_ACCESS_WO);
	if (unlikely(!inhdr || dlen != last->len))
		return NULL;

	return inhdr;
}

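/* Advance *cur_desc past `size` bytes of the chain, leaving it on the
 * descriptor that follows (or NULL when the chain is exhausted).
 */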
static __rte_always_inline int
move_desc(struct vhost_crypto_desc *head,
		struct vhost_crypto_desc **cur_desc,
		uint32_t size, uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = *cur_desc;
	int left = size - desc->len;

	while (desc->flags & VRING_DESC_F_NEXT && left > 0 &&
			desc >= head &&
			desc - head < (int)max_n_descs) {
		desc++;
		left -= desc->len;
	}

	if (unlikely(left > 0))
		return -1;

	if (unlikely(desc - head == (int)max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	return 0;
}

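/* Map a single descriptor's buffer into host virtual address space with
 * the requested access permission.
 */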
static __rte_always_inline void *
get_data_ptr(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *cur_desc,
		uint8_t perm)
{
	void *data;
	uint64_t dlen = cur_desc->len;

	data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
	if (unlikely(!data || dlen != cur_desc->len)) {
		VC_LOG_ERR("Failed to map object");
		return NULL;
	}

	return data;
}

static __rte_always_inline uint32_t
copy_data_from_desc(void *dst, struct vhost_crypto_data_req *vc_req,
	struct vhost_crypto_desc *desc, uint32_t size)
{
	uint64_t remain;
	uint64_t addr;

	remain = RTE_MIN(desc->len, size);
	addr = desc->addr;
	do {
		uint64_t len;
		void *src;

		len = remain;
		src = IOVA_TO_VVA(void *, vc_req, addr, &len, VHOST_ACCESS_RO);
		if (unlikely(src == NULL || len == 0))
			return 0;

		rte_memcpy(dst, src, len);
		remain -= len;
		/* cast is needed for 32-bit architecture */
		dst = RTE_PTR_ADD(dst, (size_t)len);
		addr += len;
	} while (unlikely(remain != 0));

	return RTE_MIN(desc->len, size);
}

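/* Copy `size` bytes of guest data from the descriptor chain into `data`,
 * advancing *cur_desc like move_desc() does.
 */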
static __rte_always_inline int
copy_data(void *data, struct vhost_crypto_data_req *vc_req,
	struct vhost_crypto_desc *head, struct vhost_crypto_desc **cur_desc,
	uint32_t size, uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = *cur_desc;
	uint32_t left = size;

	do {
		uint32_t copied;

		copied = copy_data_from_desc(data, vc_req, desc, left);
		if (copied == 0)
			return -1;
		left -= copied;
		data = RTE_PTR_ADD(data, copied);
	} while (left != 0 && ++desc < head + max_n_descs);

	if (unlikely(left != 0))
		return -1;

	if (unlikely(desc == head + max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	return 0;
}

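/* Copy the processed data back to the guest buffers, zeroing the staging
 * copies and returning the write-back elements to their mempool.
 */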
static void
write_back_data(struct vhost_crypto_data_req *vc_req)
{
	struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;

	while (wb_data) {
		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
		memset(wb_data->src, 0, wb_data->len);
		wb_last = wb_data;
		wb_data = wb_data->next;
		rte_mempool_put(vc_req->wb_pool, wb_last);
	}
}

static void
free_wb_data(struct vhost_crypto_writeback_data *wb_data,
		struct rte_mempool *mp)
{
	if (wb_data->next != NULL)
		free_wb_data(wb_data->next, mp);

	rte_mempool_put(mp, wb_data);
}

/**
 * The function will allocate a vhost_crypto_writeback_data linked list
 * containing the source and destination data pointers for the write back
 * operation after dequeued from Cryptodev PMD queues.
 *
 * @param vc_req
 *   The vhost crypto data request pointer
 * @param cur_desc
 *   The pointer of the current in use descriptor pointer. The content of
 *   cur_desc is expected to be updated after the function execution.
 * @param end_wb_data
 *   The last write back data element to be returned. It is used only in cipher
 *   and hash chain operations.
 * @param src
 *   The source data pointer
 * @param offset
 *   The offset to both source and destination data. For source data the offset
 *   is the number of bytes between src and start point of cipher operation. For
 *   destination data the offset is the number of bytes from *cur_desc->addr
 *   to the point where the src will be written to.
 * @param write_back_len
 *   The length of the data to write back.
 * @return
 *   The pointer to the start of the write back data linked list.
 */
static __rte_always_inline struct vhost_crypto_writeback_data *
prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *head_desc,
		struct vhost_crypto_desc **cur_desc,
		struct vhost_crypto_writeback_data **end_wb_data,
		uint8_t *src,
		uint32_t offset,
		uint64_t write_back_len,
		uint32_t max_n_descs)
{
	struct vhost_crypto_writeback_data *wb_data, *head = NULL;
	struct vhost_crypto_desc *desc = *cur_desc;
	uint64_t dlen;
	uint8_t *dst;
	int ret;

	ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
	if (unlikely(ret < 0)) {
		VC_LOG_ERR("no memory");
		goto error_exit;
	}

	wb_data = head;

	if (likely(desc->len > offset)) {
		wb_data->src = src + offset;
		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
			&dlen, VHOST_ACCESS_RW);
		if (unlikely(!dst || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->dst = dst + offset;
		wb_data->len = RTE_MIN(dlen - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += offset + wb_data->len;
		offset = 0;

		if (unlikely(write_back_len)) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	} else
		offset -= desc->len;

	while (write_back_len &&
			desc >= head_desc &&
			desc - head_desc < (int)max_n_descs) {
		desc++;
		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
			VC_LOG_ERR("incorrect descriptor");
			goto error_exit;
		}

		if (desc->len <= offset) {
			offset -= desc->len;
			continue;
		}

		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RW);
		if (unlikely(dst == NULL || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->src = src + offset;
		wb_data->dst = dst + offset;
		wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += wb_data->len;
		offset = 0;

		if (write_back_len) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	}

	if (unlikely(desc - head_desc == (int)max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	*end_wb_data = wb_data;

	return head;

error_exit:
	if (head)
		free_wb_data(head, vc_req->wb_pool);

	return NULL;
}

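/* Sanity-check the cipher request parameters against the host limits. */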
static __rte_always_inline uint8_t
vhost_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req)
{
	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
		(req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
		(req->para.dst_data_len >= req->para.src_data_len) &&
		(req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE)))
		return VIRTIO_CRYPTO_OK;
	return VIRTIO_CRYPTO_BADMSG;
}

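/* Translate a virtio-crypto cipher request into a DPDK symmetric crypto
 * op, setting up the source/destination mbufs either by zero-copy mapping
 * or by copying the guest data, depending on vcrypto->option.
 */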
static __rte_always_inline uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_cipher_data_req *cipher,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = head;
	struct vhost_crypto_writeback_data *ewb = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint8_t ret = vhost_crypto_check_cipher_request(cipher);

	if (unlikely(ret != VIRTIO_CRYPTO_OK))
		goto error_exit;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
			cipher->para.iv_len, max_n_descs))) {
		VC_LOG_ERR("Incorrect virtio descriptor");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->data_len = cipher->para.src_data_len;
		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				cipher->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 ||
				m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, cipher->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;
		m_src->data_len = cipher->para.src_data_len;
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, head, &desc, cipher->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect virtio descriptor");
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(head, desc, max_n_descs);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, cipher->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, cipher->para.dst_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		m_dst->data_len = cipher->para.dst_data_len;
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
				cipher->para.dst_data_len, max_n_descs);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* src data */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = cipher->para.src_data_len;

	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
	vc_req->len = cipher->para.dst_data_len + INHDR_LEN;

	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);

	vc_req->len = INHDR_LEN;
	return ret;
}

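/* Sanity-check the cipher+hash chain request parameters against the host
 * limits, including the cipher and hash windows and the digest length.
 */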
static __rte_always_inline uint8_t
vhost_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req)
{
	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
		(req->para.src_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.dst_data_len >= req->para.src_data_len) &&
		(req->para.dst_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.cipher_start_src_offset <
			VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.len_to_cipher <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.hash_start_src_offset <
			VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.len_to_hash <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.cipher_start_src_offset + req->para.len_to_cipher <=
			req->para.src_data_len) &&
		(req->para.hash_start_src_offset + req->para.len_to_hash <=
			req->para.src_data_len) &&
		(req->para.dst_data_len + req->para.hash_result_len <=
			VHOST_CRYPTO_MAX_DATA_SIZE)))
		return VIRTIO_CRYPTO_OK;
	return VIRTIO_CRYPTO_BADMSG;
}

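/* Translate a virtio-crypto cipher+hash chain request into a DPDK
 * symmetric crypto op, including the digest placement, either by
 * zero-copy mapping or by copying the guest data.
 */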
static __rte_always_inline uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_alg_chain_data_req *chain,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = head, *digest_desc;
	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint32_t digest_offset;
	void *digest_addr;
	uint8_t ret = vhost_crypto_check_chain_request(chain);

	if (unlikely(ret != VIRTIO_CRYPTO_OK))
		goto error_exit;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
			chain->para.iv_len, max_n_descs) < 0)) {
		VC_LOG_ERR("Incorrect virtio descriptor");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->data_len = chain->para.src_data_len;
		m_dst->data_len = chain->para.dst_data_len;

		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				chain->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, chain->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;
		m_src->data_len = chain->para.src_data_len;
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, head, &desc, chain->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect virtio descriptor");
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(head, desc, max_n_descs);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc,
				chain->para.dst_data_len, max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.hash_result_len);
		op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
				VHOST_ACCESS_RW);
		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc,
				chain->para.hash_result_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *),
				chain->para.cipher_start_src_offset,
				chain->para.dst_data_len -
					chain->para.cipher_start_src_offset,
				max_n_descs);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		digest_desc = desc;
		digest_offset = m_src->data_len;
		digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
				digest_offset);

		/* create a wb_data for digest */
		ewb->next = prepare_write_back_data(vc_req, head, &desc,
				&ewb2, digest_addr, 0,
				chain->para.hash_result_len, max_n_descs);
		if (unlikely(ewb->next == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(copy_data(digest_addr, vc_req, head, &digest_desc,
				chain->para.hash_result_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect virtio descriptor");
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		op->sym->auth.digest.data = digest_addr;
		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
				digest_offset);
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* record inhdr */
	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
	op->sym->cipher.data.length = chain->para.src_data_len -
			chain->para.cipher_start_src_offset;

	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
	op->sym->auth.data.length = chain->para.len_to_hash;

	vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
			INHDR_LEN;
	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);
	vc_req->len = INHDR_LEN;
	return ret;
}

/**
 * Process one virtio-crypto request from an indirect descriptor chain.
 */
static __rte_always_inline int
vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
		struct vring_desc *head, struct vhost_crypto_desc *descs,
		uint16_t desc_idx)
{
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
	struct rte_cryptodev_sym_session *session;
	struct virtio_crypto_op_data_req req;
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_crypto_desc *desc = descs;
	struct vring_desc *src_desc;
	uint64_t session_id;
	uint64_t dlen;
	uint32_t nb_descs = 0, max_n_descs, i;
	int err;

	vc_req->desc_idx = desc_idx;
	vc_req->dev = vcrypto->dev;
	vc_req->vq = vq;

	if (unlikely((head->flags & VRING_DESC_F_INDIRECT) == 0)) {
		VC_LOG_ERR("Invalid descriptor");
		return -1;
	}

	dlen = head->len;
	src_desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
			&dlen, VHOST_ACCESS_RO);
	if (unlikely(!src_desc || dlen != head->len)) {
		VC_LOG_ERR("Invalid descriptor");
		return -1;
	}
	head = src_desc;

	nb_descs = max_n_descs = dlen / sizeof(struct vring_desc);
	if (unlikely(nb_descs > VHOST_CRYPTO_MAX_N_DESC || nb_descs == 0)) {
		err = VIRTIO_CRYPTO_ERR;
		VC_LOG_ERR("Cannot process num of descriptors %u", nb_descs);
		if (nb_descs > 0) {
			struct vring_desc *inhdr_desc = head;
			while (inhdr_desc->flags & VRING_DESC_F_NEXT) {
				if (inhdr_desc->next >= max_n_descs)
					return -1;
				inhdr_desc = &head[inhdr_desc->next];
			}
			if (inhdr_desc->len != sizeof(*inhdr))
				return -1;
			dlen = inhdr_desc->len;
			inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *,
					vc_req, inhdr_desc->addr, &dlen,
					VHOST_ACCESS_WO);
			if (unlikely(!inhdr || dlen != inhdr_desc->len))
				return -1;
			inhdr->status = VIRTIO_CRYPTO_ERR;
			return -1;
		}
	}

	/* copy descriptors to local variable */
	for (i = 0; i < max_n_descs; i++) {
		desc->addr = src_desc->addr;
		desc->len = src_desc->len;
		desc->flags = src_desc->flags;
		desc++;
		if (unlikely((src_desc->flags & VRING_DESC_F_NEXT) == 0))
			break;
		if (unlikely(src_desc->next >= max_n_descs)) {
			err = VIRTIO_CRYPTO_BADMSG;
			VC_LOG_ERR("Invalid descriptor");
			goto error_exit;
		}
		src_desc = &head[src_desc->next];
	}

	vc_req->head = head;
	vc_req->zero_copy = vcrypto->option;

	nb_descs = desc - descs;
	desc = descs;

	if (unlikely(desc->len < sizeof(req))) {
		err = VIRTIO_CRYPTO_BADMSG;
		VC_LOG_ERR("Invalid descriptor");
		goto error_exit;
	}

	if (unlikely(copy_data(&req, vc_req, descs, &desc, sizeof(req),
			max_n_descs) < 0)) {
		err = VIRTIO_CRYPTO_BADMSG;
		VC_LOG_ERR("Invalid descriptor");
		goto error_exit;
	}

	/* desc is advanced by 1 now */
	max_n_descs -= 1;

	switch (req.header.opcode) {
	case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
	case VIRTIO_CRYPTO_CIPHER_DECRYPT:
		session_id = req.header.session_id;

		/* one branch to avoid unnecessary table lookup */
		if (vcrypto->cache_session_id != session_id) {
			err = rte_hash_lookup_data(vcrypto->session_map,
					&session_id, (void **)&session);
			if (unlikely(err < 0)) {
				err = VIRTIO_CRYPTO_ERR;
				VC_LOG_ERR("Failed to find session %"PRIu64,
						session_id);
				goto error_exit;
			}

			vcrypto->cache_session = session;
			vcrypto->cache_session_id = session_id;
		}

		session = vcrypto->cache_session;

		err = rte_crypto_op_attach_sym_session(op, session);
		if (unlikely(err < 0)) {
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Failed to attach session to op");
			goto error_exit;
		}

		switch (req.u.sym_req.op_type) {
		case VIRTIO_CRYPTO_SYM_OP_NONE:
			err = VIRTIO_CRYPTO_NOTSUPP;
			break;
		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
					&req.u.sym_req.u.cipher, desc,
					max_n_descs);
			break;
		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
			err = prepare_sym_chain_op(vcrypto, op, vc_req,
					&req.u.sym_req.u.chain, desc,
					max_n_descs);
			break;
		}
		if (unlikely(err != 0)) {
			VC_LOG_ERR("Failed to process sym request");
			goto error_exit;
		}
		break;
	default:
		err = VIRTIO_CRYPTO_ERR;
		VC_LOG_ERR("Unsupported symmetric crypto request type %u",
				req.header.opcode);
		goto error_exit;
	}

	return 0;

error_exit:

	inhdr = reach_inhdr(vc_req, descs, max_n_descs);
	if (likely(inhdr != NULL))
		inhdr->status = (uint8_t)err;

	return -1;
}

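/* Complete a single crypto op: perform the write back if needed, fill the
 * used ring entry and release the mbufs. Returns the virtqueue the op
 * belongs to, or NULL on error; an op from a different virtqueue than
 * old_vq is left untouched.
 */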
static __rte_always_inline struct vhost_virtqueue *
vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
		struct vhost_virtqueue *old_vq)
{
	struct rte_mbuf *m_src = op->sym->m_src;
	struct rte_mbuf *m_dst = op->sym->m_dst;
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
	struct vhost_virtqueue *vq;
	uint16_t used_idx, desc_idx;

	if (unlikely(!vc_req)) {
		VC_LOG_ERR("Failed to retrieve vc_req");
		return NULL;
	}
	vq = vc_req->vq;
	used_idx = vc_req->desc_idx;

	if (old_vq && (vq != old_vq))
		return vq;

	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
	else {
		if (vc_req->zero_copy == 0)
			write_back_data(vc_req);
	}

	desc_idx = vq->avail->ring[used_idx];
	vq->used->ring[desc_idx].id = vq->avail->ring[desc_idx];
	vq->used->ring[desc_idx].len = vc_req->len;

	rte_mempool_put(m_src->pool, (void *)m_src);

	if (m_dst)
		rte_mempool_put(m_dst->pool, (void *)m_dst);

	return vc_req->vq;
}

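/* Finalize a run of ops belonging to the same virtqueue, stopping at the
 * first op from a different virtqueue, then update the used index and
 * report the callfd to kick.
 */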
static __rte_always_inline uint16_t
vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfd)
{
	uint16_t processed = 1;
	struct vhost_virtqueue *vq, *tmp_vq;

	if (unlikely(nb_ops == 0))
		return 0;

	vq = vhost_crypto_finalize_one_request(ops[0], NULL);
	if (unlikely(vq == NULL))
		return 0;
	tmp_vq = vq;

	while (processed < nb_ops) {
		tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
				tmp_vq);

		if (unlikely(vq != tmp_vq))
			break;

		processed++;
	}

	*callfd = vq->callfd;

	*(volatile uint16_t *)&vq->used->idx += processed;

	return processed;
}

int
rte_vhost_crypto_driver_start(const char *path)
{
	uint64_t protocol_features;
	int ret;

	ret = rte_vhost_driver_set_features(path, VIRTIO_CRYPTO_FEATURES);
	if (ret)
		return -1;

	ret = rte_vhost_driver_get_protocol_features(path, &protocol_features);
	if (ret)
		return -1;
	protocol_features |= (1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
	ret = rte_vhost_driver_set_protocol_features(path, protocol_features);
	if (ret)
		return -1;

	return rte_vhost_driver_start(path);
}

int
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
		struct rte_mempool *sess_pool,
		struct rte_mempool *sess_priv_pool,
		int socket_id)
{
	struct virtio_net *dev = get_device(vid);
	struct rte_hash_parameters params = {0};
	struct vhost_crypto *vcrypto;
	char name[128];
	int ret;

	if (!dev) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (!vcrypto) {
		VC_LOG_ERR("Insufficient memory");
		return -ENOMEM;
	}

	vcrypto->sess_pool = sess_pool;
	vcrypto->sess_priv_pool = sess_priv_pool;
	vcrypto->cid = cryptodev_id;
	vcrypto->cache_session_id = UINT64_MAX;
	vcrypto->last_session_id = 1;
	vcrypto->dev = dev;
	vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;

	snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
	params.name = name;
	params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
	params.hash_func = rte_jhash;
	params.key_len = sizeof(uint64_t);
	params.socket_id = socket_id;
	vcrypto->session_map = rte_hash_create(&params);
	if (!vcrypto->session_map) {
		VC_LOG_ERR("Failed to create session map");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
	vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
			sizeof(struct vhost_crypto_data_req),
			VHOST_CRYPTO_MAX_DATA_SIZE + RTE_PKTMBUF_HEADROOM,
			rte_socket_id());
	if (!vcrypto->mbuf_pool) {
		VC_LOG_ERR("Failed to create mbuf pool");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
	vcrypto->wb_pool = rte_mempool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE,
			sizeof(struct vhost_crypto_writeback_data),
			128, 0, NULL, NULL, NULL, NULL,
			rte_socket_id(), 0);
	if (!vcrypto->wb_pool) {
		VC_LOG_ERR("Failed to create mempool");
		ret = -ENOMEM;
		goto error_exit;
	}

	dev->extern_data = vcrypto;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;

	return 0;

error_exit:
	rte_hash_free(vcrypto->session_map);
	rte_mempool_free(vcrypto->mbuf_pool);

	rte_free(vcrypto);

	return ret;
}

int
rte_vhost_crypto_free(int vid)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	vcrypto = dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	rte_hash_free(vcrypto->session_map);
	rte_mempool_free(vcrypto->mbuf_pool);
	rte_mempool_free(vcrypto->wb_pool);
	rte_free(vcrypto);

	dev->extern_data = NULL;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = NULL;

	return 0;
}

int
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	if (unlikely((uint32_t)option >=
				RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
		VC_LOG_ERR("Invalid option %i", option);
		return -EINVAL;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	if (vcrypto->option == (uint8_t)option)
		return 0;

	if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
			!(rte_mempool_full(vcrypto->wb_pool))) {
		VC_LOG_ERR("Cannot update zero copy as mempool is not full");
		return -EINVAL;
	}

	if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
		char name[128];

		snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
		vcrypto->wb_pool = rte_mempool_create(name,
				VHOST_CRYPTO_MBUF_POOL_SIZE,
				sizeof(struct vhost_crypto_writeback_data),
				128, 0, NULL, NULL, NULL, NULL,
				rte_socket_id(), 0);
		if (!vcrypto->wb_pool) {
			VC_LOG_ERR("Failed to create mempool");
			return -ENOMEM;
		}
	} else {
		rte_mempool_free(vcrypto->wb_pool);
		vcrypto->wb_pool = NULL;
	}

	vcrypto->option = (uint8_t)option;

	return 0;
}

uint16_t
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
	struct vhost_crypto_desc descs[VHOST_CRYPTO_MAX_N_DESC];
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	struct vhost_virtqueue *vq;
	uint16_t avail_idx;
	uint16_t start_idx;
	uint16_t count;
	uint16_t i = 0;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return 0;
	}

	if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
		VC_LOG_ERR("Invalid qid %u", qid);
		return 0;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return 0;
	}

	vq = dev->virtqueue[qid];

	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
	start_idx = vq->last_used_idx;
	count = avail_idx - start_idx;
	count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
	count = RTE_MIN(count, nb_ops);

	if (unlikely(count == 0))
		return 0;

	/* for zero copy, we need 2 empty mbufs for src and dst, otherwise
	 * we need only 1 mbuf as src and dst
	 */
	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count * 2) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i * 2];
			op->sym->m_dst = mbufs[i * 2 + 1];
			op->sym->m_src->data_off = 0;
			op->sym->m_dst->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, descs, used_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i * 2],
					(count - i) * 2);

		break;

	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i];
			op->sym->m_dst = NULL;
			op->sym->m_src->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, descs, used_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i],
					count - i);

		break;

	}

	vq->last_used_idx += i;

	return i;
}

uint16_t
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{
	struct rte_crypto_op **tmp_ops = ops;
	uint16_t count = 0, left = nb_ops;
	int callfd;
	uint16_t idx = 0;

	while (left) {
		count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
				&callfd);
		if (unlikely(count == 0))
			break;

		tmp_ops = &tmp_ops[count];
		left -= count;

		callfds[idx++] = callfd;

		if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
			VC_LOG_ERR("Too many vqs");
			break;
		}
	}

	*nb_callfds = idx;

	return nb_ops - left;
}