xref: /dpdk/lib/vhost/vhost_crypto.c (revision 665b49c51639a10c553433bc2bcd85c7331c631e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017-2018 Intel Corporation
3  */
4 #include <rte_malloc.h>
5 #include <rte_hash.h>
6 #include <rte_jhash.h>
7 #include <rte_mbuf.h>
8 #include <rte_cryptodev.h>
9 
10 #include "rte_vhost_crypto.h"
11 #include "vhost.h"
12 #include "vhost_user.h"
13 #include "virtio_crypto.h"
14 
15 #define INHDR_LEN		(sizeof(struct virtio_crypto_inhdr))
16 #define IV_OFFSET		(sizeof(struct rte_crypto_op) + \
17 				sizeof(struct rte_crypto_sym_op))
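/*
 * Layout note (accurate to the macro above): the IV travels in the crypto
 * op's private data area, immediately after the symmetric op structure:
 *
 *   [struct rte_crypto_op][struct rte_crypto_sym_op][IV bytes ...]
 *                                                    ^-- IV_OFFSET
 *
 * so rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET), used below,
 * yields the IV pointer of an op.
 */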
18 
19 #ifdef RTE_LIBRTE_VHOST_DEBUG
20 #define VC_LOG_ERR(fmt, args...)				\
21 	RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n",	\
22 		"Vhost-Crypto",	__func__, __LINE__, ## args)
23 #define VC_LOG_INFO(fmt, args...)				\
24 	RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n",	\
25 		"Vhost-Crypto",	__func__, __LINE__, ## args)
26 
27 #define VC_LOG_DBG(fmt, args...)				\
28 	RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n",	\
29 		"Vhost-Crypto",	__func__, __LINE__, ## args)
30 #else
31 #define VC_LOG_ERR(fmt, args...)				\
32 	RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
33 #define VC_LOG_INFO(fmt, args...)				\
34 	RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
35 #define VC_LOG_DBG(fmt, args...)
36 #endif
37 
38 #define VIRTIO_CRYPTO_FEATURES ((1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |	\
39 		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |			\
40 		(1ULL << VIRTIO_RING_F_EVENT_IDX) |			\
41 		(1ULL << VIRTIO_NET_F_CTRL_VQ) |			\
42 		(1ULL << VIRTIO_F_VERSION_1) |				\
43 		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
44 
45 #define IOVA_TO_VVA(t, r, a, l, p)					\
46 	((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))
47 
48 /*
49  * vhost_crypto_desc is used to copy the original vring_desc entries into a
50  * local buffer before processing (the next index is not copied). The result
51  * is an array of vhost_crypto_desc elements laid out in the order defined by
52  * the original vring_desc.next chain.
53  */
54 #define vhost_crypto_desc vring_desc
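/*
 * Example: if the guest chains its indirect table entries as 0 -> 2 -> 1
 * via .next, the local copy becomes {desc[0], desc[2], desc[1]}, so the
 * processing code can simply walk the array linearly.
 */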
55 
56 static int
57 cipher_algo_transform(uint32_t virtio_cipher_algo,
58 		enum rte_crypto_cipher_algorithm *algo)
59 {
60 	switch (virtio_cipher_algo) {
61 	case VIRTIO_CRYPTO_CIPHER_AES_CBC:
62 		*algo = RTE_CRYPTO_CIPHER_AES_CBC;
63 		break;
64 	case VIRTIO_CRYPTO_CIPHER_AES_CTR:
65 		*algo = RTE_CRYPTO_CIPHER_AES_CTR;
66 		break;
67 	case VIRTIO_CRYPTO_CIPHER_DES_ECB:
68 		*algo = -VIRTIO_CRYPTO_NOTSUPP;
69 		break;
70 	case VIRTIO_CRYPTO_CIPHER_DES_CBC:
71 		*algo = RTE_CRYPTO_CIPHER_DES_CBC;
72 		break;
73 	case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
74 		*algo = RTE_CRYPTO_CIPHER_3DES_ECB;
75 		break;
76 	case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
77 		*algo = RTE_CRYPTO_CIPHER_3DES_CBC;
78 		break;
79 	case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
80 		*algo = RTE_CRYPTO_CIPHER_3DES_CTR;
81 		break;
82 	case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
83 		*algo = RTE_CRYPTO_CIPHER_KASUMI_F8;
84 		break;
85 	case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
86 		*algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
87 		break;
88 	case VIRTIO_CRYPTO_CIPHER_AES_F8:
89 		*algo = RTE_CRYPTO_CIPHER_AES_F8;
90 		break;
91 	case VIRTIO_CRYPTO_CIPHER_AES_XTS:
92 		*algo = RTE_CRYPTO_CIPHER_AES_XTS;
93 		break;
94 	case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
95 		*algo = RTE_CRYPTO_CIPHER_ZUC_EEA3;
96 		break;
97 	default:
98 		return -VIRTIO_CRYPTO_BADMSG;
100 	}
101 
102 	return 0;
103 }
104 
105 static int
106 auth_algo_transform(uint32_t virtio_auth_algo,
107 		enum rte_crypto_auth_algorithm *algo)
108 {
109 	switch (virtio_auth_algo) {
110 	case VIRTIO_CRYPTO_NO_MAC:
111 		*algo = RTE_CRYPTO_AUTH_NULL;
112 		break;
113 	case VIRTIO_CRYPTO_MAC_HMAC_MD5:
114 		*algo = RTE_CRYPTO_AUTH_MD5_HMAC;
115 		break;
116 	case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
117 		*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
118 		break;
119 	case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
120 		*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
121 		break;
122 	case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
123 		*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
124 		break;
125 	case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
126 		*algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
127 		break;
128 	case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
129 		*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
130 		break;
131 	case VIRTIO_CRYPTO_MAC_CMAC_AES:
132 		*algo = RTE_CRYPTO_AUTH_AES_CMAC;
133 		break;
134 	case VIRTIO_CRYPTO_MAC_KASUMI_F9:
135 		*algo = RTE_CRYPTO_AUTH_KASUMI_F9;
136 		break;
137 	case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
138 		*algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
139 		break;
140 	case VIRTIO_CRYPTO_MAC_GMAC_AES:
141 		*algo = RTE_CRYPTO_AUTH_AES_GMAC;
142 		break;
143 	case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
144 		*algo = RTE_CRYPTO_AUTH_AES_CBC_MAC;
145 		break;
146 	case VIRTIO_CRYPTO_MAC_XCBC_AES:
147 		*algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
148 		break;
149 	case VIRTIO_CRYPTO_MAC_CMAC_3DES:
150 	case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
151 	case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
152 		return -VIRTIO_CRYPTO_NOTSUPP;
153 	default:
154 		return -VIRTIO_CRYPTO_BADMSG;
155 	}
156 
157 	return 0;
158 }
159 
160 static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
161 {
162 	int len;
163 
164 	switch (algo) {
165 	case RTE_CRYPTO_CIPHER_3DES_CBC:
166 		len = 8;
167 		break;
168 	case RTE_CRYPTO_CIPHER_3DES_CTR:
169 		len = 8;
170 		break;
171 	case RTE_CRYPTO_CIPHER_3DES_ECB:
172 		len = 8;
173 		break;
174 	case RTE_CRYPTO_CIPHER_AES_CBC:
175 		len = 16;
176 		break;
177 
178 	/* TODO: add common algos */
179 
180 	default:
181 		len = -1;
182 		break;
183 	}
184 
185 	return len;
186 }
187 
188 /**
189  * vhost_crypto struct is used to maintain a number of virtio_crypto devices
190  * served by one DPDK crypto device that handles all crypto workloads. It is
191  * local to vhost_crypto.c.
192  */
193 struct vhost_crypto {
194 	/** Used to lookup DPDK Cryptodev Session based on VIRTIO crypto
195 	 *  session ID.
196 	 */
197 	struct rte_hash *session_map;
198 	struct rte_mempool *mbuf_pool;
199 	struct rte_mempool *sess_pool;
200 	struct rte_mempool *wb_pool;
201 
202 	/** DPDK cryptodev ID */
203 	uint8_t cid;
204 	uint16_t nb_qps;
205 
206 	uint64_t last_session_id;
207 
208 	uint64_t cache_session_id;
209 	struct rte_cryptodev_sym_session *cache_session;
210 	/** socket id for the device */
211 	int socket_id;
212 
213 	struct virtio_net *dev;
214 
215 	uint8_t option;
216 } __rte_cache_aligned;
217 
218 struct vhost_crypto_writeback_data {
219 	uint8_t *src;
220 	uint8_t *dst;
221 	uint64_t len;
222 	struct vhost_crypto_writeback_data *next;
223 };
224 
225 struct vhost_crypto_data_req {
226 	struct vring_desc *head;
227 	struct virtio_net *dev;
228 	struct virtio_crypto_inhdr *inhdr;
229 	struct vhost_virtqueue *vq;
230 	struct vhost_crypto_writeback_data *wb;
231 	struct rte_mempool *wb_pool;
232 	uint16_t desc_idx;
233 	uint16_t len;
234 	uint16_t zero_copy;
235 };
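/*
 * One vhost_crypto_data_req lives in the private data area of each source
 * mbuf (see rte_mbuf_to_priv() below; mbuf_pool is created with a matching
 * priv_size), so per-request state travels with the op through the
 * cryptodev and back.
 */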
236 
237 static int
238 transform_cipher_param(struct rte_crypto_sym_xform *xform,
239 		VhostUserCryptoSessionParam *param)
240 {
241 	int ret;
242 
243 	ret = cipher_algo_transform(param->cipher_algo, &xform->cipher.algo);
244 	if (unlikely(ret < 0))
245 		return ret;
246 
247 	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
248 		VC_LOG_DBG("Invalid cipher key length");
249 		return -VIRTIO_CRYPTO_BADMSG;
250 	}
251 
252 	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
253 	xform->cipher.key.length = param->cipher_key_len;
254 	if (xform->cipher.key.length > 0)
255 		xform->cipher.key.data = param->cipher_key_buf;
256 	if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
257 		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
258 	else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
259 		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
260 	else {
261 		VC_LOG_DBG("Bad operation type");
262 		return -VIRTIO_CRYPTO_BADMSG;
263 	}
264 
265 	ret = get_iv_len(xform->cipher.algo);
266 	if (unlikely(ret < 0))
267 		return ret;
268 	xform->cipher.iv.length = (uint16_t)ret;
269 	xform->cipher.iv.offset = IV_OFFSET;
270 	return 0;
271 }
272 
273 static int
274 transform_chain_param(struct rte_crypto_sym_xform *xforms,
275 		VhostUserCryptoSessionParam *param)
276 {
277 	struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
278 	int ret;
279 
280 	switch (param->chaining_dir) {
281 	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
282 		xform_auth = xforms;
283 		xform_cipher = xforms->next;
284 		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
285 		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
286 		break;
287 	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
288 		xform_cipher = xforms;
289 		xform_auth = xforms->next;
290 		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
291 		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
292 		break;
293 	default:
294 		return -VIRTIO_CRYPTO_BADMSG;
295 	}
296 
297 	/* cipher */
298 	ret = cipher_algo_transform(param->cipher_algo,
299 			&xform_cipher->cipher.algo);
300 	if (unlikely(ret < 0))
301 		return ret;
302 
303 	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
304 		VC_LOG_DBG("Invalid cipher key length");
305 		return -VIRTIO_CRYPTO_BADMSG;
306 	}
307 
308 	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
309 	xform_cipher->cipher.key.length = param->cipher_key_len;
310 	xform_cipher->cipher.key.data = param->cipher_key_buf;
311 	ret = get_iv_len(xform_cipher->cipher.algo);
312 	if (unlikely(ret < 0))
313 		return ret;
314 	xform_cipher->cipher.iv.length = (uint16_t)ret;
315 	xform_cipher->cipher.iv.offset = IV_OFFSET;
316 
317 	/* auth */
318 	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
319 	ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo);
320 	if (unlikely(ret < 0))
321 		return ret;
322 
323 	if (param->auth_key_len > VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH) {
324 		VC_LOG_DBG("Invalid auth key length");
325 		return -VIRTIO_CRYPTO_BADMSG;
326 	}
327 
328 	xform_auth->auth.digest_length = param->digest_len;
329 	xform_auth->auth.key.length = param->auth_key_len;
330 	xform_auth->auth.key.data = param->auth_key_buf;
331 
332 	return 0;
333 }
334 
335 static void
336 vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
337 		VhostUserCryptoSessionParam *sess_param)
338 {
339 	struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
340 	struct rte_cryptodev_sym_session *session;
341 	int ret;
342 
343 	switch (sess_param->op_type) {
344 	case VIRTIO_CRYPTO_SYM_OP_NONE:
345 	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
346 		ret = transform_cipher_param(&xform1, sess_param);
347 		if (unlikely(ret)) {
348 			VC_LOG_ERR("Failed to transform session message (%i)", ret);
349 			sess_param->session_id = ret;
350 			return;
351 		}
352 		break;
353 	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
354 		if (unlikely(sess_param->hash_mode !=
355 				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
356 			sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
357 			VC_LOG_ERR("Failed to transform session message (%i)",
358 					-VIRTIO_CRYPTO_NOTSUPP);
359 			return;
360 		}
361 
362 		xform1.next = &xform2;
363 
364 		ret = transform_chain_param(&xform1, sess_param);
365 		if (unlikely(ret)) {
366 			VC_LOG_ERR("Failed to transform session message (%i)", ret);
367 			sess_param->session_id = ret;
368 			return;
369 		}
370 
371 		break;
372 	default:
373 		VC_LOG_ERR("Algorithm not yet supported");
374 		sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
375 		return;
376 	}
377 
378 	session = rte_cryptodev_sym_session_create(vcrypto->cid, &xform1,
379 			vcrypto->sess_pool);
380 	if (!session) {
381 		VC_LOG_ERR("Failed to create session");
382 		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
383 		return;
384 	}
385 
386 	/* insert hash to map */
387 	if (rte_hash_add_key_data(vcrypto->session_map,
388 			&vcrypto->last_session_id, session) < 0) {
389 		VC_LOG_ERR("Failed to insert session to hash table");
390 
391 		if (rte_cryptodev_sym_session_free(vcrypto->cid, session) < 0)
392 			VC_LOG_ERR("Failed to free session");
393 		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
394 		return;
395 	}
396 
397 	VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
398 			vcrypto->last_session_id, vcrypto->dev->vid);
399 
400 	sess_param->session_id = vcrypto->last_session_id;
401 	vcrypto->last_session_id++;
402 }
403 
404 static int
405 vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
406 {
407 	struct rte_cryptodev_sym_session *session;
408 	uint64_t sess_id = session_id;
409 	int ret;
410 
411 	ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
412 			(void **)&session);
413 
414 	if (unlikely(ret < 0)) {
415 		VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
416 		return -VIRTIO_CRYPTO_INVSESS;
417 	}
418 
419 	if (rte_cryptodev_sym_session_free(vcrypto->cid, session) < 0) {
420 		VC_LOG_DBG("Failed to free session");
421 		return -VIRTIO_CRYPTO_ERR;
422 	}
423 
424 	if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
425 		VC_LOG_DBG("Failed to delete session from hash table.");
426 		return -VIRTIO_CRYPTO_ERR;
427 	}
428 
429 	VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
430 			vcrypto->dev->vid);
431 
432 	return 0;
433 }
434 
435 static enum rte_vhost_msg_result
436 vhost_crypto_msg_post_handler(int vid, void *msg)
437 {
438 	struct virtio_net *dev = get_device(vid);
439 	struct vhost_crypto *vcrypto;
440 	struct vhu_msg_context *ctx = msg;
441 	enum rte_vhost_msg_result ret = RTE_VHOST_MSG_RESULT_OK;
442 
443 	if (dev == NULL) {
444 		VC_LOG_ERR("Invalid vid %i", vid);
445 		return RTE_VHOST_MSG_RESULT_ERR;
446 	}
447 
448 	vcrypto = dev->extern_data;
449 	if (vcrypto == NULL) {
450 		VC_LOG_ERR("Cannot find required data, is it initialized?");
451 		return RTE_VHOST_MSG_RESULT_ERR;
452 	}
453 
454 	switch (ctx->msg.request.master) {
455 	case VHOST_USER_CRYPTO_CREATE_SESS:
456 		vhost_crypto_create_sess(vcrypto,
457 				&ctx->msg.payload.crypto_session);
458 		ctx->fd_num = 0;
459 		ret = RTE_VHOST_MSG_RESULT_REPLY;
460 		break;
461 	case VHOST_USER_CRYPTO_CLOSE_SESS:
462 		if (vhost_crypto_close_sess(vcrypto, ctx->msg.payload.u64))
463 			ret = RTE_VHOST_MSG_RESULT_ERR;
464 		break;
465 	default:
466 		ret = RTE_VHOST_MSG_RESULT_NOT_HANDLED;
467 		break;
468 	}
469 
470 	return ret;
471 }
472 
473 static __rte_always_inline struct vhost_crypto_desc *
474 find_write_desc(struct vhost_crypto_desc *head, struct vhost_crypto_desc *desc,
475 		uint32_t max_n_descs)
476 {
477 	if (desc < head)
478 		return NULL;
479 
480 	while (desc - head < (int)max_n_descs) {
481 		if (desc->flags & VRING_DESC_F_WRITE)
482 			return desc;
483 		desc++;
484 	}
485 
486 	return NULL;
487 }
488 
489 static __rte_always_inline struct virtio_crypto_inhdr *
490 reach_inhdr(struct vhost_crypto_data_req *vc_req,
491 		struct vhost_crypto_desc *head,
492 		uint32_t max_n_descs)
493 	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
494 {
495 	struct virtio_crypto_inhdr *inhdr;
496 	struct vhost_crypto_desc *last = head + (max_n_descs - 1);
497 	uint64_t dlen = last->len;
498 
499 	if (unlikely(dlen != sizeof(*inhdr)))
500 		return NULL;
501 
502 	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, last->addr,
503 			&dlen, VHOST_ACCESS_WO);
504 	if (unlikely(!inhdr || dlen != last->len))
505 		return NULL;
506 
507 	return inhdr;
508 }
509 
510 static __rte_always_inline int
511 move_desc(struct vhost_crypto_desc *head,
512 		struct vhost_crypto_desc **cur_desc,
513 		uint32_t size, uint32_t max_n_descs)
514 {
515 	struct vhost_crypto_desc *desc = *cur_desc;
516 	int left = size - desc->len;
517 
518 	while (desc->flags & VRING_DESC_F_NEXT && left > 0 &&
519 			desc >= head &&
520 			desc - head < (int)max_n_descs) {
521 		desc++;
522 		left -= desc->len;
523 	}
524 
525 	if (unlikely(left > 0))
526 		return -1;
527 
528 	if (unlikely(desc - head == (int)max_n_descs))
529 		*cur_desc = NULL;
530 	else
531 		*cur_desc = desc + 1;
532 
533 	return 0;
534 }
535 
536 static __rte_always_inline void *
537 get_data_ptr(struct vhost_crypto_data_req *vc_req,
538 		struct vhost_crypto_desc *cur_desc,
539 		uint8_t perm)
540 	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
541 {
542 	void *data;
543 	uint64_t dlen = cur_desc->len;
544 
545 	data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
546 	if (unlikely(!data || dlen != cur_desc->len)) {
547 		VC_LOG_ERR("Failed to map object");
548 		return NULL;
549 	}
550 
551 	return data;
552 }
553 
554 static __rte_always_inline uint32_t
555 copy_data_from_desc(void *dst, struct vhost_crypto_data_req *vc_req,
556 	struct vhost_crypto_desc *desc, uint32_t size)
557 	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
558 {
559 	uint64_t remain;
560 	uint64_t addr;
561 
562 	remain = RTE_MIN(desc->len, size);
563 	addr = desc->addr;
564 	do {
565 		uint64_t len;
566 		void *src;
567 
568 		len = remain;
569 		src = IOVA_TO_VVA(void *, vc_req, addr, &len, VHOST_ACCESS_RO);
570 		if (unlikely(src == NULL || len == 0))
571 			return 0;
572 
573 		rte_memcpy(dst, src, len);
574 		remain -= len;
575 		/* cast is needed for 32-bit architecture */
576 		dst = RTE_PTR_ADD(dst, (size_t)len);
577 		addr += len;
578 	} while (unlikely(remain != 0));
579 
580 	return RTE_MIN(desc->len, size);
581 }
582 
583 
584 static __rte_always_inline int
585 copy_data(void *data, struct vhost_crypto_data_req *vc_req,
586 	struct vhost_crypto_desc *head, struct vhost_crypto_desc **cur_desc,
587 	uint32_t size, uint32_t max_n_descs)
588 	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
589 {
590 	struct vhost_crypto_desc *desc = *cur_desc;
591 	uint32_t left = size;
592 
593 	do {
594 		uint32_t copied;
595 
596 		copied = copy_data_from_desc(data, vc_req, desc, left);
597 		if (copied == 0)
598 			return -1;
599 		left -= copied;
600 		data = RTE_PTR_ADD(data, copied);
601 	} while (left != 0 && ++desc < head + max_n_descs);
602 
603 	if (unlikely(left != 0))
604 		return -1;
605 
606 	if (unlikely(desc == head + max_n_descs))
607 		*cur_desc = NULL;
608 	else
609 		*cur_desc = desc + 1;
610 
611 	return 0;
612 }
613 
614 static void
615 write_back_data(struct vhost_crypto_data_req *vc_req)
616 {
617 	struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;
618 
619 	while (wb_data) {
620 		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
621 		memset(wb_data->src, 0, wb_data->len);
622 		wb_last = wb_data;
623 		wb_data = wb_data->next;
624 		rte_mempool_put(vc_req->wb_pool, wb_last);
625 	}
626 }
627 
628 static void
629 free_wb_data(struct vhost_crypto_writeback_data *wb_data,
630 		struct rte_mempool *mp)
631 {
632 	while (wb_data->next != NULL)
633 		free_wb_data(wb_data->next, mp);
634 
635 	rte_mempool_put(mp, wb_data);
636 }
637 
638 /**
639  * The function will allocate a vhost_crypto_writeback_data linked list
640  * containing the source and destination data pointers for the write back
641  * operation after dequeued from Cryptodev PMD queues.
642  *
643  * @param vc_req
644  *   The vhost crypto data request pointer
645  * @param cur_desc
646  *   The pointer of the current in use descriptor pointer. The content of
647  *   cur_desc is expected to be updated after the function execution.
648  * @param end_wb_data
649  *   The last write back data element to be returned. It is used only in cipher
650  *   and hash chain operations.
651  * @param src
652  *   The source data pointer
653  * @param offset
654  *   The offset to both source and destination data. For source data the offset
655  *   is the number of bytes between src and start point of cipher operation. For
656  *   destination data the offset is the number of bytes from *cur_desc->addr
657  *   to the point where the src will be written to.
658  * @param write_back_len
659  *   The number of bytes to write back.
660  * @return
661  *   The pointer to the start of the write back data linked list.
662  */
663 static __rte_always_inline struct vhost_crypto_writeback_data *
664 prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
665 		struct vhost_crypto_desc *head_desc,
666 		struct vhost_crypto_desc **cur_desc,
667 		struct vhost_crypto_writeback_data **end_wb_data,
668 		uint8_t *src,
669 		uint32_t offset,
670 		uint64_t write_back_len,
671 		uint32_t max_n_descs)
672 	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
673 {
674 	struct vhost_crypto_writeback_data *wb_data, *head = NULL;
675 	struct vhost_crypto_desc *desc = *cur_desc;
676 	uint64_t dlen;
677 	uint8_t *dst;
678 	int ret;
679 
680 	ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
681 	if (unlikely(ret < 0)) {
682 		VC_LOG_ERR("no memory");
683 		goto error_exit;
684 	}
685 
686 	wb_data = head;
687 
688 	if (likely(desc->len > offset)) {
689 		wb_data->src = src + offset;
690 		dlen = desc->len;
691 		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
692 			&dlen, VHOST_ACCESS_RW);
693 		if (unlikely(!dst || dlen != desc->len)) {
694 			VC_LOG_ERR("Failed to map descriptor");
695 			goto error_exit;
696 		}
697 
698 		wb_data->dst = dst + offset;
699 		wb_data->len = RTE_MIN(dlen - offset, write_back_len);
700 		write_back_len -= wb_data->len;
701 		src += offset + wb_data->len;
702 		offset = 0;
703 
704 		if (unlikely(write_back_len)) {
705 			ret = rte_mempool_get(vc_req->wb_pool,
706 					(void **)&(wb_data->next));
707 			if (unlikely(ret < 0)) {
708 				VC_LOG_ERR("no memory");
709 				goto error_exit;
710 			}
711 
712 			wb_data = wb_data->next;
713 		} else
714 			wb_data->next = NULL;
715 	} else
716 		offset -= desc->len;
717 
718 	while (write_back_len &&
719 			desc >= head_desc &&
720 			desc - head_desc < (int)max_n_descs) {
721 		desc++;
722 		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
723 			VC_LOG_ERR("incorrect descriptor");
724 			goto error_exit;
725 		}
726 
727 		if (desc->len <= offset) {
728 			offset -= desc->len;
729 			continue;
730 		}
731 
732 		dlen = desc->len;
733 		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
734 				VHOST_ACCESS_RW) + offset;
735 		if (unlikely(dst == NULL || dlen != desc->len)) {
736 			VC_LOG_ERR("Failed to map descriptor");
737 			goto error_exit;
738 		}
739 
740 		wb_data->src = src + offset;
741 		wb_data->dst = dst;
742 		wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
743 		write_back_len -= wb_data->len;
744 		src += wb_data->len;
745 		offset = 0;
746 
747 		if (write_back_len) {
748 			ret = rte_mempool_get(vc_req->wb_pool,
749 					(void **)&(wb_data->next));
750 			if (unlikely(ret < 0)) {
751 				VC_LOG_ERR("no memory");
752 				goto error_exit;
753 			}
754 
755 			wb_data = wb_data->next;
756 		} else
757 			wb_data->next = NULL;
758 	}
759 
760 	if (unlikely(desc - head_desc == (int)max_n_descs))
761 		*cur_desc = NULL;
762 	else
763 		*cur_desc = desc + 1;
764 
765 	*end_wb_data = wb_data;
766 
767 	return head;
768 
769 error_exit:
770 	if (head)
771 		free_wb_data(head, vc_req->wb_pool);
772 
773 	return NULL;
774 }
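/*
 * Example (illustrative numbers only): with offset = 4, write_back_len = 100
 * and two 64-byte write descriptors, the resulting list is
 *
 *   {src = src + 4,  dst = dst0 + 4, len = 60} ->
 *   {src = src + 64, dst = dst1,     len = 40}
 *
 * i.e. the offset is consumed once, and the remaining bytes spill into the
 * following descriptor.
 */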
775 
776 static __rte_always_inline uint8_t
777 vhost_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req)
778 {
779 	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
780 		(req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
781 		(req->para.dst_data_len >= req->para.src_data_len) &&
782 		(req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE)))
783 		return VIRTIO_CRYPTO_OK;
784 	return VIRTIO_CRYPTO_BADMSG;
785 }
786 
787 static __rte_always_inline uint8_t
788 prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
789 		struct vhost_crypto_data_req *vc_req,
790 		struct virtio_crypto_cipher_data_req *cipher,
791 		struct vhost_crypto_desc *head,
792 		uint32_t max_n_descs)
793 	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
794 {
795 	struct vhost_crypto_desc *desc = head;
796 	struct vhost_crypto_writeback_data *ewb = NULL;
797 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
798 	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
799 	uint8_t ret = vhost_crypto_check_cipher_request(cipher);
800 
801 	if (unlikely(ret != VIRTIO_CRYPTO_OK))
802 		goto error_exit;
803 
804 	/* prepare */
805 	/* iv */
806 	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
807 			cipher->para.iv_len, max_n_descs))) {
808 		VC_LOG_ERR("Incorrect virtio descriptor");
809 		ret = VIRTIO_CRYPTO_BADMSG;
810 		goto error_exit;
811 	}
812 
813 	switch (vcrypto->option) {
814 	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
815 		m_src->data_len = cipher->para.src_data_len;
816 		rte_mbuf_iova_set(m_src,
817 				  gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.src_data_len));
818 		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
819 		if (unlikely(rte_mbuf_iova_get(m_src) == 0 || m_src->buf_addr == NULL)) {
820 			VC_LOG_ERR("zero_copy may fail due to cross page data");
821 			ret = VIRTIO_CRYPTO_ERR;
822 			goto error_exit;
823 		}
824 
825 		if (unlikely(move_desc(head, &desc, cipher->para.src_data_len,
826 				max_n_descs) < 0)) {
827 			VC_LOG_ERR("Incorrect descriptor");
828 			ret = VIRTIO_CRYPTO_ERR;
829 			goto error_exit;
830 		}
831 
832 		break;
833 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
834 		vc_req->wb_pool = vcrypto->wb_pool;
835 		m_src->data_len = cipher->para.src_data_len;
836 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
837 				vc_req, head, &desc, cipher->para.src_data_len,
838 				max_n_descs) < 0)) {
839 			VC_LOG_ERR("Incorrect virtio descriptor");
840 			ret = VIRTIO_CRYPTO_BADMSG;
841 			goto error_exit;
842 		}
843 		break;
844 	default:
845 		ret = VIRTIO_CRYPTO_BADMSG;
846 		goto error_exit;
847 	}
848 
849 	/* dst */
850 	desc = find_write_desc(head, desc, max_n_descs);
851 	if (unlikely(!desc)) {
852 		VC_LOG_ERR("Cannot find write location");
853 		ret = VIRTIO_CRYPTO_BADMSG;
854 		goto error_exit;
855 	}
856 
857 	switch (vcrypto->option) {
858 	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
859 		rte_mbuf_iova_set(m_dst,
860 				  gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.dst_data_len));
861 		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
862 		if (unlikely(rte_mbuf_iova_get(m_dst) == 0 || m_dst->buf_addr == NULL)) {
863 			VC_LOG_ERR("zero_copy may fail due to cross page data");
864 			ret = VIRTIO_CRYPTO_ERR;
865 			goto error_exit;
866 		}
867 
868 		if (unlikely(move_desc(head, &desc, cipher->para.dst_data_len,
869 				max_n_descs) < 0)) {
870 			VC_LOG_ERR("Incorrect descriptor");
871 			ret = VIRTIO_CRYPTO_ERR;
872 			goto error_exit;
873 		}
874 
875 		m_dst->data_len = cipher->para.dst_data_len;
876 		break;
877 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
878 		vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
879 				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
880 				cipher->para.dst_data_len, max_n_descs);
881 		if (unlikely(vc_req->wb == NULL)) {
882 			ret = VIRTIO_CRYPTO_ERR;
883 			goto error_exit;
884 		}
885 
886 		break;
887 	default:
888 		ret = VIRTIO_CRYPTO_BADMSG;
889 		goto error_exit;
890 	}
891 
892 	/* src data */
893 	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
894 	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
895 
896 	op->sym->cipher.data.offset = 0;
897 	op->sym->cipher.data.length = cipher->para.src_data_len;
898 
899 	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
900 	if (unlikely(vc_req->inhdr == NULL)) {
901 		ret = VIRTIO_CRYPTO_BADMSG;
902 		goto error_exit;
903 	}
904 
905 	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
906 	vc_req->len = cipher->para.dst_data_len + INHDR_LEN;
907 
908 	return 0;
909 
910 error_exit:
911 	if (vc_req->wb)
912 		free_wb_data(vc_req->wb, vc_req->wb_pool);
913 
914 	vc_req->len = INHDR_LEN;
915 	return ret;
916 }
917 
918 static __rte_always_inline uint8_t
919 vhost_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req)
920 {
921 	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
922 		(req->para.src_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
923 		(req->para.dst_data_len >= req->para.src_data_len) &&
924 		(req->para.dst_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
925 		(req->para.cipher_start_src_offset <
926 			VHOST_CRYPTO_MAX_DATA_SIZE) &&
927 		(req->para.len_to_cipher <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
928 		(req->para.hash_start_src_offset <
929 			VHOST_CRYPTO_MAX_DATA_SIZE) &&
930 		(req->para.len_to_hash <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
931 		(req->para.cipher_start_src_offset + req->para.len_to_cipher <=
932 			req->para.src_data_len) &&
933 		(req->para.hash_start_src_offset + req->para.len_to_hash <=
934 			req->para.src_data_len) &&
935 		(req->para.dst_data_len + req->para.hash_result_len <=
936 			VHOST_CRYPTO_MAX_DATA_SIZE)))
937 		return VIRTIO_CRYPTO_OK;
938 	return VIRTIO_CRYPTO_BADMSG;
939 }
940 
941 static __rte_always_inline uint8_t
942 prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
943 		struct vhost_crypto_data_req *vc_req,
944 		struct virtio_crypto_alg_chain_data_req *chain,
945 		struct vhost_crypto_desc *head,
946 		uint32_t max_n_descs)
947 	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
948 {
949 	struct vhost_crypto_desc *desc = head, *digest_desc;
950 	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
951 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
952 	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
953 	uint32_t digest_offset;
954 	void *digest_addr;
955 	uint8_t ret = vhost_crypto_check_chain_request(chain);
956 
957 	if (unlikely(ret != VIRTIO_CRYPTO_OK))
958 		goto error_exit;
959 
960 	/* prepare */
961 	/* iv */
962 	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
963 			chain->para.iv_len, max_n_descs) < 0)) {
964 		VC_LOG_ERR("Incorrect virtio descriptor");
965 		ret = VIRTIO_CRYPTO_BADMSG;
966 		goto error_exit;
967 	}
968 
969 	switch (vcrypto->option) {
970 	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
971 		m_src->data_len = chain->para.src_data_len;
972 		m_dst->data_len = chain->para.dst_data_len;
973 
974 		rte_mbuf_iova_set(m_src,
975 				  gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.src_data_len));
976 		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
977 		if (unlikely(rte_mbuf_iova_get(m_src) == 0 || m_src->buf_addr == NULL)) {
978 			VC_LOG_ERR("zero_copy may fail due to cross page data");
979 			ret = VIRTIO_CRYPTO_ERR;
980 			goto error_exit;
981 		}
982 
983 		if (unlikely(move_desc(head, &desc, chain->para.src_data_len,
984 				max_n_descs) < 0)) {
985 			VC_LOG_ERR("Incorrect descriptor");
986 			ret = VIRTIO_CRYPTO_ERR;
987 			goto error_exit;
988 		}
989 		break;
990 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
991 		vc_req->wb_pool = vcrypto->wb_pool;
992 		m_src->data_len = chain->para.src_data_len;
993 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
994 				vc_req, head, &desc, chain->para.src_data_len,
995 				max_n_descs) < 0)) {
996 			VC_LOG_ERR("Incorrect virtio descriptor");
997 			ret = VIRTIO_CRYPTO_BADMSG;
998 			goto error_exit;
999 		}
1000 
1001 		break;
1002 	default:
1003 		ret = VIRTIO_CRYPTO_BADMSG;
1004 		goto error_exit;
1005 	}
1006 
1007 	/* dst */
1008 	desc = find_write_desc(head, desc, max_n_descs);
1009 	if (unlikely(!desc)) {
1010 		VC_LOG_ERR("Cannot find write location");
1011 		ret = VIRTIO_CRYPTO_BADMSG;
1012 		goto error_exit;
1013 	}
1014 
1015 	switch (vcrypto->option) {
1016 	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
1017 		rte_mbuf_iova_set(m_dst,
1018 				  gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.dst_data_len));
1019 		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
1020 		if (unlikely(rte_mbuf_iova_get(m_dst) == 0 || m_dst->buf_addr == NULL)) {
1021 			VC_LOG_ERR("zero_copy may fail due to cross page data");
1022 			ret = VIRTIO_CRYPTO_ERR;
1023 			goto error_exit;
1024 		}
1025 
1026 		if (unlikely(move_desc(head, &desc,
1027 				chain->para.dst_data_len, max_n_descs) < 0)) {
1028 			VC_LOG_ERR("Incorrect descriptor");
1029 			ret = VIRTIO_CRYPTO_ERR;
1030 			goto error_exit;
1031 		}
1032 
1033 		op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
1034 				desc->addr, chain->para.hash_result_len);
1035 		op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
1036 				VHOST_ACCESS_RW);
1037 		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
1038 			VC_LOG_ERR("zero_copy may fail due to cross page data");
1039 			ret = VIRTIO_CRYPTO_ERR;
1040 			goto error_exit;
1041 		}
1042 
1043 		if (unlikely(move_desc(head, &desc,
1044 				chain->para.hash_result_len,
1045 				max_n_descs) < 0)) {
1046 			VC_LOG_ERR("Incorrect descriptor");
1047 			ret = VIRTIO_CRYPTO_ERR;
1048 			goto error_exit;
1049 		}
1050 
1051 		break;
1052 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
1053 		vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
1054 				rte_pktmbuf_mtod(m_src, uint8_t *),
1055 				chain->para.cipher_start_src_offset,
1056 				chain->para.dst_data_len -
1057 					chain->para.cipher_start_src_offset,
1058 				max_n_descs);
1059 		if (unlikely(vc_req->wb == NULL)) {
1060 			ret = VIRTIO_CRYPTO_ERR;
1061 			goto error_exit;
1062 		}
1063 
1064 		digest_desc = desc;
1065 		digest_offset = m_src->data_len;
1066 		digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
1067 				digest_offset);
1068 
1069 		/** create a wb_data for digest */
1070 		ewb->next = prepare_write_back_data(vc_req, head, &desc,
1071 				&ewb2, digest_addr, 0,
1072 				chain->para.hash_result_len, max_n_descs);
1073 		if (unlikely(ewb->next == NULL)) {
1074 			ret = VIRTIO_CRYPTO_ERR;
1075 			goto error_exit;
1076 		}
1077 
1078 		if (unlikely(copy_data(digest_addr, vc_req, head, &digest_desc,
1079 				chain->para.hash_result_len,
1080 				max_n_descs) < 0)) {
1081 			VC_LOG_ERR("Incorrect virtio descriptor");
1082 			ret = VIRTIO_CRYPTO_BADMSG;
1083 			goto error_exit;
1084 		}
1085 
1086 		op->sym->auth.digest.data = digest_addr;
1087 		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
1088 				digest_offset);
1089 		break;
1090 	default:
1091 		ret = VIRTIO_CRYPTO_BADMSG;
1092 		goto error_exit;
1093 	}
1094 
1095 	/* record inhdr */
1096 	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
1097 	if (unlikely(vc_req->inhdr == NULL)) {
1098 		ret = VIRTIO_CRYPTO_BADMSG;
1099 		goto error_exit;
1100 	}
1101 
1102 	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
1103 
1104 	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1105 	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
1106 
1107 	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
1108 	op->sym->cipher.data.length = chain->para.src_data_len -
1109 			chain->para.cipher_start_src_offset;
1110 
1111 	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
1112 	op->sym->auth.data.length = chain->para.len_to_hash;
1113 
1114 	vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
1115 			INHDR_LEN;
1116 	return 0;
1117 
1118 error_exit:
1119 	if (vc_req->wb)
1120 		free_wb_data(vc_req->wb, vc_req->wb_pool);
1121 	vc_req->len = INHDR_LEN;
1122 	return ret;
1123 }
1124 
1125 /**
1126  * Process one virtio-crypto request from a descriptor chain.
1127  */
1128 static __rte_always_inline int
1129 vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
1130 		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
1131 		struct vring_desc *head, struct vhost_crypto_desc *descs,
1132 		uint16_t desc_idx)
1133 	__rte_no_thread_safety_analysis /* FIXME: requires iotlb_lock? */
1134 {
1135 	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
1136 	struct rte_cryptodev_sym_session *session;
1137 	struct virtio_crypto_op_data_req req;
1138 	struct virtio_crypto_inhdr *inhdr;
1139 	struct vhost_crypto_desc *desc = descs;
1140 	struct vring_desc *src_desc;
1141 	uint64_t session_id;
1142 	uint64_t dlen;
1143 	uint32_t nb_descs = 0, max_n_descs, i;
1144 	int err;
1145 
1146 	vc_req->desc_idx = desc_idx;
1147 	vc_req->dev = vcrypto->dev;
1148 	vc_req->vq = vq;
1149 
1150 	if (unlikely((head->flags & VRING_DESC_F_INDIRECT) == 0)) {
1151 		VC_LOG_ERR("Invalid descriptor");
1152 		return -1;
1153 	}
1154 
1155 	dlen = head->len;
1156 	src_desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
1157 			&dlen, VHOST_ACCESS_RO);
1158 	if (unlikely(!src_desc || dlen != head->len)) {
1159 		VC_LOG_ERR("Invalid descriptor");
1160 		return -1;
1161 	}
1162 	head = src_desc;
1163 
1164 	nb_descs = max_n_descs = dlen / sizeof(struct vring_desc);
1165 	if (unlikely(nb_descs > VHOST_CRYPTO_MAX_N_DESC || nb_descs == 0)) {
1166 		err = VIRTIO_CRYPTO_ERR;
1167 		VC_LOG_ERR("Cannot process %u descriptors", nb_descs);
1168 		if (nb_descs > 0) {
1169 			struct vring_desc *inhdr_desc = head;
1170 			while (inhdr_desc->flags & VRING_DESC_F_NEXT) {
1171 				if (inhdr_desc->next >= max_n_descs)
1172 					return -1;
1173 				inhdr_desc = &head[inhdr_desc->next];
1174 			}
1175 			if (inhdr_desc->len != sizeof(*inhdr))
1176 				return -1;
1177 			inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *,
1178 					vc_req, inhdr_desc->addr, &dlen,
1179 					VHOST_ACCESS_WO);
1180 			if (unlikely(!inhdr || dlen != inhdr_desc->len))
1181 				return -1;
1182 			inhdr->status = VIRTIO_CRYPTO_ERR;
1183 			return -1;
1184 		}
		/* nb_descs == 0: no valid descriptors to process */
		return -1;
1185 	}
1186 
1187 	/* copy descriptors to local variable */
1188 	for (i = 0; i < max_n_descs; i++) {
1189 		desc->addr = src_desc->addr;
1190 		desc->len = src_desc->len;
1191 		desc->flags = src_desc->flags;
1192 		desc++;
1193 		if (unlikely((src_desc->flags & VRING_DESC_F_NEXT) == 0))
1194 			break;
1195 		if (unlikely(src_desc->next >= max_n_descs)) {
1196 			err = VIRTIO_CRYPTO_BADMSG;
1197 			VC_LOG_ERR("Invalid descriptor");
1198 			goto error_exit;
1199 		}
1200 		src_desc = &head[src_desc->next];
1201 	}
1202 
1203 	vc_req->head = head;
1204 	vc_req->zero_copy = vcrypto->option;
1205 
1206 	nb_descs = desc - descs;
1207 	desc = descs;
1208 
1209 	if (unlikely(desc->len < sizeof(req))) {
1210 		err = VIRTIO_CRYPTO_BADMSG;
1211 		VC_LOG_ERR("Invalid descriptor");
1212 		goto error_exit;
1213 	}
1214 
1215 	if (unlikely(copy_data(&req, vc_req, descs, &desc, sizeof(req),
1216 			max_n_descs) < 0)) {
1217 		err = VIRTIO_CRYPTO_BADMSG;
1218 		VC_LOG_ERR("Invalid descriptor");
1219 		goto error_exit;
1220 	}
1221 
1222 	/* desc is advanced by 1 now */
1223 	max_n_descs -= 1;
1224 
1225 	switch (req.header.opcode) {
1226 	case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
1227 	case VIRTIO_CRYPTO_CIPHER_DECRYPT:
1228 		session_id = req.header.session_id;
1229 
1230 		/* one branch to avoid unnecessary table lookup */
1231 		if (vcrypto->cache_session_id != session_id) {
1232 			err = rte_hash_lookup_data(vcrypto->session_map,
1233 					&session_id, (void **)&session);
1234 			if (unlikely(err < 0)) {
1235 				err = VIRTIO_CRYPTO_ERR;
1236 				VC_LOG_ERR("Failed to find session %"PRIu64,
1237 						session_id);
1238 				goto error_exit;
1239 			}
1240 
1241 			vcrypto->cache_session = session;
1242 			vcrypto->cache_session_id = session_id;
1243 		}
1244 
1245 		session = vcrypto->cache_session;
1246 
1247 		err = rte_crypto_op_attach_sym_session(op, session);
1248 		if (unlikely(err < 0)) {
1249 			err = VIRTIO_CRYPTO_ERR;
1250 			VC_LOG_ERR("Failed to attach session to op");
1251 			goto error_exit;
1252 		}
1253 
1254 		switch (req.u.sym_req.op_type) {
1255 		case VIRTIO_CRYPTO_SYM_OP_NONE:
1256 			err = VIRTIO_CRYPTO_NOTSUPP;
1257 			break;
1258 		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
1259 			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
1260 					&req.u.sym_req.u.cipher, desc,
1261 					max_n_descs);
1262 			break;
1263 		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
1264 			err = prepare_sym_chain_op(vcrypto, op, vc_req,
1265 					&req.u.sym_req.u.chain, desc,
1266 					max_n_descs);
1267 			break;
1268 		}
1269 		if (unlikely(err != 0)) {
1270 			VC_LOG_ERR("Failed to process sym request");
1271 			goto error_exit;
1272 		}
1273 		break;
1274 	default:
1275 		err = VIRTIO_CRYPTO_ERR;
1276 		VC_LOG_ERR("Unsupported symmetric crypto request type %u",
1277 				req.header.opcode);
1278 		goto error_exit;
1279 	}
1280 
1281 	return 0;
1282 
1283 error_exit:
1284 
1285 	inhdr = reach_inhdr(vc_req, descs, max_n_descs);
1286 	if (likely(inhdr != NULL))
1287 		inhdr->status = (uint8_t)err;
1288 
1289 	return -1;
1290 }
1291 
1292 static __rte_always_inline struct vhost_virtqueue *
1293 vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
1294 		struct vhost_virtqueue *old_vq)
1295 {
1296 	struct rte_mbuf *m_src = op->sym->m_src;
1297 	struct rte_mbuf *m_dst = op->sym->m_dst;
1298 	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
1299 	struct vhost_virtqueue *vq;
1300 	uint16_t used_idx, desc_idx;
1301 
1302 	if (unlikely(!vc_req)) {
1303 		VC_LOG_ERR("Failed to retrieve vc_req");
1304 		return NULL;
1305 	}
1306 	vq = vc_req->vq;
1307 	used_idx = vc_req->desc_idx;
1308 
1309 	if (old_vq && (vq != old_vq))
1310 		return vq;
1311 
1312 	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
1313 		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
1314 	else {
1315 		if (vc_req->zero_copy == 0)
1316 			write_back_data(vc_req);
1317 	}
1318 
1319 	desc_idx = vq->avail->ring[used_idx];
1320 	vq->used->ring[desc_idx].id = vq->avail->ring[desc_idx];
1321 	vq->used->ring[desc_idx].len = vc_req->len;
1322 
1323 	rte_mempool_put(m_src->pool, (void *)m_src);
1324 
1325 	if (m_dst)
1326 		rte_mempool_put(m_dst->pool, (void *)m_dst);
1327 
1328 	return vc_req->vq;
1329 }
1330 
1331 static __rte_always_inline uint16_t
1332 vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
1333 		uint16_t nb_ops, int *callfd)
1334 {
1335 	uint16_t processed = 1;
1336 	struct vhost_virtqueue *vq, *tmp_vq;
1337 
1338 	if (unlikely(nb_ops == 0))
1339 		return 0;
1340 
1341 	vq = vhost_crypto_finalize_one_request(ops[0], NULL);
1342 	if (unlikely(vq == NULL))
1343 		return 0;
1344 	tmp_vq = vq;
1345 
1346 	while (processed < nb_ops) {
1347 		tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
1348 				tmp_vq);
1349 
1350 		if (unlikely(vq != tmp_vq))
1351 			break;
1352 
1353 		processed++;
1354 	}
1355 
1356 	*callfd = vq->callfd;
1357 
1358 	*(volatile uint16_t *)&vq->used->idx += processed;
1359 
1360 	return processed;
1361 }
1362 
1363 int
1364 rte_vhost_crypto_driver_start(const char *path)
1365 {
1366 	uint64_t protocol_features;
1367 	int ret;
1368 
1369 	ret = rte_vhost_driver_set_features(path, VIRTIO_CRYPTO_FEATURES);
1370 	if (ret)
1371 		return -1;
1372 
1373 	ret = rte_vhost_driver_get_protocol_features(path, &protocol_features);
1374 	if (ret)
1375 		return -1;
1376 	protocol_features |= (1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
1377 	ret = rte_vhost_driver_set_protocol_features(path, protocol_features);
1378 	if (ret)
1379 		return -1;
1380 
1381 	return rte_vhost_driver_start(path);
1382 }
1383 
1384 int
1385 rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
1386 		struct rte_mempool *sess_pool,
1387 		int socket_id)
1388 {
1389 	struct virtio_net *dev = get_device(vid);
1390 	struct rte_hash_parameters params = {0};
1391 	struct vhost_crypto *vcrypto;
1392 	char name[128];
1393 	int ret;
1394 
1395 	if (!dev) {
1396 		VC_LOG_ERR("Invalid vid %i", vid);
1397 		return -EINVAL;
1398 	}
1399 
1400 	vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
1401 			RTE_CACHE_LINE_SIZE, socket_id);
1402 	if (!vcrypto) {
1403 		VC_LOG_ERR("Insufficient memory");
1404 		return -ENOMEM;
1405 	}
1406 
1407 	vcrypto->sess_pool = sess_pool;
1408 	vcrypto->cid = cryptodev_id;
1409 	vcrypto->cache_session_id = UINT64_MAX;
1410 	vcrypto->last_session_id = 1;
1411 	vcrypto->dev = dev;
1412 	vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;
1413 
1414 	snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
1415 	params.name = name;
1416 	params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
1417 	params.hash_func = rte_jhash;
1418 	params.key_len = sizeof(uint64_t);
1419 	params.socket_id = socket_id;
1420 	vcrypto->session_map = rte_hash_create(&params);
1421 	if (!vcrypto->session_map) {
1422 		VC_LOG_ERR("Failed to create session map");
1423 		ret = -ENOMEM;
1424 		goto error_exit;
1425 	}
1426 
1427 	snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
1428 	vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
1429 			VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
1430 			sizeof(struct vhost_crypto_data_req),
1431 			VHOST_CRYPTO_MAX_DATA_SIZE + RTE_PKTMBUF_HEADROOM,
1432 			rte_socket_id());
1433 	if (!vcrypto->mbuf_pool) {
1434 		VC_LOG_ERR("Failed to create mbuf pool");
1435 		ret = -ENOMEM;
1436 		goto error_exit;
1437 	}
1438 
1439 	snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
1440 	vcrypto->wb_pool = rte_mempool_create(name,
1441 			VHOST_CRYPTO_MBUF_POOL_SIZE,
1442 			sizeof(struct vhost_crypto_writeback_data),
1443 			128, 0, NULL, NULL, NULL, NULL,
1444 			rte_socket_id(), 0);
1445 	if (!vcrypto->wb_pool) {
1446 		VC_LOG_ERR("Failed to create mempool");
1447 		ret = -ENOMEM;
1448 		goto error_exit;
1449 	}
1450 
1451 	dev->extern_data = vcrypto;
1452 	dev->extern_ops.pre_msg_handle = NULL;
1453 	dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;
1454 
1455 	return 0;
1456 
1457 error_exit:
1458 	rte_hash_free(vcrypto->session_map);
1459 	rte_mempool_free(vcrypto->mbuf_pool);
1460 
1461 	rte_free(vcrypto);
1462 
1463 	return ret;
1464 }
1465 
1466 int
1467 rte_vhost_crypto_free(int vid)
1468 {
1469 	struct virtio_net *dev = get_device(vid);
1470 	struct vhost_crypto *vcrypto;
1471 
1472 	if (unlikely(dev == NULL)) {
1473 		VC_LOG_ERR("Invalid vid %i", vid);
1474 		return -EINVAL;
1475 	}
1476 
1477 	vcrypto = dev->extern_data;
1478 	if (unlikely(vcrypto == NULL)) {
1479 		VC_LOG_ERR("Cannot find required data, is it initialized?");
1480 		return -ENOENT;
1481 	}
1482 
1483 	rte_hash_free(vcrypto->session_map);
1484 	rte_mempool_free(vcrypto->mbuf_pool);
1485 	rte_mempool_free(vcrypto->wb_pool);
1486 	rte_free(vcrypto);
1487 
1488 	dev->extern_data = NULL;
1489 	dev->extern_ops.pre_msg_handle = NULL;
1490 	dev->extern_ops.post_msg_handle = NULL;
1491 
1492 	return 0;
1493 }
1494 
1495 int
1496 rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
1497 {
1498 	struct virtio_net *dev = get_device(vid);
1499 	struct vhost_crypto *vcrypto;
1500 
1501 	if (unlikely(dev == NULL)) {
1502 		VC_LOG_ERR("Invalid vid %i", vid);
1503 		return -EINVAL;
1504 	}
1505 
1506 	if (unlikely((uint32_t)option >=
1507 				RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
1508 		VC_LOG_ERR("Invalid option %i", option);
1509 		return -EINVAL;
1510 	}
1511 
1512 	vcrypto = (struct vhost_crypto *)dev->extern_data;
1513 	if (unlikely(vcrypto == NULL)) {
1514 		VC_LOG_ERR("Cannot find required data, is it initialized?");
1515 		return -ENOENT;
1516 	}
1517 
1518 	if (vcrypto->option == (uint8_t)option)
1519 		return 0;
1520 
1521 	if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
1522 			!(rte_mempool_full(vcrypto->wb_pool))) {
1523 		VC_LOG_ERR("Cannot update zero copy as mempool is not full");
1524 		return -EINVAL;
1525 	}
1526 
1527 	if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
1528 		char name[128];
1529 
1530 		snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
1531 		vcrypto->wb_pool = rte_mempool_create(name,
1532 				VHOST_CRYPTO_MBUF_POOL_SIZE,
1533 				sizeof(struct vhost_crypto_writeback_data),
1534 				128, 0, NULL, NULL, NULL, NULL,
1535 				rte_socket_id(), 0);
1536 		if (!vcrypto->wb_pool) {
1537 			VC_LOG_ERR("Failed to create mempool");
1538 			return -ENOMEM;
1539 		}
1540 	} else {
1541 		rte_mempool_free(vcrypto->wb_pool);
1542 		vcrypto->wb_pool = NULL;
1543 	}
1544 
1545 	vcrypto->option = (uint8_t)option;
1546 
1547 	return 0;
1548 }
1549 
1550 uint16_t
1551 rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
1552 		struct rte_crypto_op **ops, uint16_t nb_ops)
1553 {
1554 	struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
1555 	struct vhost_crypto_desc descs[VHOST_CRYPTO_MAX_N_DESC];
1556 	struct virtio_net *dev = get_device(vid);
1557 	struct vhost_crypto *vcrypto;
1558 	struct vhost_virtqueue *vq;
1559 	uint16_t avail_idx;
1560 	uint16_t start_idx;
1561 	uint16_t count;
1562 	uint16_t i = 0;
1563 
1564 	if (unlikely(dev == NULL)) {
1565 		VC_LOG_ERR("Invalid vid %i", vid);
1566 		return 0;
1567 	}
1568 
1569 	if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
1570 		VC_LOG_ERR("Invalid qid %u", qid);
1571 		return 0;
1572 	}
1573 
1574 	vcrypto = (struct vhost_crypto *)dev->extern_data;
1575 	if (unlikely(vcrypto == NULL)) {
1576 		VC_LOG_ERR("Cannot find required data, is it initialized?");
1577 		return 0;
1578 	}
1579 
1580 	vq = dev->virtqueue[qid];
1581 
1582 	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
1583 	start_idx = vq->last_used_idx;
1584 	count = avail_idx - start_idx;
1585 	count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
1586 	count = RTE_MIN(count, nb_ops);
1587 
1588 	if (unlikely(count == 0))
1589 		return 0;
1590 
1591 	/* for zero copy we need 2 empty mbufs, one for src and one for dst;
1592 	 * otherwise a single mbuf serves as both src and dst
1593 	 */
1594 	switch (vcrypto->option) {
1595 	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
1596 		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
1597 				(void **)mbufs, count * 2) < 0)) {
1598 			VC_LOG_ERR("Insufficient memory");
1599 			return 0;
1600 		}
1601 
1602 		for (i = 0; i < count; i++) {
1603 			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
1604 			uint16_t desc_idx = vq->avail->ring[used_idx];
1605 			struct vring_desc *head = &vq->desc[desc_idx];
1606 			struct rte_crypto_op *op = ops[i];
1607 
1608 			op->sym->m_src = mbufs[i * 2];
1609 			op->sym->m_dst = mbufs[i * 2 + 1];
1610 			op->sym->m_src->data_off = 0;
1611 			op->sym->m_dst->data_off = 0;
1612 
1613 			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
1614 					op, head, descs, used_idx) < 0))
1615 				break;
1616 		}
1617 
1618 		if (unlikely(i < count))
1619 			rte_mempool_put_bulk(vcrypto->mbuf_pool,
1620 					(void **)&mbufs[i * 2],
1621 					(count - i) * 2);
1622 
1623 		break;
1624 
1625 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
1626 		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
1627 				(void **)mbufs, count) < 0)) {
1628 			VC_LOG_ERR("Insufficient memory");
1629 			return 0;
1630 		}
1631 
1632 		for (i = 0; i < count; i++) {
1633 			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
1634 			uint16_t desc_idx = vq->avail->ring[used_idx];
1635 			struct vring_desc *head = &vq->desc[desc_idx];
1636 			struct rte_crypto_op *op = ops[i];
1637 
1638 			op->sym->m_src = mbufs[i];
1639 			op->sym->m_dst = NULL;
1640 			op->sym->m_src->data_off = 0;
1641 
1642 			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
1643 					op, head, descs, used_idx) < 0))
1644 				break;
1645 		}
1646 
1647 		if (unlikely(i < count))
1648 			rte_mempool_put_bulk(vcrypto->mbuf_pool,
1649 					(void **)&mbufs[i],
1650 					count - i);
1651 
1652 		break;
1653 
1654 	}
1655 
1656 	vq->last_used_idx += i;
1657 
1658 	return i;
1659 }
1660 
1661 uint16_t
1662 rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
1663 		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
1664 {
1665 	struct rte_crypto_op **tmp_ops = ops;
1666 	uint16_t count = 0, left = nb_ops;
1667 	int callfd;
1668 	uint16_t idx = 0;
1669 
1670 	while (left) {
1671 		count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
1672 				&callfd);
1673 		if (unlikely(count == 0))
1674 			break;
1675 
1676 		tmp_ops = &tmp_ops[count];
1677 		left -= count;
1678 
1679 		callfds[idx++] = callfd;
1680 
1681 		if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
1682 			VC_LOG_ERR("Too many vqs");
1683 			break;
1684 		}
1685 	}
1686 
1687 	*nb_callfds = idx;
1688 
1689 	return nb_ops - left;
1690 }
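/*
 * Typical data-path usage of the public API in this file, as a minimal
 * sketch. cop_pool, cid, cryptodev_qid and BURST_SZ are application-owned
 * placeholders, not part of this library:
 *
 *	struct rte_crypto_op *ops[BURST_SZ];
 *	int callfds[VIRTIO_CRYPTO_MAX_NUM_BURST_VQS];
 *	uint16_t nb_callfds, n;
 *	int i;
 *
 *	rte_crypto_op_bulk_alloc(cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			ops, BURST_SZ);
 *	n = rte_vhost_crypto_fetch_requests(vid, qid, ops, BURST_SZ);
 *	n = rte_cryptodev_enqueue_burst(cid, cryptodev_qid, ops, n);
 *	...
 *	n = rte_cryptodev_dequeue_burst(cid, cryptodev_qid, ops, BURST_SZ);
 *	n = rte_vhost_crypto_finalize_requests(ops, n, callfds, &nb_callfds);
 *	for (i = 0; i < nb_callfds; i++)
 *		eventfd_write(callfds[i], (eventfd_t)1);  (kick the guest)
 */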
1691