xref: /dpdk/drivers/crypto/virtio/virtio_cryptodev.c (revision 1af8b0b2747fe6c6267fa7bedb602e569742362e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
3  */
4 #include <stdbool.h>
5 #include <unistd.h>
6 
7 #include <rte_common.h>
8 #include <rte_errno.h>
9 #include <rte_pci.h>
10 #include <bus_pci_driver.h>
11 #include <rte_cryptodev.h>
12 #include <cryptodev_pmd.h>
13 #include <rte_eal.h>
14 
15 #include "virtio_cryptodev.h"
16 #include "virtqueue.h"
17 #include "virtio_crypto_algs.h"
18 #include "virtio_crypto_capabilities.h"
19 
20 static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
21 		struct rte_cryptodev_config *config);
22 static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
23 static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
24 static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
25 static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
26 		struct rte_cryptodev_info *dev_info);
27 static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
28 		struct rte_cryptodev_stats *stats);
29 static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
30 static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
31 		uint16_t queue_pair_id,
32 		const struct rte_cryptodev_qp_conf *qp_conf,
33 		int socket_id);
34 static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
35 		uint16_t queue_pair_id);
36 static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
37 static unsigned int virtio_crypto_sym_get_session_private_size(
38 		struct rte_cryptodev *dev);
39 static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
40 		struct rte_cryptodev_sym_session *sess);
41 static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
42 		struct rte_crypto_sym_xform *xform,
43 		struct rte_cryptodev_sym_session *session);
44 
45 /*
46  * The set of PCI devices this driver supports
47  */
static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
				VIRTIO_CRYPTO_PCI_DEVICEID) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Symmetric crypto capabilities reported through dev_infos_get(). */
static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
	VIRTIO_SYM_CAPABILITIES,
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

/* Driver id assigned by the cryptodev layer when the PMD registers. */
uint8_t cryptodev_virtio_driver_id;

/*
 * Maximum number of indirect descriptor entries used by a session-create
 * request: ctrl request, cipher key, auth key and session input.
 */
#define NUM_ENTRY_SYM_CREATE_SESSION 4
62 
/*
 * Send a session-create control request on the control virtqueue and
 * busy-wait for the device's answer.
 *
 * One rte_malloc'd buffer holds everything, laid out as:
 *   [ctrl request | cipher key | auth key | session input | indirect descs]
 * A single ring slot carrying a VRING_DESC_F_INDIRECT descriptor points at
 * the indirect desc table; the device writes status and the new session id
 * into the session-input part.
 *
 * @param vq          control virtqueue (hw->cvq)
 * @param ctrl        filled-in control request
 * @param cipher_key  cipher key bytes; must not be NULL (cipher required)
 * @param auth_key    auth key bytes; may be NULL for cipher-only sessions
 * @param session     driver session; session_id is set on success
 * @return 0 on success, -EINVAL on bad parameters, -ENOSPC when the ring
 *         or heap is exhausted, -1 when the device reports an error
 */
static int
virtio_crypto_send_command(struct virtqueue *vq,
		struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
		uint8_t *auth_key, struct virtio_crypto_session *session)
{
	uint8_t idx = 0;
	uint8_t needed = 1;		/* only one ring slot is consumed */
	uint32_t head = 0;
	uint32_t len_cipher_key = 0;
	uint32_t len_auth_key = 0;
	uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
	uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
	uint32_t len_total = 0;
	uint32_t input_offset = 0;
	void *virt_addr_started = NULL;
	phys_addr_t phys_addr_started;
	struct vring_desc *desc;
	uint32_t desc_offset;
	struct virtio_crypto_session_input *input;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (session == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
		return -EINVAL;
	}
	/* cipher only is supported, it is available if auth_key is NULL */
	if (!cipher_key) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
		return -EINVAL;
	}

	head = vq->vq_desc_head_idx;
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
					head, vq);

	if (vq->vq_free_cnt < needed) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");
		return -ENOSPC;
	}

	/* calculate the length of cipher key */
	if (cipher_key) {
		switch (ctrl->u.sym_create_session.op_type) {
		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
			len_cipher_key
				= ctrl->u.sym_create_session.u.cipher
							.para.keylen;
			break;
		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
			len_cipher_key
				= ctrl->u.sym_create_session.u.chain
					.para.cipher_param.keylen;
			break;
		default:
			VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
			return -EINVAL;
		}
	}

	/* calculate the length of auth key */
	if (auth_key) {
		len_auth_key =
			ctrl->u.sym_create_session.u.chain.para.u.mac_param
				.auth_key_len;
	}

	/*
	 * malloc memory to store indirect vring_desc entries, including
	 * ctrl request, cipher key, auth key, session input and desc vring
	 */
	desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
		+ len_session_input;
	virt_addr_started = rte_malloc(NULL,
		desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
			* sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
	if (virt_addr_started == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
		return -ENOSPC;
	}
	phys_addr_started = rte_malloc_virt2iova(virt_addr_started);

	/* address to store indirect vring desc entries */
	desc = (struct vring_desc *)
		((uint8_t *)virt_addr_started + desc_offset);

	/* ctrl req part: first indirect descriptor, device-readable */
	memcpy(virt_addr_started, ctrl, len_ctrl_req);
	desc[idx].addr = phys_addr_started;
	desc[idx].len = len_ctrl_req;
	desc[idx].flags = VRING_DESC_F_NEXT;
	desc[idx].next = idx + 1;
	idx++;
	len_total += len_ctrl_req;
	input_offset += len_ctrl_req;

	/* cipher key part (skipped when keylen is 0) */
	if (len_cipher_key > 0) {
		memcpy((uint8_t *)virt_addr_started + len_total,
			cipher_key, len_cipher_key);

		desc[idx].addr = phys_addr_started + len_total;
		desc[idx].len = len_cipher_key;
		desc[idx].flags = VRING_DESC_F_NEXT;
		desc[idx].next = idx + 1;
		idx++;
		len_total += len_cipher_key;
		input_offset += len_cipher_key;
	}

	/* auth key part (only present for algorithm chaining) */
	if (len_auth_key > 0) {
		memcpy((uint8_t *)virt_addr_started + len_total,
			auth_key, len_auth_key);

		desc[idx].addr = phys_addr_started + len_total;
		desc[idx].len = len_auth_key;
		desc[idx].flags = VRING_DESC_F_NEXT;
		desc[idx].next = idx + 1;
		idx++;
		len_total += len_auth_key;
		input_offset += len_auth_key;
	}

	/*
	 * input part: device-writable descriptor where the backend
	 * returns status and session id; pre-fill with error values so
	 * a non-responding device is detected as failure.
	 */
	input = (struct virtio_crypto_session_input *)
		((uint8_t *)virt_addr_started + input_offset);
	input->status = VIRTIO_CRYPTO_ERR;
	input->session_id = ~0ULL;
	desc[idx].addr = phys_addr_started + len_total;
	desc[idx].len = len_session_input;
	desc[idx].flags = VRING_DESC_F_WRITE;
	idx++;

	/* use a single desc entry pointing at the indirect table */
	vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
	vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
	vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vq_free_cnt--;

	vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
					vq->vq_queue_index);

	virtqueue_notify(vq);

	/* Busy-wait (polling, no interrupt) until the device consumes
	 * the request; the read barrier pairs with the device's write
	 * to used->idx.
	 */
	rte_rmb();
	while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
		rte_rmb();
		usleep(100);
	}

	/* Reclaim all used entries back onto the free descriptor list.
	 * NOTE: the inner "idx" intentionally shadows the outer uint8_t
	 * idx used while building the chain.
	 */
	while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;

		while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;

		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d", vq->vq_free_cnt);
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx=%d", vq->vq_desc_head_idx);

	/* get the result written back by the device */
	if (input->status != VIRTIO_CRYPTO_OK) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
				"status=%u, session_id=%" PRIu64 "",
				input->status, input->session_id);
		rte_free(virt_addr_started);
		ret = -1;
	} else {
		session->session_id = input->session_id;

		VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
				"session_id=%" PRIu64 "", input->session_id);
		rte_free(virt_addr_started);
		ret = 0;
	}

	return ret;
}
263 
264 void
265 virtio_crypto_queue_release(struct virtqueue *vq)
266 {
267 	struct virtio_crypto_hw *hw;
268 
269 	PMD_INIT_FUNC_TRACE();
270 
271 	if (vq) {
272 		hw = vq->hw;
273 		/* Select and deactivate the queue */
274 		VTPCI_OPS(hw)->del_queue(hw, vq);
275 
276 		rte_memzone_free(vq->mz);
277 		rte_mempool_free(vq->mpool);
278 		rte_free(vq);
279 	}
280 }
281 
282 #define MPOOL_MAX_NAME_SZ 32
283 
284 int
285 virtio_crypto_queue_setup(struct rte_cryptodev *dev,
286 		int queue_type,
287 		uint16_t vtpci_queue_idx,
288 		uint16_t nb_desc,
289 		int socket_id,
290 		struct virtqueue **pvq)
291 {
292 	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
293 	char mpool_name[MPOOL_MAX_NAME_SZ];
294 	const struct rte_memzone *mz;
295 	unsigned int vq_size, size;
296 	struct virtio_crypto_hw *hw = dev->data->dev_private;
297 	struct virtqueue *vq = NULL;
298 	uint32_t i = 0;
299 	uint32_t j;
300 
301 	PMD_INIT_FUNC_TRACE();
302 
303 	VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
304 
305 	/*
306 	 * Read the virtqueue size from the Queue Size field
307 	 * Always power of 2 and if 0 virtqueue does not exist
308 	 */
309 	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
310 	if (vq_size == 0) {
311 		VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
312 		return -EINVAL;
313 	}
314 	VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
315 
316 	if (!rte_is_power_of_2(vq_size)) {
317 		VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2");
318 		return -EINVAL;
319 	}
320 
321 	if (queue_type == VTCRYPTO_DATAQ) {
322 		snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
323 				dev->data->dev_id, vtpci_queue_idx);
324 		snprintf(mpool_name, sizeof(mpool_name),
325 				"dev%d_dataqueue%d_mpool",
326 				dev->data->dev_id, vtpci_queue_idx);
327 	} else if (queue_type == VTCRYPTO_CTRLQ) {
328 		snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
329 				dev->data->dev_id);
330 		snprintf(mpool_name, sizeof(mpool_name),
331 				"dev%d_controlqueue_mpool",
332 				dev->data->dev_id);
333 	}
334 	size = RTE_ALIGN_CEIL(sizeof(*vq) +
335 				vq_size * sizeof(struct vq_desc_extra),
336 				RTE_CACHE_LINE_SIZE);
337 	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
338 				socket_id);
339 	if (vq == NULL) {
340 		VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
341 		return -ENOMEM;
342 	}
343 
344 	if (queue_type == VTCRYPTO_DATAQ) {
345 		/* pre-allocate a mempool and use it in the data plane to
346 		 * improve performance
347 		 */
348 		vq->mpool = rte_mempool_lookup(mpool_name);
349 		if (vq->mpool == NULL)
350 			vq->mpool = rte_mempool_create(mpool_name,
351 					vq_size,
352 					sizeof(struct virtio_crypto_op_cookie),
353 					RTE_CACHE_LINE_SIZE, 0,
354 					NULL, NULL, NULL, NULL, socket_id,
355 					0);
356 		if (!vq->mpool) {
357 			VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
358 					"Cannot create mempool");
359 			goto mpool_create_err;
360 		}
361 		for (i = 0; i < vq_size; i++) {
362 			vq->vq_descx[i].cookie =
363 				rte_zmalloc("crypto PMD op cookie pointer",
364 					sizeof(struct virtio_crypto_op_cookie),
365 					RTE_CACHE_LINE_SIZE);
366 			if (vq->vq_descx[i].cookie == NULL) {
367 				VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
368 						"alloc mem for cookie");
369 				goto cookie_alloc_err;
370 			}
371 		}
372 	}
373 
374 	vq->hw = hw;
375 	vq->dev_id = dev->data->dev_id;
376 	vq->vq_queue_index = vtpci_queue_idx;
377 	vq->vq_nentries = vq_size;
378 
379 	/*
380 	 * Using part of the vring entries is permitted, but the maximum
381 	 * is vq_size
382 	 */
383 	if (nb_desc == 0 || nb_desc > vq_size)
384 		nb_desc = vq_size;
385 	vq->vq_free_cnt = nb_desc;
386 
387 	/*
388 	 * Reserve a memzone for vring elements
389 	 */
390 	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
391 	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
392 	VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
393 			(queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
394 			size, vq->vq_ring_size);
395 
396 	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
397 			socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
398 	if (mz == NULL) {
399 		if (rte_errno == EEXIST)
400 			mz = rte_memzone_lookup(vq_name);
401 		if (mz == NULL) {
402 			VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
403 			goto mz_reserve_err;
404 		}
405 	}
406 
407 	/*
408 	 * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
409 	 * and only accepts 32 bit page frame number.
410 	 * Check if the allocated physical memory exceeds 16TB.
411 	 */
412 	if ((mz->iova + vq->vq_ring_size - 1)
413 				>> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
414 		VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
415 					"above 16TB!");
416 		goto vring_addr_err;
417 	}
418 
419 	memset(mz->addr, 0, sizeof(mz->len));
420 	vq->mz = mz;
421 	vq->vq_ring_mem = mz->iova;
422 	vq->vq_ring_virt_mem = mz->addr;
423 	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
424 					(uint64_t)mz->iova);
425 	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
426 					(uint64_t)(uintptr_t)mz->addr);
427 
428 	*pvq = vq;
429 
430 	return 0;
431 
432 vring_addr_err:
433 	rte_memzone_free(mz);
434 mz_reserve_err:
435 cookie_alloc_err:
436 	rte_mempool_free(vq->mpool);
437 	if (i != 0) {
438 		for (j = 0; j < i; j++)
439 			rte_free(vq->vq_descx[j].cookie);
440 	}
441 mpool_create_err:
442 	rte_free(vq);
443 	return -ENOMEM;
444 }
445 
446 static int
447 virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
448 {
449 	int ret;
450 	struct virtqueue *vq;
451 	struct virtio_crypto_hw *hw = dev->data->dev_private;
452 
453 	/* if virtio device has started, do not touch the virtqueues */
454 	if (dev->data->dev_started)
455 		return 0;
456 
457 	PMD_INIT_FUNC_TRACE();
458 
459 	ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
460 			0, SOCKET_ID_ANY, &vq);
461 	if (ret < 0) {
462 		VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
463 		return ret;
464 	}
465 
466 	hw->cvq = vq;
467 
468 	return 0;
469 }
470 
471 static void
472 virtio_crypto_free_queues(struct rte_cryptodev *dev)
473 {
474 	unsigned int i;
475 	struct virtio_crypto_hw *hw = dev->data->dev_private;
476 
477 	PMD_INIT_FUNC_TRACE();
478 
479 	/* control queue release */
480 	virtio_crypto_queue_release(hw->cvq);
481 
482 	/* data queue release */
483 	for (i = 0; i < hw->max_dataqueues; i++)
484 		virtio_crypto_queue_release(dev->data->queue_pairs[i]);
485 }
486 
/* Close the device: nothing to release here, teardown is done in
 * dev_stop/dev_uninit; always succeeds.
 */
static int
virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	return 0;
}
492 
493 /*
494  * dev_ops for virtio, bare necessities for basic operation
495  */
static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
	/* Device related operations */
	.dev_configure			 = virtio_crypto_dev_configure,
	.dev_start			 = virtio_crypto_dev_start,
	.dev_stop			 = virtio_crypto_dev_stop,
	.dev_close			 = virtio_crypto_dev_close,
	.dev_infos_get			 = virtio_crypto_dev_info_get,

	/* Statistics */
	.stats_get			 = virtio_crypto_dev_stats_get,
	.stats_reset			 = virtio_crypto_dev_stats_reset,

	/* Queue pair (data queue) management */
	.queue_pair_setup                = virtio_crypto_qp_setup,
	.queue_pair_release              = virtio_crypto_qp_release,

	/* Crypto related operations */
	.sym_session_get_size		= virtio_crypto_sym_get_session_private_size,
	.sym_session_configure		= virtio_crypto_sym_configure_session,
	.sym_session_clear		= virtio_crypto_sym_clear_session
};
515 
516 static void
517 virtio_crypto_update_stats(struct rte_cryptodev *dev,
518 		struct rte_cryptodev_stats *stats)
519 {
520 	unsigned int i;
521 	struct virtio_crypto_hw *hw = dev->data->dev_private;
522 
523 	PMD_INIT_FUNC_TRACE();
524 
525 	if (stats == NULL) {
526 		VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
527 		return;
528 	}
529 
530 	for (i = 0; i < hw->max_dataqueues; i++) {
531 		const struct virtqueue *data_queue
532 			= dev->data->queue_pairs[i];
533 		if (data_queue == NULL)
534 			continue;
535 
536 		stats->enqueued_count += data_queue->packets_sent_total;
537 		stats->enqueue_err_count += data_queue->packets_sent_failed;
538 
539 		stats->dequeued_count += data_queue->packets_received_total;
540 		stats->dequeue_err_count
541 			+= data_queue->packets_received_failed;
542 	}
543 }
544 
/* stats_get op: delegate to the shared counter accumulator. */
static void
virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	PMD_INIT_FUNC_TRACE();

	virtio_crypto_update_stats(dev, stats);
}
553 
554 static void
555 virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
556 {
557 	unsigned int i;
558 	struct virtio_crypto_hw *hw = dev->data->dev_private;
559 
560 	PMD_INIT_FUNC_TRACE();
561 
562 	for (i = 0; i < hw->max_dataqueues; i++) {
563 		struct virtqueue *data_queue = dev->data->queue_pairs[i];
564 		if (data_queue == NULL)
565 			continue;
566 
567 		data_queue->packets_sent_total = 0;
568 		data_queue->packets_sent_failed = 0;
569 
570 		data_queue->packets_received_total = 0;
571 		data_queue->packets_received_failed = 0;
572 	}
573 }
574 
575 static int
576 virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
577 		const struct rte_cryptodev_qp_conf *qp_conf,
578 		int socket_id)
579 {
580 	int ret;
581 	struct virtqueue *vq;
582 
583 	PMD_INIT_FUNC_TRACE();
584 
585 	/* if virtio dev is started, do not touch the virtqueues */
586 	if (dev->data->dev_started)
587 		return 0;
588 
589 	ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
590 			qp_conf->nb_descriptors, socket_id, &vq);
591 	if (ret < 0) {
592 		VIRTIO_CRYPTO_INIT_LOG_ERR(
593 			"virtio crypto data queue initialization failed");
594 		return ret;
595 	}
596 
597 	dev->data->queue_pairs[queue_pair_id] = vq;
598 
599 	return 0;
600 }
601 
602 static int
603 virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
604 {
605 	struct virtqueue *vq
606 		= (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
607 
608 	PMD_INIT_FUNC_TRACE();
609 
610 	if (vq == NULL) {
611 		VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
612 		return 0;
613 	}
614 
615 	virtio_crypto_queue_release(vq);
616 	return 0;
617 }
618 
/*
 * Negotiate feature bits with the device.
 *
 * Intersects the driver's requested features with the device's offered
 * features and, for modern devices, confirms the result through the
 * FEATURES_OK status handshake.
 *
 * @param hw            device private data
 * @param req_features  features the driver wants to enable
 * @return 0 on success, -1 when VIRTIO_F_VERSION_1 is missing or the
 *         device rejects FEATURES_OK
 */
static int
virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
{
	uint64_t host_features;

	PMD_INIT_FUNC_TRACE();

	/* Prepare guest_features: feature that driver wants to support */
	VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
		req_features);

	/* Read device(host) feature bits */
	host_features = VTPCI_OPS(hw)->get_features(hw);
	VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
		host_features);

	/*
	 * Negotiate features: Subset of device feature bits are written back
	 * guest feature bits.
	 * NOTE(review): hw->guest_features is assigned twice on purpose —
	 * the vtpci negotiate helper appears to read the requested value
	 * from hw before writing the negotiated result back; keep the order.
	 */
	hw->guest_features = req_features;
	hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
							host_features);
	VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
		hw->guest_features);

	if (hw->modern) {
		/* Modern (virtio 1.0+) devices must offer VERSION_1 and
		 * must accept the FEATURES_OK status bit.
		 */
		if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
			VIRTIO_CRYPTO_INIT_LOG_ERR(
				"VIRTIO_F_VERSION_1 features is not enabled.");
			return -1;
		}
		vtpci_cryptodev_set_status(hw,
			VIRTIO_CONFIG_STATUS_FEATURES_OK);
		if (!(vtpci_cryptodev_get_status(hw) &
			VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
			VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
						"status!");
			return -1;
		}
	}

	hw->req_guest_features = req_features;

	return 0;
}
665 
666 /* reset device and renegotiate features if needed */
667 static int
668 virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
669 	uint64_t req_features)
670 {
671 	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
672 	struct virtio_crypto_config local_config;
673 	struct virtio_crypto_config *config = &local_config;
674 
675 	PMD_INIT_FUNC_TRACE();
676 
677 	/* Reset the device although not necessary at startup */
678 	vtpci_cryptodev_reset(hw);
679 
680 	/* Tell the host we've noticed this device. */
681 	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
682 
683 	/* Tell the host we've known how to drive the device. */
684 	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
685 	if (virtio_negotiate_features(hw, req_features) < 0)
686 		return -1;
687 
688 	/* Get status of the device */
689 	vtpci_read_cryptodev_config(hw,
690 		offsetof(struct virtio_crypto_config, status),
691 		&config->status, sizeof(config->status));
692 	if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
693 		VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
694 				"not ready");
695 		return -1;
696 	}
697 
698 	/* Get number of data queues */
699 	vtpci_read_cryptodev_config(hw,
700 		offsetof(struct virtio_crypto_config, max_dataqueues),
701 		&config->max_dataqueues,
702 		sizeof(config->max_dataqueues));
703 	hw->max_dataqueues = config->max_dataqueues;
704 
705 	VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
706 		hw->max_dataqueues);
707 
708 	return 0;
709 }
710 
711 /*
712  * This function is based on probe() function
713  * It returns 0 on success.
714  */
715 static int
716 crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
717 		struct rte_cryptodev_pmd_init_params *init_params)
718 {
719 	struct rte_cryptodev *cryptodev;
720 	struct virtio_crypto_hw *hw;
721 
722 	PMD_INIT_FUNC_TRACE();
723 
724 	cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
725 					init_params);
726 	if (cryptodev == NULL)
727 		return -ENODEV;
728 
729 	cryptodev->driver_id = cryptodev_virtio_driver_id;
730 	cryptodev->dev_ops = &virtio_crypto_dev_ops;
731 
732 	cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
733 	cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
734 
735 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
736 		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
737 		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
738 
739 	hw = cryptodev->data->dev_private;
740 	hw->dev_id = cryptodev->data->dev_id;
741 	hw->virtio_dev_capabilities = virtio_capabilities;
742 
743 	VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
744 		cryptodev->data->dev_id, pci_dev->id.vendor_id,
745 		pci_dev->id.device_id);
746 
747 	/* pci device init */
748 	if (vtpci_cryptodev_init(pci_dev, hw))
749 		return -1;
750 
751 	if (virtio_crypto_init_device(cryptodev,
752 			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
753 		return -1;
754 
755 	rte_cryptodev_pmd_probing_finish(cryptodev);
756 
757 	return 0;
758 }
759 
/*
 * Release everything acquired in crypto_virtio_create()/configure().
 * Only the primary process may uninit the device.
 *
 * @return 0 on success, -EPERM when called from a secondary process
 */
static int
virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
{
	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return -EPERM;

	if (cryptodev->data->dev_started) {
		virtio_crypto_dev_stop(cryptodev);
		virtio_crypto_dev_close(cryptodev);
	}

	cryptodev->dev_ops = NULL;
	cryptodev->enqueue_burst = NULL;
	cryptodev->dequeue_burst = NULL;

	/* release control queue
	 * NOTE(review): if dev_stop() ran above it already released
	 * hw->cvq via virtio_crypto_free_queues() without clearing the
	 * pointer — this second release looks like a double free; verify.
	 */
	virtio_crypto_queue_release(hw->cvq);

	rte_free(cryptodev->data);
	cryptodev->data = NULL;

	VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");

	return 0;
}
789 
/*
 * dev_configure op: re-run device init/feature negotiation and create
 * plus start the control queue.
 *
 * @return 0 on success, -1 on init or control-queue setup failure
 */
static int
virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
	struct rte_cryptodev_config *config __rte_unused)
{
	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (virtio_crypto_init_device(cryptodev,
			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
		return -1;

	/* setup control queue
	 * [0, 1, ... ,(config->max_dataqueues - 1)] are data queues
	 * config->max_dataqueues is the control queue
	 */
	if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
		return -1;
	}
	virtio_crypto_ctrlq_start(cryptodev);

	return 0;
}
814 
/*
 * dev_stop op: reset the device so it stops touching the rings, then
 * reclaim outstanding buffers and free all virtqueues.
 */
static void
virtio_crypto_dev_stop(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");

	/* Reset first: the device must be quiesced before queues go away. */
	vtpci_cryptodev_reset(hw);

	virtio_crypto_dev_free_mbufs(dev);
	virtio_crypto_free_queues(dev);

	dev->data->dev_started = 0;
}
830 
/*
 * dev_start op: start the data queues and signal reinit-complete to the
 * device. Idempotent: returns success if already started.
 */
static int
virtio_crypto_dev_start(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	if (dev->data->dev_started)
		return 0;

	/* Do final configuration before queue engine starts */
	virtio_crypto_dataq_start(dev);
	vtpci_cryptodev_reinit_complete(hw);

	dev->data->dev_started = 1;

	return 0;
}
847 
848 static void
849 virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
850 {
851 	uint32_t i;
852 	struct virtio_crypto_hw *hw = dev->data->dev_private;
853 
854 	for (i = 0; i < hw->max_dataqueues; i++) {
855 		VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
856 			"and unused buf", i);
857 		VIRTQUEUE_DUMP((struct virtqueue *)
858 			dev->data->queue_pairs[i]);
859 
860 		VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
861 				i, dev->data->queue_pairs[i]);
862 
863 		virtqueue_detatch_unused(dev->data->queue_pairs[i]);
864 
865 		VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
866 					"unused buf", i);
867 		VIRTQUEUE_DUMP(
868 			(struct virtqueue *)dev->data->queue_pairs[i]);
869 	}
870 }
871 
872 static unsigned int
873 virtio_crypto_sym_get_session_private_size(
874 		struct rte_cryptodev *dev __rte_unused)
875 {
876 	PMD_INIT_FUNC_TRACE();
877 
878 	return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
879 }
880 
881 static int
882 virtio_crypto_check_sym_session_paras(
883 		struct rte_cryptodev *dev)
884 {
885 	struct virtio_crypto_hw *hw;
886 
887 	PMD_INIT_FUNC_TRACE();
888 
889 	if (unlikely(dev == NULL)) {
890 		VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
891 		return -1;
892 	}
893 	if (unlikely(dev->data == NULL)) {
894 		VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
895 		return -1;
896 	}
897 	hw = dev->data->dev_private;
898 	if (unlikely(hw == NULL)) {
899 		VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
900 		return -1;
901 	}
902 	if (unlikely(hw->cvq == NULL)) {
903 		VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
904 		return -1;
905 	}
906 
907 	return 0;
908 }
909 
910 static int
911 virtio_crypto_check_sym_clear_session_paras(
912 		struct rte_cryptodev *dev,
913 		struct rte_cryptodev_sym_session *sess)
914 {
915 	PMD_INIT_FUNC_TRACE();
916 
917 	if (sess == NULL) {
918 		VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
919 		return -1;
920 	}
921 
922 	return virtio_crypto_check_sym_session_paras(dev);
923 }
924 
925 #define NUM_ENTRY_SYM_CLEAR_SESSION 2
926 
/*
 * sym_session_clear op: send a destroy-session control request for
 * @sess on the control virtqueue and busy-wait for completion.
 *
 * One rte_malloc'd buffer is laid out as:
 *   [ctrl request | inhdr status byte | 2 indirect vring descs]
 * and posted as a single VRING_DESC_F_INDIRECT ring entry. The device
 * writes its result into the inhdr status byte.
 */
static void
virtio_crypto_sym_clear_session(
		struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct virtio_crypto_hw *hw;
	struct virtqueue *vq;
	struct virtio_crypto_session *session;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct vring_desc *desc;
	uint8_t *status;
	uint8_t needed = 1;	/* a single ring slot is consumed */
	uint32_t head;
	uint8_t *malloc_virt_addr;
	uint64_t malloc_phys_addr;
	uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
	uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
	uint32_t desc_offset = len_op_ctrl_req + len_inhdr;

	PMD_INIT_FUNC_TRACE();

	if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
		return;

	hw = dev->data->dev_private;
	vq = hw->cvq;
	session = CRYPTODEV_GET_SYM_SESS_PRIV(sess);

	VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
			"vq = %p", vq->vq_desc_head_idx, vq);

	if (vq->vq_free_cnt < needed) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"vq->vq_free_cnt = %d is less than %d, "
				"not enough", vq->vq_free_cnt, needed);
		return;
	}

	/*
	 * malloc memory to store information of ctrl request op,
	 * returned status and desc vring
	 */
	malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
		+ NUM_ENTRY_SYM_CLEAR_SESSION
		* sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
	if (malloc_virt_addr == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
		return;
	}
	malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);

	/* assign ctrl request op part */
	ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
	ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
	/* default data virtqueue is 0 */
	ctrl->header.queue_id = 0;
	ctrl->u.destroy_session.session_id = session->session_id;

	/* status part: pre-set to error so a silent device reads as
	 * failure
	 */
	status = &(((struct virtio_crypto_inhdr *)
		((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
	*status = VIRTIO_CRYPTO_ERR;

	/* indirect desc vring part */
	desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
		+ desc_offset);

	/* ctrl request part: device-readable */
	desc[0].addr = malloc_phys_addr;
	desc[0].len = len_op_ctrl_req;
	desc[0].flags = VRING_DESC_F_NEXT;
	desc[0].next = 1;

	/* status part: device-writable */
	desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
	desc[1].len = len_inhdr;
	desc[1].flags = VRING_DESC_F_WRITE;

	/* use only a single desc entry pointing at the indirect table */
	head = vq->vq_desc_head_idx;
	vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
	vq->vq_ring.desc[head].len
		= NUM_ENTRY_SYM_CLEAR_SESSION
		* sizeof(struct vring_desc);
	vq->vq_free_cnt -= needed;

	vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
					vq->vq_queue_index);

	virtqueue_notify(vq);

	/* Busy-wait (polling) for the device to consume the request. */
	rte_rmb();
	while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
		rte_rmb();
		usleep(100);
	}

	/* Reclaim used descriptors back onto the free list. */
	while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;
		while (vq->vq_ring.desc[desc_idx].flags
				& VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;
		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	/* Check the status byte the device wrote back. */
	if (*status != VIRTIO_CRYPTO_OK) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
				"status=%"PRIu32", session_id=%"PRIu64"",
				*status, session->session_id);
		rte_free(malloc_virt_addr);
		return;
	}

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d", vq->vq_free_cnt);
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx=%d", vq->vq_desc_head_idx);

	VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
			session->session_id);

	rte_free(malloc_virt_addr);
}
1067 
1068 static struct rte_crypto_cipher_xform *
1069 virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
1070 {
1071 	do {
1072 		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1073 			return &xform->cipher;
1074 
1075 		xform = xform->next;
1076 	} while (xform);
1077 
1078 	return NULL;
1079 }
1080 
1081 static struct rte_crypto_auth_xform *
1082 virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
1083 {
1084 	do {
1085 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1086 			return &xform->auth;
1087 
1088 		xform = xform->next;
1089 	} while (xform);
1090 
1091 	return NULL;
1092 }
1093 
1094 /** Get xform chain order */
1095 static int
1096 virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
1097 {
1098 	if (xform == NULL)
1099 		return -1;
1100 
1101 	/* Cipher Only */
1102 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1103 			xform->next == NULL)
1104 		return VIRTIO_CRYPTO_CMD_CIPHER;
1105 
1106 	/* Authentication Only */
1107 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1108 			xform->next == NULL)
1109 		return VIRTIO_CRYPTO_CMD_AUTH;
1110 
1111 	/* Authenticate then Cipher */
1112 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1113 			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1114 		return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
1115 
1116 	/* Cipher then Authenticate */
1117 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1118 			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1119 		return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
1120 
1121 	return -1;
1122 }
1123 
1124 static int
1125 virtio_crypto_sym_pad_cipher_param(
1126 		struct virtio_crypto_cipher_session_para *para,
1127 		struct rte_crypto_cipher_xform *cipher_xform)
1128 {
1129 	switch (cipher_xform->algo) {
1130 	case RTE_CRYPTO_CIPHER_AES_CBC:
1131 		para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
1132 		break;
1133 	default:
1134 		VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
1135 				"Cipher alg %u", cipher_xform->algo);
1136 		return -1;
1137 	}
1138 
1139 	para->keylen = cipher_xform->key.length;
1140 	switch (cipher_xform->op) {
1141 	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
1142 		para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
1143 		break;
1144 	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
1145 		para->op = VIRTIO_CRYPTO_OP_DECRYPT;
1146 		break;
1147 	default:
1148 		VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
1149 					"parameter");
1150 		return -1;
1151 	}
1152 
1153 	return 0;
1154 }
1155 
1156 static int
1157 virtio_crypto_sym_pad_auth_param(
1158 		struct virtio_crypto_op_ctrl_req *ctrl,
1159 		struct rte_crypto_auth_xform *auth_xform)
1160 {
1161 	uint32_t *algo;
1162 	struct virtio_crypto_alg_chain_session_para *para =
1163 		&(ctrl->u.sym_create_session.u.chain.para);
1164 
1165 	switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
1166 	case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
1167 		algo = &(para->u.hash_param.algo);
1168 		break;
1169 	case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
1170 		algo = &(para->u.mac_param.algo);
1171 		break;
1172 	default:
1173 		VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
1174 			"specified",
1175 			ctrl->u.sym_create_session.u.chain.para.hash_mode);
1176 		return -1;
1177 	}
1178 
1179 	switch (auth_xform->algo) {
1180 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1181 		*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
1182 		break;
1183 	default:
1184 		VIRTIO_CRYPTO_SESSION_LOG_ERR(
1185 			"Crypto: Undefined Hash algo %u specified",
1186 			auth_xform->algo);
1187 		return -1;
1188 	}
1189 
1190 	return 0;
1191 }
1192 
/*
 * Populate a CREATE_SESSION control request from an xform chain.
 *
 * Validates the cipher key/IV and auth key lengths against the PMD
 * limits, fills the cipher and/or auth parameters of @ctrl, and copies
 * the raw key bytes into the caller-provided @cipher_key_data and
 * @auth_key_data buffers (which must each hold at least
 * VIRTIO_CRYPTO_MAX_KEY_SIZE bytes). Also records the IV offset/length
 * in @session for later use at enqueue time.
 *
 * @is_chainned selects which union member of the request the cipher
 * parameters go into (chain vs. cipher-only layout).
 *
 * Returns 0 on success, -1 on any validation or padding failure.
 */
static int
virtio_crypto_sym_pad_op_ctrl_req(
		struct virtio_crypto_op_ctrl_req *ctrl,
		struct rte_crypto_sym_xform *xform, bool is_chainned,
		uint8_t *cipher_key_data, uint8_t *auth_key_data,
		struct virtio_crypto_session *session)
{
	int ret;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = virtio_crypto_get_cipher_xform(xform);
	if (cipher_xform) {
		/* Reject keys/IVs larger than the PMD's fixed buffers. */
		if (cipher_xform->key.length > VIRTIO_CRYPTO_MAX_KEY_SIZE) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"cipher key size cannot be longer than %u",
				VIRTIO_CRYPTO_MAX_KEY_SIZE);
			return -1;
		}
		if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"cipher IV size cannot be longer than %u",
				VIRTIO_CRYPTO_MAX_IV_SIZE);
			return -1;
		}
		/* Chained sessions use the chain union member, cipher-only
		 * sessions the cipher member of the request.
		 */
		if (is_chainned)
			ret = virtio_crypto_sym_pad_cipher_param(
				&ctrl->u.sym_create_session.u.chain.para
						.cipher_param, cipher_xform);
		else
			ret = virtio_crypto_sym_pad_cipher_param(
				&ctrl->u.sym_create_session.u.cipher.para,
				cipher_xform);

		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"pad cipher parameter failed");
			return -1;
		}

		/* Key bytes travel separately from the fixed-size request. */
		memcpy(cipher_key_data, cipher_xform->key.data,
				cipher_xform->key.length);

		/* Remember where the IV lives in the crypto op for dequeue. */
		session->iv.offset = cipher_xform->iv.offset;
		session->iv.length = cipher_xform->iv.length;
	}

	/* Get auth xform from crypto xform chain */
	auth_xform = virtio_crypto_get_auth_xform(xform);
	if (auth_xform) {
		/* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
		struct virtio_crypto_alg_chain_session_para *para =
			&(ctrl->u.sym_create_session.u.chain.para);
		/* A non-empty key means keyed MAC mode; empty means plain
		 * hash mode.
		 */
		if (auth_xform->key.length) {
			if (auth_xform->key.length >
					VIRTIO_CRYPTO_MAX_KEY_SIZE) {
				VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"auth key size cannot be longer than %u",
					VIRTIO_CRYPTO_MAX_KEY_SIZE);
				return -1;
			}
			para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
			para->u.mac_param.auth_key_len =
				(uint32_t)auth_xform->key.length;
			para->u.mac_param.hash_result_len =
				auth_xform->digest_length;
			memcpy(auth_key_data, auth_xform->key.data,
					auth_xform->key.length);
		} else {
			para->hash_mode	= VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
			para->u.hash_param.hash_result_len =
				auth_xform->digest_length;
		}

		/* Must run after hash_mode is set: it selects the union
		 * member the algorithm id is written into.
		 */
		ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
						"failed");
			return -1;
		}
	}

	return 0;
}
1278 
1279 static int
1280 virtio_crypto_check_sym_configure_session_paras(
1281 		struct rte_cryptodev *dev,
1282 		struct rte_crypto_sym_xform *xform,
1283 		struct rte_cryptodev_sym_session *sym_sess)
1284 {
1285 	if (unlikely(xform == NULL) || unlikely(sym_sess == NULL)) {
1286 		VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
1287 		return -1;
1288 	}
1289 
1290 	if (virtio_crypto_check_sym_session_paras(dev) < 0)
1291 		return -1;
1292 
1293 	return 0;
1294 }
1295 
/*
 * Cryptodev op: configure a symmetric crypto session.
 *
 * Builds a VIRTIO_CRYPTO_CIPHER_CREATE_SESSION control request inside
 * the session's private data and sends it to the device over the
 * control virtqueue. Supports cipher-only and cipher+auth chained
 * sessions; auth-only chains fall through to the error path.
 *
 * Returns 0 on success, a negative value on invalid parameters and -1
 * on any padding/send failure.
 */
static int
virtio_crypto_sym_configure_session(
		struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess)
{
	int ret;
	struct virtio_crypto_session *session;
	struct virtio_crypto_op_ctrl_req *ctrl_req;
	enum virtio_crypto_cmd_id cmd_id;
	/* Staging buffers for raw key material sent with the request. */
	uint8_t cipher_key_data[VIRTIO_CRYPTO_MAX_KEY_SIZE] = {0};
	uint8_t auth_key_data[VIRTIO_CRYPTO_MAX_KEY_SIZE] = {0};
	struct virtio_crypto_hw *hw;
	struct virtqueue *control_vq;

	PMD_INIT_FUNC_TRACE();

	ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
			sess);
	if (ret < 0) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
		return ret;
	}
	/* The control request lives inside the session's private data. */
	session = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	memset(session, 0, sizeof(struct virtio_crypto_session));
	ctrl_req = &session->ctrl;
	ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
	/* FIXME: support multiqueue */
	ctrl_req->header.queue_id = 0;

	hw = dev->data->dev_private;
	control_vq = hw->cvq;

	/* May return -1 for unsupported chains; handled by the default
	 * case of the switch below.
	 */
	cmd_id = virtio_crypto_get_chain_order(xform);
	if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
		ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
			= VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
	if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
		ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
			= VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;

	switch (cmd_id) {
	case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
	case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
		ctrl_req->u.sym_create_session.op_type
			= VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;

		ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
			xform, true, cipher_key_data, auth_key_data, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"padding sym op ctrl req failed");
			goto error_out;
		}
		ret = virtio_crypto_send_command(control_vq, ctrl_req,
			cipher_key_data, auth_key_data, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"create session failed: %d", ret);
			goto error_out;
		}
		break;
	case VIRTIO_CRYPTO_CMD_CIPHER:
		ctrl_req->u.sym_create_session.op_type
			= VIRTIO_CRYPTO_SYM_OP_CIPHER;
		ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
			false, cipher_key_data, auth_key_data, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"padding sym op ctrl req failed");
			goto error_out;
		}
		/* Cipher-only: no auth key to transfer. */
		ret = virtio_crypto_send_command(control_vq, ctrl_req,
			cipher_key_data, NULL, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"create session failed: %d", ret);
			goto error_out;
		}
		break;
	default:
		VIRTIO_CRYPTO_SESSION_LOG_ERR(
			"Unsupported operation chain order parameter");
		goto error_out;
	}
	return 0;

error_out:
	return -1;
}
1386 
1387 static void
1388 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
1389 		struct rte_cryptodev_info *info)
1390 {
1391 	struct virtio_crypto_hw *hw = dev->data->dev_private;
1392 
1393 	PMD_INIT_FUNC_TRACE();
1394 
1395 	if (info != NULL) {
1396 		info->driver_id = cryptodev_virtio_driver_id;
1397 		info->feature_flags = dev->feature_flags;
1398 		info->max_nb_queue_pairs = hw->max_dataqueues;
1399 		/* No limit of number of sessions */
1400 		info->sym.max_nb_sessions = 0;
1401 		info->capabilities = hw->virtio_dev_capabilities;
1402 	}
1403 }
1404 
1405 static int
1406 crypto_virtio_pci_probe(
1407 	struct rte_pci_driver *pci_drv __rte_unused,
1408 	struct rte_pci_device *pci_dev)
1409 {
1410 	struct rte_cryptodev_pmd_init_params init_params = {
1411 		.name = "",
1412 		.socket_id = pci_dev->device.numa_node,
1413 		.private_data_size = sizeof(struct virtio_crypto_hw)
1414 	};
1415 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
1416 
1417 	VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
1418 			pci_dev->addr.bus,
1419 			pci_dev->addr.devid,
1420 			pci_dev->addr.function);
1421 
1422 	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
1423 
1424 	return crypto_virtio_create(name, pci_dev, &init_params);
1425 }
1426 
1427 static int
1428 crypto_virtio_pci_remove(
1429 	struct rte_pci_device *pci_dev __rte_unused)
1430 {
1431 	struct rte_cryptodev *cryptodev;
1432 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1433 
1434 	if (pci_dev == NULL)
1435 		return -EINVAL;
1436 
1437 	rte_pci_device_name(&pci_dev->addr, cryptodev_name,
1438 			sizeof(cryptodev_name));
1439 
1440 	cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
1441 	if (cryptodev == NULL)
1442 		return -ENODEV;
1443 
1444 	return virtio_crypto_dev_uninit(cryptodev);
1445 }
1446 
/* PCI driver descriptor binding the probe/remove callbacks to the
 * supported virtio-crypto PCI id table.
 */
static struct rte_pci_driver rte_virtio_crypto_driver = {
	.id_table = pci_id_virtio_crypto_map,
	.drv_flags = 0,
	.probe = crypto_virtio_pci_probe,
	.remove = crypto_virtio_pci_remove
};

/* Driver handle used for cryptodev driver-id registration below. */
static struct cryptodev_driver virtio_crypto_drv;

/* Register the PCI driver and obtain a cryptodev driver id. */
RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
	rte_virtio_crypto_driver.driver,
	cryptodev_virtio_driver_id);
/* Per-component log types (init/session/rx/tx/driver), default NOTICE. */
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_session, session, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_rx, rx, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_tx, tx, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_driver, driver, NOTICE);
1465