xref: /dpdk/drivers/crypto/virtio/virtio_rxtx.c (revision 97b914f4e715565d53d38ac6e04815b9be5e58a9)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <cryptodev_pmd.h>

#include "virtqueue.h"
#include "virtio_cryptodev.h"
#include "virtio_crypto_algs.h"

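/*
 * Give a descriptor chain back to the virtqueue: credit its descriptors
 * back to vq_free_cnt and link the freed chain onto the tail of the
 * queue's free descriptor list.
 */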
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

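/*
 * Drain up to num completed requests from the used ring: map each used
 * element back to its rte_crypto_op, translate the status byte written by
 * the device into an rte_crypto_op status, return the per-op cookie to the
 * mempool and release the descriptor chain. Returns the number of ops
 * actually placed in rx_pkts.
 */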
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq,
		struct rte_crypto_op **rx_pkts, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_crypto_op *cop;
	uint16_t used_idx, desc_idx;
	uint16_t i;
	struct virtio_crypto_inhdr *inhdr;
	struct virtio_crypto_op_cookie *op_cookie;

	/* Caller does the check */
	for (i = 0; i < num ; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		cop = (struct rte_crypto_op *)
				vq->vq_descx[desc_idx].crypto_op;
		if (unlikely(cop == NULL)) {
			VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
					"mbuf cookie at %u",
					vq->vq_used_cons_idx);
			break;
		}

		op_cookie = (struct virtio_crypto_op_cookie *)
						vq->vq_descx[desc_idx].cookie;
		inhdr = &(op_cookie->inhdr);
		switch (inhdr->status) {
		case VIRTIO_CRYPTO_OK:
			cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case VIRTIO_CRYPTO_ERR:
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_NOTSUPP:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_INVSESS:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			vq->packets_received_failed++;
			break;
		default:
			break;
		}

		vq->packets_received_total++;

		rx_pkts[i] = cop;
		rte_mempool_put(vq->mpool, op_cookie);

		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].crypto_op = NULL;
	}

	return i;
}

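/*
 * Populate the virtio_crypto_op_data_req header for a symmetric request
 * from the crypto op and the parameters recorded in the session's
 * create-session control request. Handles plain cipher and cipher/hash
 * algorithm chaining; returns -1 for any other op_type.
 */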
static int
virtqueue_crypto_sym_pkt_header_arrange(
		struct rte_crypto_op *cop,
		struct virtio_crypto_op_data_req *data,
		struct virtio_crypto_session *session)
{
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct virtio_crypto_op_data_req *req_data = data;
	struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
	struct virtio_crypto_sym_create_session_req *sym_sess_req =
		&ctrl->u.sym_create_session;
	struct virtio_crypto_alg_chain_session_para *chain_para =
		&sym_sess_req->u.chain.para;
	struct virtio_crypto_cipher_session_para *cipher_para;

	req_data->header.session_id = session->session_id;

	switch (sym_sess_req->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;

		cipher_para = &sym_sess_req->u.cipher.para;
		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
		else
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

		req_data->u.sym_req.u.cipher.para.iv_len
			= session->iv.length;

		req_data->u.sym_req.u.cipher.para.src_data_len =
			(sym_op->cipher.data.length +
				sym_op->cipher.data.offset);
		req_data->u.sym_req.u.cipher.para.dst_data_len =
			req_data->u.sym_req.u.cipher.para.src_data_len;
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		req_data->u.sym_req.op_type =
			VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;

		cipher_para = &chain_para->cipher_param;
		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
		else
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

		req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
		req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;

		req_data->u.sym_req.u.chain.para.src_data_len =
			(sym_op->cipher.data.length +
				sym_op->cipher.data.offset);
		req_data->u.sym_req.u.chain.para.dst_data_len =
			req_data->u.sym_req.u.chain.para.src_data_len;
		req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
			sym_op->cipher.data.offset;
		req_data->u.sym_req.u.chain.para.len_to_cipher =
			sym_op->cipher.data.length;
		req_data->u.sym_req.u.chain.para.hash_start_src_offset =
			sym_op->auth.data.offset;
		req_data->u.sym_req.u.chain.para.len_to_hash =
			sym_op->auth.data.length;
		req_data->u.sym_req.u.chain.para.aad_len =
			chain_para->aad_len;

		if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
			req_data->u.sym_req.u.chain.para.hash_result_len =
				chain_para->u.hash_param.hash_result_len;
		if (chain_para->hash_mode ==
			VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
			req_data->u.sym_req.u.chain.para.hash_result_len =
				chain_para->u.mac_param.hash_result_len;
		break;
	default:
		return -1;
	}

	return 0;
}

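/*
 * Enqueue one symmetric crypto op on the data queue. The request header,
 * an optional IV copy, the returned status byte and an indirect descriptor
 * table all live inside the per-op cookie taken from the queue's mempool;
 * the op is then posted as a single indirect descriptor, so it consumes
 * exactly one slot of the main ring.
 */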
static int
virtqueue_crypto_sym_enqueue_xmit(
		struct virtqueue *txvq,
		struct rte_crypto_op *cop)
{
	uint16_t idx = 0;
	uint16_t num_entry;
	uint16_t needed = 1;
	uint16_t head_idx;
	struct vq_desc_extra *dxp;
	struct vring_desc *start_dp;
	struct vring_desc *desc;
	uint64_t indirect_op_data_req_phys_addr;
	uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
	uint32_t indirect_vring_addr_offset = req_data_len +
		sizeof(struct virtio_crypto_inhdr);
	uint32_t indirect_iv_addr_offset =
			offsetof(struct virtio_crypto_op_cookie, iv);
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct virtio_crypto_session *session =
		(struct virtio_crypto_session *)get_sym_session_private_data(
		cop->sym->session, cryptodev_virtio_driver_id);
	struct virtio_crypto_op_data_req *op_data_req;
	uint32_t hash_result_len = 0;
	struct virtio_crypto_op_cookie *crypto_op_cookie;
	struct virtio_crypto_alg_chain_session_para *para;

	if (unlikely(sym_op->m_src->nb_segs != 1))
		return -EMSGSIZE;
	if (unlikely(txvq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(txvq->vq_free_cnt < needed))
		return -EMSGSIZE;
	head_idx = txvq->vq_desc_head_idx;
	if (unlikely(head_idx >= txvq->vq_nentries))
		return -EFAULT;
	if (unlikely(session == NULL))
		return -EFAULT;

	dxp = &txvq->vq_descx[head_idx];

	if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
		VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
		return -EFAULT;
	}
	crypto_op_cookie = dxp->cookie;
	indirect_op_data_req_phys_addr =
		rte_mempool_virt2iova(crypto_op_cookie);
	op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;

	if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req,
			session)) {
		/* give the cookie back so it is not leaked */
		rte_mempool_put(txvq->mpool, crypto_op_cookie);
		return -EFAULT;
	}

	/* status is initialized to VIRTIO_CRYPTO_ERR */
	((struct virtio_crypto_inhdr *)
		((uint8_t *)op_data_req + req_data_len))->status =
		VIRTIO_CRYPTO_ERR;

	/* point to indirect vring entry */
	desc = (struct vring_desc *)
		((uint8_t *)op_data_req + indirect_vring_addr_offset);
	for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
		desc[idx].next = idx + 1;
	desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;

	idx = 0;

	/* indirect vring: first part, virtio_crypto_op_data_req */
	desc[idx].addr = indirect_op_data_req_phys_addr;
	desc[idx].len = req_data_len;
	desc[idx++].flags = VRING_DESC_F_NEXT;

	/* indirect vring: iv of cipher */
	if (session->iv.length) {
		if (cop->phys_addr)
			desc[idx].addr = cop->phys_addr + session->iv.offset;
		else {
			if (session->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
				/* give the cookie back so it is not leaked */
				rte_mempool_put(txvq->mpool, crypto_op_cookie);
				return -ENOMEM;
			}

			rte_memcpy(crypto_op_cookie->iv,
					rte_crypto_op_ctod_offset(cop,
					uint8_t *, session->iv.offset),
					session->iv.length);
			desc[idx].addr = indirect_op_data_req_phys_addr +
				indirect_iv_addr_offset;
		}

		desc[idx].len = session->iv.length;
		desc[idx++].flags = VRING_DESC_F_NEXT;
	}

	/* indirect vring: additional auth data */
	if (session->aad.length) {
		desc[idx].addr = session->aad.phys_addr;
		desc[idx].len = session->aad.length;
		desc[idx++].flags = VRING_DESC_F_NEXT;
	}

	/* indirect vring: src data */
	desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
	desc[idx].len = (sym_op->cipher.data.offset
		+ sym_op->cipher.data.length);
	desc[idx++].flags = VRING_DESC_F_NEXT;

	/* indirect vring: dst data */
	if (sym_op->m_dst) {
		desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_dst, 0);
		desc[idx].len = (sym_op->cipher.data.offset
			+ sym_op->cipher.data.length);
	} else {
		desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
		desc[idx].len = (sym_op->cipher.data.offset
			+ sym_op->cipher.data.length);
	}
	desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;

	/* indirect vring: digest result */
	para = &(session->ctrl.u.sym_create_session.u.chain.para);
	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
		hash_result_len = para->u.hash_param.hash_result_len;
	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
		hash_result_len = para->u.mac_param.hash_result_len;
	if (hash_result_len > 0) {
		desc[idx].addr = sym_op->auth.digest.phys_addr;
		desc[idx].len = hash_result_len;
		desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
	}

	/* indirect vring: last part, status returned */
	desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
	desc[idx].len = sizeof(struct virtio_crypto_inhdr);
	desc[idx++].flags = VRING_DESC_F_WRITE;

	num_entry = idx;

	/* save the info needed when the response is received */
	dxp->crypto_op = (void *)cop;
	dxp->ndescs = needed;

	/* use a single buffer */
	start_dp = txvq->vq_ring.desc;
	start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
		indirect_vring_addr_offset;
	start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
	start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;

	idx = start_dp[head_idx].next;
	txvq->vq_desc_head_idx = idx;
	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		txvq->vq_desc_tail_idx = idx;
	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
	vq_update_avail_ring(txvq, head_idx);

	return 0;
}

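/*
 * Dispatch one crypto op to the type-specific enqueue routine; only
 * symmetric ops are supported.
 */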
static int
virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
		struct rte_crypto_op *cop)
{
	int ret;

	switch (cop->type) {
	case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
		ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
		break;
	default:
		VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
				cop->type);
		ret = -EFAULT;
		break;
	}

	return ret;
}

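/*
 * Initialise the vring backing a queue: lay out the ring in the queue's
 * ring memory, chain every descriptor into the free list, disable
 * interrupts from the device and hand the queue over to the device
 * through the bus-specific setup_queue() hook.
 */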
static int
virtio_crypto_vring_start(struct virtqueue *vq)
{
	struct virtio_crypto_hw *hw = vq->hw;
	int i, size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

	/* Chain all the descriptors in the ring with an END */
	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = (uint16_t)(i + 1);
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;

	/*
	 * Disable interrupts from the device (host) to the guest
	 */
	virtqueue_disable_intr(vq);

	/*
	 * Set guest physical address of the virtqueue
	 * in VIRTIO_PCI_QUEUE_PFN config register of device
	 * to share with the backend
	 */
	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
		return -EINVAL;
	}

	return 0;
}

void
virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	if (hw->cvq) {
		virtio_crypto_vring_start(hw->cvq);
		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
	}
}

void
virtio_crypto_dataq_start(struct rte_cryptodev *dev)
{
	/*
	 * Start data vrings
	 * -	Setup vring structure for data queues
	 */
	uint16_t i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Start data vring. */
	for (i = 0; i < hw->max_dataqueues; i++) {
		virtio_crypto_vring_start(dev->data->queue_pairs[i]);
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
	}
}

/* vring size of data queue is 1024 */
#define VIRTIO_MBUF_BURST_SZ 1024

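/*
 * Dequeue (receive) side of the data queue. Despite the tx_queue naming,
 * enqueue and dequeue operate on the same virtqueue: completed requests
 * are polled from its used ring here. A minimal usage sketch through the
 * public cryptodev API, assuming a hypothetical device id bound to this
 * PMD, queue pair 0 and an already built ops[] array, would be:
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
 *	uint16_t done = 0;
 *	while (done < sent)
 *		done += rte_cryptodev_dequeue_burst(dev_id, 0,
 *				ops + done, sent - done);
 *
 * Those calls end up in virtio_crypto_pkt_tx_burst() and
 * virtio_crypto_pkt_rx_burst() respectively.
 */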
uint16_t
virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
		uint16_t nb_pkts)
{
	struct virtqueue *txvq = tx_queue;
	uint16_t nb_used, num, nb_rx;

	nb_used = VIRTQUEUE_NUSED(txvq);

	virtio_rmb();

	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
		? num : VIRTIO_MBUF_BURST_SZ);

	if (num == 0)
		return 0;

	nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
	VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, nb_rx);

	return nb_rx;
}

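/*
 * Enqueue (transmit) side of the data queue: post up to nb_pkts crypto ops
 * on the virtqueue, update the avail index once for the whole burst and
 * kick the device only if it has not suppressed notifications. Returns the
 * number of ops actually enqueued.
 */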
uint16_t
virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
		uint16_t nb_pkts)
{
	struct virtqueue *txvq;
	uint16_t nb_tx;
	int error;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;
	if (unlikely(tx_queue == NULL)) {
		VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
		return 0;
	}
	txvq = tx_queue;

	VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
		/* nb_segs is always 1 for virtio crypto */
		int need = txm->nb_segs - txvq->vq_free_cnt;

		/*
		 * A positive value means there is not enough room left
		 * in the vring descriptors
		 */
		if (unlikely(need > 0)) {
			/*
			 * Check again, as the receive path may have freed
			 * some descriptors in the meantime
			 */
			need = txm->nb_segs - txvq->vq_free_cnt;
			if (unlikely(need > 0)) {
				VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
					"descriptors to transmit");
				break;
			}
		}

		txvq->packets_sent_total++;

		/* Enqueue Packet buffers */
		error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
		if (unlikely(error)) {
			if (error == -ENOSPC)
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue Free count = 0");
			else if (error == -EMSGSIZE)
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue Free count < 1");
			else
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue error: %d", error);
			txvq->packets_sent_failed++;
			break;
		}
	}

	if (likely(nb_tx)) {
		vq_update_avail_idx(txvq);

		if (unlikely(virtqueue_kick_prepare(txvq))) {
			virtqueue_notify(txvq);
			VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
		}
	}

	return nb_tx;
}