/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <rte_cryptodev_pmd.h>

#include "virtqueue.h"
#include "virtio_cryptodev.h"
#include "virtio_crypto_algs.h"

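/*
 * Give a completed descriptor chain back to the virtqueue: credit
 * vq_free_cnt, walk to the last descriptor of the chain, and append
 * the chain to the tail of the free list (or make it the new head if
 * the free list is empty).
 */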
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the newly freed chain to the end of the
	 * existing free chain, if any. If the virtqueue was completely
	 * used, the free-list head is VQ_RING_DESC_CHAIN_END.
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

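/*
 * Dequeue up to num completed ops from the used ring: map the
 * virtio_crypto_inhdr status written by the device to an
 * rte_crypto_op status, return the op cookie to the mempool and free
 * the descriptor chain. Returns the number of ops actually dequeued.
 */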
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq,
		struct rte_crypto_op **rx_pkts, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_crypto_op *cop;
	uint16_t used_idx, desc_idx;
	uint16_t i;
	struct virtio_crypto_inhdr *inhdr;
	struct virtio_crypto_op_cookie *op_cookie;

	/* The caller has already bounded num by the used-ring count */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		cop = (struct rte_crypto_op *)
				vq->vq_descx[desc_idx].crypto_op;
		if (unlikely(cop == NULL)) {
			VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
					"crypto op cookie at %u",
					vq->vq_used_cons_idx);
			break;
		}

		op_cookie = (struct virtio_crypto_op_cookie *)
						vq->vq_descx[desc_idx].cookie;
		inhdr = &(op_cookie->inhdr);
		switch (inhdr->status) {
		case VIRTIO_CRYPTO_OK:
			cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case VIRTIO_CRYPTO_ERR:
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_NOTSUPP:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_INVSESS:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			vq->packets_received_failed++;
			break;
		default:
			break;
		}

		vq->packets_received_total++;

		rx_pkts[i] = cop;
		rte_mempool_put(vq->mpool, op_cookie);

		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].crypto_op = NULL;
	}

	return i;
}

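/*
 * Fill in the virtio_crypto_op_data_req for a symmetric op from the
 * parameters saved at session creation: the opcode (encrypt/decrypt),
 * IV and AAD lengths and, for algorithm chaining, the cipher/hash
 * regions plus the expected hash result length.
 */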
static int
virtqueue_crypto_sym_pkt_header_arrange(
		struct rte_crypto_op *cop,
		struct virtio_crypto_op_data_req *data,
		struct virtio_crypto_session *session)
{
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct virtio_crypto_op_data_req *req_data = data;
	struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
	struct virtio_crypto_sym_create_session_req *sym_sess_req =
		&ctrl->u.sym_create_session;
	struct virtio_crypto_alg_chain_session_para *chain_para =
		&sym_sess_req->u.chain.para;
	struct virtio_crypto_cipher_session_para *cipher_para;

	req_data->header.session_id = session->session_id;

	switch (sym_sess_req->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;

		cipher_para = &sym_sess_req->u.cipher.para;
		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
		else
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

		req_data->u.sym_req.u.cipher.para.iv_len
			= session->iv.length;

		req_data->u.sym_req.u.cipher.para.src_data_len =
			(sym_op->cipher.data.length +
				sym_op->cipher.data.offset);
		req_data->u.sym_req.u.cipher.para.dst_data_len =
			req_data->u.sym_req.u.cipher.para.src_data_len;
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		req_data->u.sym_req.op_type =
			VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;

		cipher_para = &chain_para->cipher_param;
		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
		else
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

		req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
		req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;

		req_data->u.sym_req.u.chain.para.src_data_len =
			(sym_op->cipher.data.length +
				sym_op->cipher.data.offset);
		req_data->u.sym_req.u.chain.para.dst_data_len =
			req_data->u.sym_req.u.chain.para.src_data_len;
		req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
			sym_op->cipher.data.offset;
		req_data->u.sym_req.u.chain.para.len_to_cipher =
			sym_op->cipher.data.length;
		req_data->u.sym_req.u.chain.para.hash_start_src_offset =
			sym_op->auth.data.offset;
		req_data->u.sym_req.u.chain.para.len_to_hash =
			sym_op->auth.data.length;
		req_data->u.sym_req.u.chain.para.aad_len =
			chain_para->aad_len;

		if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
			req_data->u.sym_req.u.chain.para.hash_result_len =
				chain_para->u.hash_param.hash_result_len;
		if (chain_para->hash_mode ==
			VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
			req_data->u.sym_req.u.chain.para.hash_result_len =
				chain_para->u.mac_param.hash_result_len;
		break;
	default:
		return -1;
	}

	return 0;
}

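/*
 * Enqueue one symmetric crypto op on the data queue using a single
 * ring descriptor marked VRING_DESC_F_INDIRECT. The op cookie taken
 * from the queue's mempool holds, back to back: the
 * virtio_crypto_op_data_req, the virtio_crypto_inhdr the device fills
 * with the op status, and the indirect descriptor table itself. The
 * indirect table chains the request header, the optional IV and AAD,
 * the source data, the (possibly in-place) destination data, the
 * optional digest and, last, the device-writable status byte.
 */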
static int
virtqueue_crypto_sym_enqueue_xmit(
		struct virtqueue *txvq,
		struct rte_crypto_op *cop)
{
	uint16_t idx = 0;
	uint16_t num_entry;
	uint16_t needed = 1;
	uint16_t head_idx;
	struct vq_desc_extra *dxp;
	struct vring_desc *start_dp;
	struct vring_desc *desc;
	uint64_t indirect_op_data_req_phys_addr;
	uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
	uint32_t indirect_vring_addr_offset = req_data_len +
		sizeof(struct virtio_crypto_inhdr);
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct virtio_crypto_session *session =
		(struct virtio_crypto_session *)get_session_private_data(
		cop->sym->session, cryptodev_virtio_driver_id);
	struct virtio_crypto_op_data_req *op_data_req;
	uint32_t hash_result_len = 0;
	struct virtio_crypto_op_cookie *crypto_op_cookie;
	struct virtio_crypto_alg_chain_session_para *para;

	if (unlikely(sym_op->m_src->nb_segs != 1))
		return -EMSGSIZE;
	if (unlikely(txvq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(txvq->vq_free_cnt < needed))
		return -EMSGSIZE;
	head_idx = txvq->vq_desc_head_idx;
	if (unlikely(head_idx >= txvq->vq_nentries))
		return -EFAULT;
	if (unlikely(session == NULL))
		return -EFAULT;

	dxp = &txvq->vq_descx[head_idx];

	if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
		VIRTIO_CRYPTO_TX_LOG_ERR("cannot get cookie");
		return -EFAULT;
	}
	crypto_op_cookie = dxp->cookie;
	indirect_op_data_req_phys_addr =
		rte_mempool_virt2iova(crypto_op_cookie);
	op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;
	if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req,
			session)) {
		/* return the cookie on failure, otherwise it would leak */
		rte_mempool_put(txvq->mpool, dxp->cookie);
		return -EFAULT;
	}

	/* status is initialized to VIRTIO_CRYPTO_ERR */
	((struct virtio_crypto_inhdr *)
		((uint8_t *)op_data_req + req_data_len))->status =
		VIRTIO_CRYPTO_ERR;

	/* point to the indirect vring entries */
	desc = (struct vring_desc *)
		((uint8_t *)op_data_req + indirect_vring_addr_offset);
	for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
		desc[idx].next = idx + 1;
	desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;

	idx = 0;

	/* indirect vring: first part, virtio_crypto_op_data_req */
	desc[idx].addr = indirect_op_data_req_phys_addr;
	desc[idx].len = req_data_len;
	desc[idx++].flags = VRING_DESC_F_NEXT;

	/* indirect vring: cipher IV */
	if (session->iv.length) {
		desc[idx].addr = cop->phys_addr + session->iv.offset;
		desc[idx].len = session->iv.length;
		desc[idx++].flags = VRING_DESC_F_NEXT;
	}

	/* indirect vring: additional authenticated data (AAD) */
	if (session->aad.length) {
		desc[idx].addr = session->aad.phys_addr;
		desc[idx].len = session->aad.length;
		desc[idx++].flags = VRING_DESC_F_NEXT;
	}

	/* indirect vring: src data */
	desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
	desc[idx].len = (sym_op->cipher.data.offset
		+ sym_op->cipher.data.length);
	desc[idx++].flags = VRING_DESC_F_NEXT;

	/* indirect vring: dst data, in-place when no m_dst is supplied */
	if (sym_op->m_dst) {
		desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0);
		desc[idx].len = (sym_op->cipher.data.offset
			+ sym_op->cipher.data.length);
	} else {
		desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
		desc[idx].len = (sym_op->cipher.data.offset
			+ sym_op->cipher.data.length);
	}
	desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;

	/* indirect vring: digest result */
	para = &(session->ctrl.u.sym_create_session.u.chain.para);
	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
		hash_result_len = para->u.hash_param.hash_result_len;
	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
		hash_result_len = para->u.mac_param.hash_result_len;
	if (hash_result_len > 0) {
		desc[idx].addr = sym_op->auth.digest.phys_addr;
		desc[idx].len = hash_result_len;
		desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
	}

	/* indirect vring: last part, status returned */
	desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
	desc[idx].len = sizeof(struct virtio_crypto_inhdr);
	desc[idx++].flags = VRING_DESC_F_WRITE;

	num_entry = idx;

	/* save the info needed when the response is received */
	dxp->crypto_op = (void *)cop;
	dxp->ndescs = needed;

	/* the whole request occupies a single (indirect) ring descriptor */
	start_dp = txvq->vq_ring.desc;
	start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
		indirect_vring_addr_offset;
	start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
	start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;

	idx = start_dp[head_idx].next;
	txvq->vq_desc_head_idx = idx;
	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		txvq->vq_desc_tail_idx = idx;
	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
	vq_update_avail_ring(txvq, head_idx);

	return 0;
}

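/* Dispatch on the op type; only symmetric ops are supported here. */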
static int
virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
		struct rte_crypto_op *cop)
{
	int ret;

	switch (cop->type) {
	case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
		ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
		break;
	default:
		VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
				cop->type);
		ret = -EFAULT;
		break;
	}

	return ret;
}

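/*
 * Initialize the vring of a control or data queue: lay the ring out
 * in the queue's ring memory, chain all descriptors into the free
 * list, disable host-to-guest interrupts and pass the queue address
 * to the device.
 */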
static int
virtio_crypto_vring_start(struct virtqueue *vq)
{
	struct virtio_crypto_hw *hw = vq->hw;
	int i, size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

	/* Chain all the descriptors in the ring, terminated with an END */
	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = (uint16_t)(i + 1);
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;

	/*
	 * Disable the device (host) from interrupting the guest
	 */
	virtqueue_disable_intr(vq);

	/*
	 * Set the guest physical address of the virtqueue in the
	 * VIRTIO_PCI_QUEUE_PFN config register of the device
	 * to share it with the backend
	 */
	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
		return -EINVAL;
	}

	return 0;
}

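/* Start the control queue, which carries session control requests. */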
void
virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	if (hw->cvq) {
		virtio_crypto_vring_start(hw->cvq);
		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
	}
}

void
virtio_crypto_dataq_start(struct rte_cryptodev *dev)
{
	/*
	 * Start data vrings
	 * -	Setup vring structure for data queues
	 */
	uint16_t i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Start data vring. */
	for (i = 0; i < hw->max_dataqueues; i++) {
		virtio_crypto_vring_start(dev->data->queue_pairs[i]);
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
	}
}

/* vring size of data queue is 1024 */
#define VIRTIO_MBUF_BURST_SZ 1024

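/*
 * Dequeue completed ops. A single data queue carries both requests
 * and responses, which is why the queue parameter is named tx_queue
 * here.
 */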
uint16_t
virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
		uint16_t nb_pkts)
{
	struct virtqueue *txvq = tx_queue;
	uint16_t nb_used, num, nb_rx;

	nb_used = VIRTQUEUE_NUSED(txvq);

	virtio_rmb();

	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
		? num : VIRTIO_MBUF_BURST_SZ);

	if (num == 0)
		return 0;

	nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
	VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num);

	return nb_rx;
}

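/*
 * Enqueue a burst of ops on the data queue and notify the backend if
 * needed. Returns the number of ops actually enqueued; any remaining
 * ops stay owned by the caller.
 */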
uint16_t
virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
		uint16_t nb_pkts)
{
	struct virtqueue *txvq;
	uint16_t nb_tx;
	int error;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;
	if (unlikely(tx_queue == NULL)) {
		VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
		return 0;
	}
	txvq = tx_queue;

	VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
		/* nb_segs is always 1 in the virtio crypto case */
		int need = txm->nb_segs - txvq->vq_free_cnt;

		/*
		 * A positive value means there is not enough space in
		 * the vring descriptors
		 */
		if (unlikely(need > 0)) {
			/*
			 * Try again because the receive path may have
			 * freed some space in the meantime
			 */
			need = txm->nb_segs - txvq->vq_free_cnt;
			if (unlikely(need > 0)) {
				VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
					"descriptors to transmit");
				break;
			}
		}

		txvq->packets_sent_total++;

		/* Enqueue packet buffers */
		error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
		if (unlikely(error)) {
			/* the enqueue path returns negative errno values */
			if (error == -ENOSPC)
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue Free count = 0");
			else if (error == -EMSGSIZE)
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue Free count < 1");
			else
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue error: %d", error);
			txvq->packets_sent_failed++;
			break;
		}
	}

	if (likely(nb_tx)) {
		vq_update_avail_idx(txvq);

		if (unlikely(virtqueue_kick_prepare(txvq))) {
			virtqueue_notify(txvq);
			VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
		}
	}

	return nb_tx;
}