/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <cryptodev_pmd.h>

#include "virtqueue.h"
#include "virtio_cryptodev.h"
#include "virtio_crypto_algs.h"

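/*
 * Return the descriptor chain starting at desc_idx to the free list and
 * credit the freed entries back to vq_free_cnt.
 */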
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

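/*
 * Walk the used ring for up to num completed requests: translate each
 * virtio_crypto_inhdr status into an rte_crypto_op status, return the op
 * cookie to the mempool, free the descriptor chain and hand the ops back
 * to the caller.
 */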
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq,
		struct rte_crypto_op **rx_pkts, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_crypto_op *cop;
	uint16_t used_idx, desc_idx;
	uint16_t i;
	struct virtio_crypto_inhdr *inhdr;
	struct virtio_crypto_op_cookie *op_cookie;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		cop = (struct rte_crypto_op *)
				vq->vq_descx[desc_idx].crypto_op;
		if (unlikely(cop == NULL)) {
			VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
					"mbuf cookie at %u",
					vq->vq_used_cons_idx);
			break;
		}

		op_cookie = (struct virtio_crypto_op_cookie *)
						vq->vq_descx[desc_idx].cookie;
		inhdr = &(op_cookie->inhdr);
		switch (inhdr->status) {
		case VIRTIO_CRYPTO_OK:
			cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case VIRTIO_CRYPTO_ERR:
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_NOTSUPP:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_INVSESS:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			vq->packets_received_failed++;
			break;
		default:
			break;
		}

		vq->packets_received_total++;

		rx_pkts[i] = cop;
		rte_mempool_put(vq->mpool, op_cookie);

		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].crypto_op = NULL;
	}

	return i;
}

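/*
 * Fill the virtio_crypto_op_data_req header (opcode, session id and the
 * per-algorithm cipher/chain parameters) from the session's control
 * request and the symmetric crypto op. Only plain cipher and algorithm
 * chaining operations are handled.
 */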
static int
virtqueue_crypto_sym_pkt_header_arrange(
		struct rte_crypto_op *cop,
		struct virtio_crypto_op_data_req *data,
		struct virtio_crypto_session *session)
{
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct virtio_crypto_op_data_req *req_data = data;
	struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
	struct virtio_crypto_sym_create_session_req *sym_sess_req =
		&ctrl->u.sym_create_session;
	struct virtio_crypto_alg_chain_session_para *chain_para =
		&sym_sess_req->u.chain.para;
	struct virtio_crypto_cipher_session_para *cipher_para;

	req_data->header.session_id = session->session_id;

	switch (sym_sess_req->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;

		cipher_para = &sym_sess_req->u.cipher.para;
		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
		else
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

		req_data->u.sym_req.u.cipher.para.iv_len
			= session->iv.length;

		req_data->u.sym_req.u.cipher.para.src_data_len =
			(sym_op->cipher.data.length +
				sym_op->cipher.data.offset);
		req_data->u.sym_req.u.cipher.para.dst_data_len =
			req_data->u.sym_req.u.cipher.para.src_data_len;
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		req_data->u.sym_req.op_type =
			VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;

		cipher_para = &chain_para->cipher_param;
		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
		else
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

		req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
		req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;

		req_data->u.sym_req.u.chain.para.src_data_len =
			(sym_op->cipher.data.length +
				sym_op->cipher.data.offset);
		req_data->u.sym_req.u.chain.para.dst_data_len =
			req_data->u.sym_req.u.chain.para.src_data_len;
		req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
			sym_op->cipher.data.offset;
		req_data->u.sym_req.u.chain.para.len_to_cipher =
			sym_op->cipher.data.length;
		req_data->u.sym_req.u.chain.para.hash_start_src_offset =
			sym_op->auth.data.offset;
		req_data->u.sym_req.u.chain.para.len_to_hash =
			sym_op->auth.data.length;
		req_data->u.sym_req.u.chain.para.aad_len =
			chain_para->aad_len;

		if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
			req_data->u.sym_req.u.chain.para.hash_result_len =
				chain_para->u.hash_param.hash_result_len;
		if (chain_para->hash_mode ==
			VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
			req_data->u.sym_req.u.chain.para.hash_result_len =
				chain_para->u.mac_param.hash_result_len;
		break;
	default:
		return -1;
	}

	return 0;
}

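/*
 * Build the request for one symmetric crypto op. The op cookie taken from
 * the queue's mempool holds the virtio_crypto_op_data_req, the inhdr the
 * device writes the status into, an optional copy of the IV and an
 * indirect descriptor table; a single ring descriptor flagged
 * VRING_DESC_F_INDIRECT then points at that table.
 */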
static int
virtqueue_crypto_sym_enqueue_xmit(
		struct virtqueue *txvq,
		struct rte_crypto_op *cop)
{
	uint16_t idx = 0;
	uint16_t num_entry;
	uint16_t needed = 1;
	uint16_t head_idx;
	struct vq_desc_extra *dxp;
	struct vring_desc *start_dp;
	struct vring_desc *desc;
	uint64_t indirect_op_data_req_phys_addr;
	uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
	uint32_t indirect_vring_addr_offset = req_data_len +
		sizeof(struct virtio_crypto_inhdr);
	uint32_t indirect_iv_addr_offset =
			offsetof(struct virtio_crypto_op_cookie, iv);
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct virtio_crypto_session *session =
		CRYPTODEV_GET_SYM_SESS_PRIV(cop->sym->session);
	struct virtio_crypto_op_data_req *op_data_req;
	uint32_t hash_result_len = 0;
	struct virtio_crypto_op_cookie *crypto_op_cookie;
	struct virtio_crypto_alg_chain_session_para *para;

	if (unlikely(sym_op->m_src->nb_segs != 1))
		return -EMSGSIZE;
	if (unlikely(txvq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(txvq->vq_free_cnt < needed))
		return -EMSGSIZE;
	head_idx = txvq->vq_desc_head_idx;
	if (unlikely(head_idx >= txvq->vq_nentries))
		return -EFAULT;
	if (unlikely(session == NULL))
		return -EFAULT;

	dxp = &txvq->vq_descx[head_idx];

	if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
		VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
		return -EFAULT;
	}
	crypto_op_cookie = dxp->cookie;
	indirect_op_data_req_phys_addr =
		rte_mempool_virt2iova(crypto_op_cookie);
	op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;

	if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session)) {
		/* return the cookie to the mempool so it is not leaked */
		rte_mempool_put(txvq->mpool, dxp->cookie);
		return -EFAULT;
	}

	/* status is initialized to VIRTIO_CRYPTO_ERR */
	((struct virtio_crypto_inhdr *)
		((uint8_t *)op_data_req + req_data_len))->status =
		VIRTIO_CRYPTO_ERR;

	/* point to indirect vring entry */
	desc = (struct vring_desc *)
		((uint8_t *)op_data_req + indirect_vring_addr_offset);
	for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
		desc[idx].next = idx + 1;
	desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;

	idx = 0;

	/* indirect vring: first part, virtio_crypto_op_data_req */
	desc[idx].addr = indirect_op_data_req_phys_addr;
	desc[idx].len = req_data_len;
	desc[idx++].flags = VRING_DESC_F_NEXT;

	/* indirect vring: iv of cipher */
	if (session->iv.length) {
		if (cop->phys_addr)
			desc[idx].addr = cop->phys_addr + session->iv.offset;
		else {
			if (session->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
				/* return the cookie to the mempool so it is not leaked */
				rte_mempool_put(txvq->mpool, dxp->cookie);
				return -ENOMEM;
			}

			rte_memcpy(crypto_op_cookie->iv,
					rte_crypto_op_ctod_offset(cop,
					uint8_t *, session->iv.offset),
					session->iv.length);
			desc[idx].addr = indirect_op_data_req_phys_addr +
				indirect_iv_addr_offset;
		}

		desc[idx].len = session->iv.length;
		desc[idx++].flags = VRING_DESC_F_NEXT;
	}

	/* indirect vring: additional auth data */
	if (session->aad.length) {
		desc[idx].addr = session->aad.phys_addr;
		desc[idx].len = session->aad.length;
		desc[idx++].flags = VRING_DESC_F_NEXT;
	}

	/* indirect vring: src data */
	desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
	desc[idx].len = (sym_op->cipher.data.offset
		+ sym_op->cipher.data.length);
	desc[idx++].flags = VRING_DESC_F_NEXT;

	/* indirect vring: dst data */
	if (sym_op->m_dst) {
		desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_dst, 0);
		desc[idx].len = (sym_op->cipher.data.offset
			+ sym_op->cipher.data.length);
	} else {
		desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
		desc[idx].len = (sym_op->cipher.data.offset
			+ sym_op->cipher.data.length);
	}
	desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;

	/* indirect vring: digest result */
	para = &(session->ctrl.u.sym_create_session.u.chain.para);
	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
		hash_result_len = para->u.hash_param.hash_result_len;
	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
		hash_result_len = para->u.mac_param.hash_result_len;
	if (hash_result_len > 0) {
		desc[idx].addr = sym_op->auth.digest.phys_addr;
		desc[idx].len = hash_result_len;
		desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
	}

	/* indirect vring: last part, status returned */
	desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
	desc[idx].len = sizeof(struct virtio_crypto_inhdr);
	desc[idx++].flags = VRING_DESC_F_WRITE;

	num_entry = idx;

	/* save the info needed when the response is received */
	dxp->crypto_op = (void *)cop;
	dxp->ndescs = needed;

	/* use a single buffer */
	start_dp = txvq->vq_ring.desc;
	start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
		indirect_vring_addr_offset;
	start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
	start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;

	idx = start_dp[head_idx].next;
	txvq->vq_desc_head_idx = idx;
	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		txvq->vq_desc_tail_idx = idx;
	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
	vq_update_avail_ring(txvq, head_idx);

	return 0;
}

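/* Dispatch one crypto op to the data queue; only symmetric ops are supported. */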
static int
virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
		struct rte_crypto_op *cop)
{
	int ret;

	switch (cop->type) {
	case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
		ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
		break;
	default:
		VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
				cop->type);
		ret = -EFAULT;
		break;
	}

	return ret;
}

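/*
 * Initialize the vring memory for a queue, chain every descriptor into the
 * free list and hand the queue's guest physical address to the device.
 */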
static int
virtio_crypto_vring_start(struct virtqueue *vq)
{
	struct virtio_crypto_hw *hw = vq->hw;
	int i, size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

	/* Chain all the descriptors in the ring with an END */
	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = (uint16_t)(i + 1);
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;

	/*
	 * Disable device(host) interrupting guest
	 */
	virtqueue_disable_intr(vq);

	/*
	 * Set guest physical address of the virtqueue
	 * in VIRTIO_PCI_QUEUE_PFN config register of device
	 * to share with the backend
	 */
	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
		return -EINVAL;
	}

	return 0;
}

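/* Start the control virtqueue, if the device exposes one. */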
void
virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	if (hw->cvq) {
		virtio_crypto_vring_start(hw->cvq);
		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
	}
}

void
virtio_crypto_dataq_start(struct rte_cryptodev *dev)
{
	/*
	 * Start data vrings
	 * -	Setup vring structure for data queues
	 */
	uint16_t i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Start data vring. */
	for (i = 0; i < hw->max_dataqueues; i++) {
		virtio_crypto_vring_start(dev->data->queue_pairs[i]);
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
	}
}

/* vring size of data queue is 1024 */
#define VIRTIO_MBUF_BURST_SZ 1024

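/*
 * Dequeue up to nb_pkts completed crypto ops from a data queue. The
 * parameter is named tx_queue because the same virtqueue carries both
 * the requests and their responses.
 */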
uint16_t
virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
		uint16_t nb_pkts)
{
	struct virtqueue *txvq = tx_queue;
	uint16_t nb_used, num, nb_rx;

	nb_used = VIRTQUEUE_NUSED(txvq);

	virtio_rmb();

	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
		? num : VIRTIO_MBUF_BURST_SZ);

	if (num == 0)
		return 0;

	nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
	VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num);

	return nb_rx;
}

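/*
 * Enqueue up to nb_pkts crypto ops on a data queue, then update the avail
 * index once and kick the device if it has not suppressed notifications.
 */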
uint16_t
virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
		uint16_t nb_pkts)
{
	struct virtqueue *txvq;
	uint16_t nb_tx;
	int error;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;
	if (unlikely(tx_queue == NULL)) {
		VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
		return 0;
	}
	txvq = tx_queue;

	VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
		/* nb_segs is always 1 in the virtio crypto case */
		int need = txm->nb_segs - txvq->vq_free_cnt;

		/*
		 * A positive value means there is not enough space in the
		 * vring descriptors.
		 */
		if (unlikely(need > 0)) {
			/*
			 * Try again, because the receive path may have freed
			 * some space in the meantime.
			 */
			need = txm->nb_segs - txvq->vq_free_cnt;
			if (unlikely(need > 0)) {
				VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
					"descriptors to transmit");
				break;
			}
		}

		txvq->packets_sent_total++;

		/* Enqueue Packet buffers */
		error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
		if (unlikely(error)) {
			/* the enqueue path returns negative errno values */
			if (error == -ENOSPC)
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue Free count = 0");
			else if (error == -EMSGSIZE)
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue Free count < 1");
			else
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue error: %d", error);
			txvq->packets_sent_failed++;
			break;
		}
	}

	if (likely(nb_tx)) {
		vq_update_avail_idx(txvq);

		if (unlikely(virtqueue_kick_prepare(txvq))) {
			virtqueue_notify(txvq);
			VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
		}
	}

	return nb_tx;
}
530