/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <cryptodev_pmd.h>

#include "virtqueue.h"
#include "virtio_cryptodev.h"
#include "virtio_crypto_algs.h"

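/*
 * Return the descriptor chain starting at desc_idx to the virtqueue free
 * list: the freed chain is appended to the existing free chain (if any),
 * vq_free_cnt is credited with the chain length and the per-descriptor
 * bookkeeping in vq_descx is reset.
 */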
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used,
	 * then head would be VQ_RING_DESC_CHAIN_END.
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

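/*
 * Dequeue up to num completed crypto operations from the used ring.
 * For each used element, the virtio_crypto_inhdr status written back by
 * the device is translated into an rte_crypto_op status, the per-op
 * cookie is returned to its mempool and the descriptor chain is freed.
 * Returns the number of operations actually dequeued.
 */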
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq,
		struct rte_crypto_op **rx_pkts, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_crypto_op *cop;
	uint16_t used_idx, desc_idx;
	uint16_t i;
	struct virtio_crypto_inhdr *inhdr;
	struct virtio_crypto_op_cookie *op_cookie;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		cop = (struct rte_crypto_op *)
				vq->vq_descx[desc_idx].crypto_op;
		if (unlikely(cop == NULL)) {
			VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
					"mbuf cookie at %u",
					vq->vq_used_cons_idx);
			break;
		}

		op_cookie = (struct virtio_crypto_op_cookie *)
						vq->vq_descx[desc_idx].cookie;
		inhdr = &(op_cookie->inhdr);
		switch (inhdr->status) {
		case VIRTIO_CRYPTO_OK:
			cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case VIRTIO_CRYPTO_ERR:
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_NOTSUPP:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_INVSESS:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			vq->packets_received_failed++;
			break;
		default:
			break;
		}

		vq->packets_received_total++;

		rx_pkts[i] = cop;
		rte_mempool_put(vq->mpool, op_cookie);

		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].crypto_op = NULL;
	}

	return i;
}

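/*
 * Translate a symmetric rte_crypto_op into the virtio_crypto_op_data_req
 * handed to the device: session id, opcode and the per-request cipher or
 * cipher/hash-chaining parameters (lengths and offsets) taken from the op
 * and the session control request. Returns 0 on success, -1 for an
 * unsupported op type.
 */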
static int
virtqueue_crypto_sym_pkt_header_arrange(
		struct rte_crypto_op *cop,
		struct virtio_crypto_op_data_req *data,
		struct virtio_crypto_session *session)
{
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct virtio_crypto_op_data_req *req_data = data;
	struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
	struct virtio_crypto_sym_create_session_req *sym_sess_req =
		&ctrl->u.sym_create_session;
	struct virtio_crypto_alg_chain_session_para *chain_para =
		&sym_sess_req->u.chain.para;
	struct virtio_crypto_cipher_session_para *cipher_para;

	req_data->header.session_id = session->session_id;

	switch (sym_sess_req->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;

		cipher_para = &sym_sess_req->u.cipher.para;
		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
		else
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

		req_data->u.sym_req.u.cipher.para.iv_len
			= session->iv.length;

		req_data->u.sym_req.u.cipher.para.src_data_len =
			(sym_op->cipher.data.length +
				sym_op->cipher.data.offset);
		req_data->u.sym_req.u.cipher.para.dst_data_len =
			req_data->u.sym_req.u.cipher.para.src_data_len;
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		req_data->u.sym_req.op_type =
			VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;

		cipher_para = &chain_para->cipher_param;
		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
		else
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

		req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
		req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;

		req_data->u.sym_req.u.chain.para.src_data_len =
			(sym_op->cipher.data.length +
				sym_op->cipher.data.offset);
		req_data->u.sym_req.u.chain.para.dst_data_len =
			req_data->u.sym_req.u.chain.para.src_data_len;
		req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
			sym_op->cipher.data.offset;
		req_data->u.sym_req.u.chain.para.len_to_cipher =
			sym_op->cipher.data.length;
		req_data->u.sym_req.u.chain.para.hash_start_src_offset =
			sym_op->auth.data.offset;
		req_data->u.sym_req.u.chain.para.len_to_hash =
			sym_op->auth.data.length;
		req_data->u.sym_req.u.chain.para.aad_len =
			chain_para->aad_len;

		if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
			req_data->u.sym_req.u.chain.para.hash_result_len =
				chain_para->u.hash_param.hash_result_len;
		if (chain_para->hash_mode ==
			VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
			req_data->u.sym_req.u.chain.para.hash_result_len =
				chain_para->u.mac_param.hash_result_len;
		break;
	default:
		return -1;
	}

	return 0;
}

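/*
 * Enqueue one symmetric crypto operation using a single indirect
 * descriptor. The per-op cookie taken from the queue mempool is assumed
 * to hold, back to back, the virtio_crypto_op_data_req, the
 * virtio_crypto_inhdr, the indirect descriptor table and an IV buffer
 * (see the offsets computed below). The indirect table is filled roughly
 * as follows, entries marked "if any" being skipped when absent:
 *
 *   [0] op_data_req             (device-readable)
 *   [1] cipher IV, if any       (device-readable)
 *   [2] AAD, if any             (device-readable)
 *   [3] source data             (device-readable)
 *   [4] destination data        (device-writable)
 *   [5] digest result, if any   (device-writable)
 *   [6] inhdr status byte       (device-writable)
 *
 * Only the head descriptor of the main ring is consumed (needed == 1),
 * since everything else lives in the indirect table.
 */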
static int
virtqueue_crypto_sym_enqueue_xmit(
		struct virtqueue *txvq,
		struct rte_crypto_op *cop)
{
	uint16_t idx = 0;
	uint16_t num_entry;
	uint16_t needed = 1;
	uint16_t head_idx;
	struct vq_desc_extra *dxp;
	struct vring_desc *start_dp;
	struct vring_desc *desc;
	uint64_t indirect_op_data_req_phys_addr;
	uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
	uint32_t indirect_vring_addr_offset = req_data_len +
		sizeof(struct virtio_crypto_inhdr);
	uint32_t indirect_iv_addr_offset =
			offsetof(struct virtio_crypto_op_cookie, iv);
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct virtio_crypto_session *session =
		CRYPTODEV_GET_SYM_SESS_PRIV(cop->sym->session);
	struct virtio_crypto_op_data_req *op_data_req;
	uint32_t hash_result_len = 0;
	struct virtio_crypto_op_cookie *crypto_op_cookie;
	struct virtio_crypto_alg_chain_session_para *para;

	if (unlikely(sym_op->m_src->nb_segs != 1))
		return -EMSGSIZE;
	if (unlikely(txvq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(txvq->vq_free_cnt < needed))
		return -EMSGSIZE;
	head_idx = txvq->vq_desc_head_idx;
	if (unlikely(head_idx >= txvq->vq_nentries))
		return -EFAULT;
	if (unlikely(session == NULL))
		return -EFAULT;

	dxp = &txvq->vq_descx[head_idx];

	if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
		VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
		return -EFAULT;
	}
	crypto_op_cookie = dxp->cookie;
	indirect_op_data_req_phys_addr =
		rte_mempool_virt2iova(crypto_op_cookie);
	op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;

	if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session))
		return -EFAULT;

	/* status is initialized to VIRTIO_CRYPTO_ERR */
	((struct virtio_crypto_inhdr *)
		((uint8_t *)op_data_req + req_data_len))->status =
		VIRTIO_CRYPTO_ERR;

	/* point to indirect vring entry */
	desc = (struct vring_desc *)
		((uint8_t *)op_data_req + indirect_vring_addr_offset);
	for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
		desc[idx].next = idx + 1;
	desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;

	idx = 0;

	/* indirect vring: first part, virtio_crypto_op_data_req */
	desc[idx].addr = indirect_op_data_req_phys_addr;
	desc[idx].len = req_data_len;
	desc[idx++].flags = VRING_DESC_F_NEXT;

	/* indirect vring: iv of cipher */
	if (session->iv.length) {
		if (cop->phys_addr)
			desc[idx].addr = cop->phys_addr + session->iv.offset;
		else {
			if (session->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE)
				return -ENOMEM;

			rte_memcpy(crypto_op_cookie->iv,
					rte_crypto_op_ctod_offset(cop,
					uint8_t *, session->iv.offset),
					session->iv.length);
			desc[idx].addr = indirect_op_data_req_phys_addr +
					indirect_iv_addr_offset;
		}

		desc[idx].len = session->iv.length;
		desc[idx++].flags = VRING_DESC_F_NEXT;
	}

	/* indirect vring: additional auth data */
	if (session->aad.length) {
		desc[idx].addr = session->aad.phys_addr;
		desc[idx].len = session->aad.length;
		desc[idx++].flags = VRING_DESC_F_NEXT;
	}

	/* indirect vring: src data */
	desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
	desc[idx].len = (sym_op->cipher.data.offset
			+ sym_op->cipher.data.length);
	desc[idx++].flags = VRING_DESC_F_NEXT;

	/* indirect vring: dst data */
	if (sym_op->m_dst) {
		desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_dst, 0);
		desc[idx].len = (sym_op->cipher.data.offset
				+ sym_op->cipher.data.length);
	} else {
		desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
		desc[idx].len = (sym_op->cipher.data.offset
				+ sym_op->cipher.data.length);
	}
	desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;

	/* indirect vring: digest result */
	para = &(session->ctrl.u.sym_create_session.u.chain.para);
	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
		hash_result_len = para->u.hash_param.hash_result_len;
	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
		hash_result_len = para->u.mac_param.hash_result_len;
	if (hash_result_len > 0) {
		desc[idx].addr = sym_op->auth.digest.phys_addr;
		desc[idx].len = hash_result_len;
		desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
	}

	/* indirect vring: last part, status returned */
	desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
	desc[idx].len = sizeof(struct virtio_crypto_inhdr);
	desc[idx++].flags = VRING_DESC_F_WRITE;

	num_entry = idx;

	/* save the info to use when receiving packets */
	dxp->crypto_op = (void *)cop;
	dxp->ndescs = needed;

	/* use a single buffer */
	start_dp = txvq->vq_ring.desc;
	start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
		indirect_vring_addr_offset;
	start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
	start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;

	idx = start_dp[head_idx].next;
	txvq->vq_desc_head_idx = idx;
	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		txvq->vq_desc_tail_idx = idx;
	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
	vq_update_avail_ring(txvq, head_idx);

	return 0;
}

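/*
 * Dispatch the enqueue by crypto op type. Only RTE_CRYPTO_OP_TYPE_SYMMETRIC
 * is handled; any other type is rejected with -EFAULT.
 */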
34682adb12aSJay Zhou static int
virtqueue_crypto_enqueue_xmit(struct virtqueue * txvq,struct rte_crypto_op * cop)34782adb12aSJay Zhou virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
34882adb12aSJay Zhou struct rte_crypto_op *cop)
34982adb12aSJay Zhou {
35082adb12aSJay Zhou int ret;
35182adb12aSJay Zhou
35282adb12aSJay Zhou switch (cop->type) {
35382adb12aSJay Zhou case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
35482adb12aSJay Zhou ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
35582adb12aSJay Zhou break;
35682adb12aSJay Zhou default:
35782adb12aSJay Zhou VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
35882adb12aSJay Zhou cop->type);
35982adb12aSJay Zhou ret = -EFAULT;
36082adb12aSJay Zhou break;
36182adb12aSJay Zhou }
36282adb12aSJay Zhou
36382adb12aSJay Zhou return ret;
36482adb12aSJay Zhou }
36525500d4bSJay Zhou
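/*
 * Initialize the split vring backing a control or data queue: chain all
 * descriptors into the free list, disable host-to-guest interrupts and
 * hand the ring address to the device via the vtpci setup_queue op.
 */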
static int
virtio_crypto_vring_start(struct virtqueue *vq)
{
	struct virtio_crypto_hw *hw = vq->hw;
	int i, size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

	/* Chain all the descriptors in the ring with an END */
	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = (uint16_t)(i + 1);
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;

	/*
	 * Prevent the device (host) from interrupting the guest
	 */
	virtqueue_disable_intr(vq);

	/*
	 * Set the guest physical address of the virtqueue in the
	 * VIRTIO_PCI_QUEUE_PFN config register of the device,
	 * so it is shared with the backend
	 */
	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
		return -EINVAL;
	}

	return 0;
}

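/* Start the control virtqueue, if the device exposes one, and dump it for debug. */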
void
virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	if (hw->cvq) {
		virtio_crypto_vring_start(hw->cvq);
		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
	}
}

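/*
 * Start all data virtqueues; hw->max_dataqueues of them are expected in
 * dev->data->queue_pairs.
 */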
void
virtio_crypto_dataq_start(struct rte_cryptodev *dev)
{
	/*
	 * Start data vrings
	 * - Setup vring structure for data queues
	 */
	uint16_t i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Start data vring. */
	for (i = 0; i < hw->max_dataqueues; i++) {
		virtio_crypto_vring_start(dev->data->queue_pairs[i]);
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
	}
}

/* vring size of data queue is 1024 */
#define VIRTIO_MBUF_BURST_SZ 1024

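/*
 * Burst dequeue of completed crypto operations. The parameter is named
 * tx_queue because the same data virtqueue is used for both submission and
 * completion; presumably this is registered elsewhere in the PMD as the
 * cryptodev dequeue_burst callback. The burst is capped by the number of
 * used ring entries and by VIRTIO_MBUF_BURST_SZ.
 */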
uint16_t
virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
		uint16_t nb_pkts)
{
	struct virtqueue *txvq = tx_queue;
	uint16_t nb_used, num, nb_rx;

	nb_used = VIRTQUEUE_NUSED(txvq);

	virtio_rmb();

	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
		? num : VIRTIO_MBUF_BURST_SZ);

	if (num == 0)
		return 0;

	nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
	VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num);

	return nb_rx;
}

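/*
 * Burst enqueue of crypto operations into the data virtqueue; presumably
 * the cryptodev enqueue_burst counterpart of the rx burst above. The avail
 * index is published once after the loop, and the backend is notified only
 * if it has not suppressed notifications (virtqueue_kick_prepare).
 */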
uint16_t
virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
		uint16_t nb_pkts)
{
	struct virtqueue *txvq;
	uint16_t nb_tx;
	int error;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;
	if (unlikely(tx_queue == NULL)) {
		VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
		return 0;
	}
	txvq = tx_queue;

	VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
		/* nb_segs is always 1 in the virtio crypto case */
		int need = txm->nb_segs - txvq->vq_free_cnt;

		/*
		 * A positive value indicates there is not enough space
		 * in the vring descriptors
		 */
		if (unlikely(need > 0)) {
			/*
			 * Try again, because the receive process may have
			 * freed some space
			 */
			need = txm->nb_segs - txvq->vq_free_cnt;
			if (unlikely(need > 0)) {
				VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
					"descriptors to transmit");
				break;
			}
		}

		txvq->packets_sent_total++;

		/* Enqueue packet buffers */
		error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
		if (unlikely(error)) {
			if (error == -ENOSPC)
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue Free count = 0");
			else if (error == -EMSGSIZE)
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue Free count < 1");
			else
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue error: %d", error);
			txvq->packets_sent_failed++;
			break;
		}
	}

	if (likely(nb_tx)) {
		vq_update_avail_idx(txvq);

		if (unlikely(virtqueue_kick_prepare(txvq))) {
			virtqueue_notify(txvq);
			VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
		}
	}

	return nb_tx;
}