/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */

#ifndef _VIRTQUEUE_H_
#define _VIRTQUEUE_H_

#include <stdint.h>

#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>

#include "virtio_pci.h"
#include "virtio_ring.h"
#include "virtio_logs.h"
#include "virtio_crypto.h"

struct rte_mbuf;

/*
 * Per virtio_config.h in Linux.
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so smp_mb() et al are
 * sufficient.
 */
#define virtio_mb()	rte_smp_mb()
#define virtio_rmb()	rte_smp_rmb()
#define virtio_wmb()	rte_smp_wmb()

#define VIRTQUEUE_MAX_NAME_SZ 32

enum { VTCRYPTO_DATAQ = 0, VTCRYPTO_CTRLQ = 1 };

/**
 * The maximum virtqueue size is 2^15. Use that value as the
 * end-of-descriptor-chain terminator, since it will never be a valid
 * index in the descriptor table. This is also used to verify that we
 * are handling vq_free_cnt correctly.
 */
#define VQ_RING_DESC_CHAIN_END 32768
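
/*
 * Illustrative sketch only, not driver code: a walk of the free
 * descriptor list stops when the next index equals
 * VQ_RING_DESC_CHAIN_END, which can never name a real descriptor.
 * Field names refer to struct virtqueue, defined further below; the
 * terminator placement is an assumption of this sketch.
 *
 *	uint16_t idx = vq->vq_desc_head_idx;
 *	while (idx != VQ_RING_DESC_CHAIN_END) {
 *		struct vring_desc *dp = &vq->vq_ring.desc[idx];
 *		// ... process dp ...
 *		idx = dp->next;
 *	}
 */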

struct vq_desc_extra {
	void *crypto_op;
	void *cookie;
	uint16_t ndescs;
};

struct virtqueue {
	/** virtio_crypto_hw structure pointer. */
	struct virtio_crypto_hw *hw;
	/** Memzone to populate the RX ring. */
	const struct rte_memzone *mz;
	/** Mempool to populate hdr and request. */
	struct rte_mempool *mpool;
	uint8_t dev_id;			/**< Device identifier. */
	uint16_t vq_queue_index;	/**< PCI queue index. */

	void *vq_ring_virt_mem;		/**< Linear address of vring. */
	unsigned int vq_ring_size;
	phys_addr_t vq_ring_mem;	/**< Physical address of vring. */

	struct vring vq_ring;	/**< vring keeping desc, used and avail. */
	uint16_t vq_free_cnt;	/**< Number of free descriptors. */
	uint16_t vq_nentries;	/**< Number of vring descriptors. */

	/**
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t vq_desc_head_idx;
	uint16_t vq_desc_tail_idx;
	/**
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t vq_used_cons_idx;
	uint16_t vq_avail_idx;

	/* Statistics */
	uint64_t packets_sent_total;
	uint64_t packets_sent_failed;
	uint64_t packets_received_total;
	uint64_t packets_received_failed;

	uint16_t *notify_addr;

	struct vq_desc_extra vq_descx[];
};
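
/*
 * A minimal allocation sketch, not part of this header: because
 * vq_descx[] is a flexible array member, the queue is sized with one
 * vq_desc_extra slot per ring entry. vq_size and socket_id are
 * hypothetical parameters of this example.
 *
 *	size_t sz = sizeof(struct virtqueue) +
 *		    vq_size * sizeof(struct vq_desc_extra);
 *	struct virtqueue *vq = rte_zmalloc_socket("vq", sz,
 *			RTE_CACHE_LINE_SIZE, socket_id);
 */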

/**
 * Tell the backend not to interrupt us.
 */
void virtqueue_disable_intr(struct virtqueue *vq);

/**
 * Get all mbufs to be freed.
 */
void virtqueue_detatch_unused(struct virtqueue *vq);

static inline int
virtqueue_full(const struct virtqueue *vq)
{
	return vq->vq_free_cnt == 0;
}

#define VIRTQUEUE_NUSED(vq) \
	((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
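
/*
 * A minimal consumer-side sketch, assuming completions are drained one
 * used-ring entry at a time (illustrative only, not a verbatim excerpt
 * from the driver):
 *
 *	uint16_t nb = VIRTQUEUE_NUSED(vq);
 *	virtio_rmb();	// read used->idx before the used ring entries
 *	while (nb--) {
 *		uint16_t used_idx = vq->vq_used_cons_idx &
 *				    (vq->vq_nentries - 1);
 *		struct vring_used_elem *uep =
 *			&vq->vq_ring.used->ring[used_idx];
 *		// uep->id is the head index of the finished chain;
 *		// vq_descx[uep->id] recovers the request context
 *		vq->vq_used_cons_idx++;
 *	}
 */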

static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
	virtio_wmb();
	vq->vq_ring.avail->idx = vq->vq_avail_idx;
}

static inline void
vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the
	 * new descriptor.
	 */
	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
	if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
		vq->vq_ring.avail->ring[avail_idx] = desc_idx;
	vq->vq_avail_idx++;
}

static inline int
virtqueue_kick_prepare(struct virtqueue *vq)
{
	return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}

static inline void
virtqueue_notify(struct virtqueue *vq)
{
	/*
	 * Ensure the updated avail->idx is visible to the host.
	 * For virtio on IA, the notification is through an I/O port
	 * operation, which is itself a serializing instruction.
	 */
	VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
}
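
/*
 * How the helpers above compose on the submit path, as a hedged sketch
 * (head_idx is assumed to be the head of a chain already written into
 * vq_ring.desc[]; this is not a verbatim excerpt from the driver):
 *
 *	vq_update_avail_ring(vq, head_idx);	// publish the chain head
 *	vq_update_avail_idx(vq);		// expose it to the host
 *	if (virtqueue_kick_prepare(vq))		// does the host want a kick?
 *		virtqueue_notify(vq);		// ring the doorbell
 */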

/**
 * Dump virtqueue internal structures, for debug purposes only.
 */
#define VIRTQUEUE_DUMP(vq) do { \
	uint16_t used_idx, nused; \
	used_idx = (vq)->vq_ring.used->idx; \
	nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
	VIRTIO_CRYPTO_INIT_LOG_DBG( \
	  "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
	  " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
	  " avail.flags=0x%x; used.flags=0x%x", \
	  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
	  (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
	  (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
	  (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
} while (0)

#endif /* _VIRTQUEUE_H_ */