xref: /dpdk/drivers/net/virtio/virtio_ring.h (revision 802a0389b54599b04b5e873dd9bc3db012f3ba65)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _VIRTIO_RING_H_
#define _VIRTIO_RING_H_

#include <stdint.h>

#include <rte_common.h>

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT       1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE      2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT   4

/* This flag means the descriptor was made available by the driver */
#define VRING_PACKED_DESC_F_AVAIL	(1 << 7)
/* This flag means the descriptor was used by the device */
#define VRING_PACKED_DESC_F_USED	(1 << 15)

/* Frequently used combinations */
#define VRING_PACKED_DESC_F_AVAIL_USED	(VRING_PACKED_DESC_F_AVAIL | \
					 VRING_PACKED_DESC_F_USED)

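/*
 * Illustrative sketch, not part of the original header: on a packed ring
 * (VirtIO 1.1), a descriptor has been used by the device when its AVAIL
 * and USED bits both equal the driver's current used wrap counter.  The
 * function name and the wrap_counter parameter are made up for this
 * example; a real driver also loads the flags with acquire ordering
 * before performing this check.
 */
static inline int
vring_example_packed_desc_is_used(uint16_t flags, int wrap_counter)
{
	int avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
	int used = !!(flags & VRING_PACKED_DESC_F_USED);

	return avail == used && used == wrap_counter;
}
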
/* The Host uses this in used->flags to advise the Guest: don't kick me
 * when you add a buffer.  It's unreliable, so it's simply an
 * optimization.  Guest will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY  1
/* The Guest uses this in avail->flags to advise the Host: don't
 * interrupt me when you consume a buffer.  It's unreliable, so it's
 * simply an optimization.  */
#define VRING_AVAIL_F_NO_INTERRUPT  1

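/*
 * Sketch (assumption, not in the original header): a split-ring driver
 * would typically check the device's hint before kicking.  A non-zero
 * return here means "notify the device"; the helper name is hypothetical,
 * and the device-side analog checks VRING_AVAIL_F_NO_INTERRUPT the same
 * way before raising an interrupt.
 */
static inline int
vring_example_should_kick(uint16_t used_flags)
{
	return !(used_flags & VRING_USED_F_NO_NOTIFY);
}
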
/* VirtIO ring descriptors: 16 bytes.
 * These can chain together via "next". */
struct vring_desc {
	uint64_t addr;  /* Address (guest-physical). */
	uint32_t len;   /* Length. */
	uint16_t flags; /* The flags as indicated above. */
	uint16_t next;  /* We chain unused descriptors via this. */
};

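/*
 * Sketch (assumption, not in the original header): chaining two split-ring
 * descriptors for one request, with the second buffer marked
 * device-writable.  Indices, addresses and the helper name are purely
 * illustrative.
 */
static inline void
vring_example_chain_two(struct vring_desc *desc, uint16_t head, uint16_t next,
			uint64_t out_addr, uint32_t out_len,
			uint64_t in_addr, uint32_t in_len)
{
	desc[head].addr = out_addr;
	desc[head].len = out_len;
	desc[head].flags = VRING_DESC_F_NEXT;
	desc[head].next = next;

	desc[next].addr = in_addr;
	desc[next].len = in_len;
	desc[next].flags = VRING_DESC_F_WRITE;
	desc[next].next = 0;
}
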
struct vring_avail {
	uint16_t flags;
	uint16_t idx;
	uint16_t ring[];
};

/* id is a 16-bit index. uint32_t is used here for ids for padding reasons. */
struct vring_used_elem {
	/* Index of start of used descriptor chain. */
	uint32_t id;
	/* Total length of the descriptor chain which was written to. */
	uint32_t len;
};

struct vring_used {
	uint16_t flags;
	RTE_ATOMIC(uint16_t) idx;
	struct vring_used_elem ring[];
};

/* For support of packed virtqueues in Virtio 1.1, the format of descriptors
 * looks like this.
 */
struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	RTE_ATOMIC(uint16_t) flags;
};

#define RING_EVENT_FLAGS_ENABLE 0x0
#define RING_EVENT_FLAGS_DISABLE 0x1
#define RING_EVENT_FLAGS_DESC 0x2
struct vring_packed_desc_event {
	uint16_t desc_event_off_wrap;
	uint16_t desc_event_flags;
};

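/*
 * Sketch (assumption, not in the original header): a packed-ring driver
 * turns device interrupts off by writing RING_EVENT_FLAGS_DISABLE into its
 * own event suppression structure (the "driver" area, which the device
 * reads).  The helper name is hypothetical.
 */
static inline void
vring_example_packed_disable_intr(struct vring_packed_desc_event *driver_event)
{
	driver_event->desc_event_flags = RING_EVENT_FLAGS_DISABLE;
}
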
struct vring_packed {
	unsigned int num;
	rte_iova_t desc_iova;
	struct vring_packed_desc *desc;
	struct vring_packed_desc_event *driver;
	struct vring_packed_desc_event *device;
};

struct vring {
	unsigned int num;
	rte_iova_t desc_iova;
	struct vring_desc  *desc;
	struct vring_avail *avail;
	struct vring_used  *used;
};

/* The standard layout for the ring is a contiguous chunk of memory which
 * looks like this.  We assume num is a power of 2.
 *
 * struct vring {
 *      // The actual descriptors (16 bytes each)
 *      struct vring_desc desc[num];
 *
 *      // A ring of available descriptor heads with free-running index.
 *      __u16 avail_flags;
 *      __u16 avail_idx;
 *      __u16 available[num];
 *      __u16 used_event_idx;
 *
 *      // Padding to the next align boundary.
 *      char pad[];
 *
 *      // A ring of used descriptor heads with free-running index.
 *      __u16 used_flags;
 *      __u16 used_idx;
 *      struct vring_used_elem used[num];
 *      __u16 avail_event_idx;
 * };
 *
 * NOTE: for VirtIO PCI, align is 4096.
 */

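/*
 * Worked example (not in the original header), assuming num = 256 and
 * align = 4096: the descriptor table takes 256 * 16 = 4096 bytes, the
 * available ring starts right after it at offset 4096, the used ring is
 * pushed up to the next 4096-byte boundary at offset 8192 and takes
 * 4 + 256 * 8 = 2052 bytes, so vring_size() below returns
 * 8192 + 2052 = 10244 bytes for the whole split ring.
 */
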
/*
 * We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility.
 */
#define vring_used_event(vr)  ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])

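/*
 * Sketch (assumption, not in the original header): with
 * VIRTIO_RING_F_EVENT_IDX negotiated, a driver asks to be interrupted once
 * the device has consumed buffers up to index 'idx' by publishing that
 * index through vring_used_event().  The helper name is hypothetical.
 */
static inline void
vring_example_set_used_event(struct vring *vr, uint16_t idx)
{
	vring_used_event(vr) = idx;
}
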
static inline size_t
vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
{
	size_t size;

	if (virtio_with_packed_queue(hw)) {
		size = num * sizeof(struct vring_packed_desc);
		size += sizeof(struct vring_packed_desc_event);
		size = RTE_ALIGN_CEIL(size, align);
		size += sizeof(struct vring_packed_desc_event);
		return size;
	}

	size = num * sizeof(struct vring_desc);
	size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
	size = RTE_ALIGN_CEIL(size, align);
	size += sizeof(struct vring_used) +
		(num * sizeof(struct vring_used_elem));
	return size;
}

static inline void
vring_init_split(struct vring *vr, uint8_t *p, rte_iova_t iova,
		 unsigned long align, unsigned int num)
{
	vr->num = num;
	vr->desc = (struct vring_desc *) p;
	vr->desc_iova = iova;
	vr->avail = (struct vring_avail *) (p +
		num * sizeof(struct vring_desc));
	vr->used = (void *)
		RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
}

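/*
 * Usage sketch (assumption, not in the original header): carve a split ring
 * out of a single zeroed DPDK heap allocation.  rte_zmalloc() and
 * rte_malloc_virt2iova() stand in for whatever allocator a real driver
 * uses, and <rte_malloc.h> is included here only for this example.
 */
#include <rte_malloc.h>

static inline int
vring_example_setup_split(struct virtio_hw *hw, struct vring *vr,
			  unsigned int num, unsigned long align)
{
	size_t size = vring_size(hw, num, align);
	uint8_t *p = rte_zmalloc("vring_example", size, align);

	if (p == NULL)
		return -1;

	vring_init_split(vr, p, rte_malloc_virt2iova(p), align, num);
	return 0;
}
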
static inline void
vring_init_packed(struct vring_packed *vr, uint8_t *p, rte_iova_t iova,
		  unsigned long align, unsigned int num)
{
	vr->num = num;
	vr->desc = (struct vring_packed_desc *)p;
	vr->desc_iova = iova;
	vr->driver = (struct vring_packed_desc_event *)(p +
			vr->num * sizeof(struct vring_packed_desc));
	vr->device = (struct vring_packed_desc_event *)
		RTE_ALIGN_CEIL(((uintptr_t)vr->driver +
				sizeof(struct vring_packed_desc_event)), align);
}

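/*
 * Worked example (not in the original header), assuming num = 256 and
 * align = 4096: the packed descriptor table takes 256 * 16 = 4096 bytes,
 * the driver event suppression structure sits right after it at offset
 * 4096, and the device event suppression structure is aligned up to offset
 * 8192, so vring_size() returns 8192 + 4 = 8196 bytes for the packed
 * layout.
 */
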
/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event?
 */
static inline int
vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

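/*
 * Worked example (not in the original header): the unsigned 16-bit
 * arithmetic checks whether event_idx falls in the half-open window
 * (old, new_idx].  With old = 5, new_idx = 8, event_idx = 6:
 * (8 - 6 - 1) = 1 < (8 - 5) = 3, so an event is needed; with
 * event_idx = 9 the left side wraps to 65534 and no event is sent.
 * Index wrap-around works the same way: old = 65534, new_idx = 1,
 * event_idx = 65535 gives 1 < 3, so the event still fires.
 */
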
#endif /* _VIRTIO_RING_H_ */