xref: /minix3/minix/lib/libvirtio/virtio_ring.h (revision 433d6423c39e34ec4b79c950597bb2d236f886be)
#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H
/* An interface for efficient virtio implementation, currently for use by KVM
 * and lguest, but hopefully others soon.  Do NOT change this since it will
 * break existing servers and clients.
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright Rusty Russell IBM Corporation 2007. */

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT	1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE	2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT	4

/* The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer.  It's unreliable, so it's simply an optimization.  Guest
 * will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY	1
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer.  It's unreliable, so it's simply an
 * optimization.  */
#define VRING_AVAIL_F_NO_INTERRUPT	1

/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC	28

/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX		29

/* Virtio ring descriptors: 16 bytes.  These can chain together via "next". */
struct vring_desc {
	/* Address (guest-physical). */
	u64_t addr;
	/* Length. */
	u32_t len;
	/* The flags as indicated above. */
	u16_t flags;
	/* We chain unused descriptors via this, too */
	u16_t next;
};
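
#if 0
/* Illustrative sketch only, not part of the original header: how two
 * descriptors might be chained with the flags above for a request that has a
 * device-readable header followed by a device-writable data buffer.  The
 * function name, the physical addresses and the lengths are hypothetical. */
static void example_fill_chain(struct vring_desc *desc, u64_t hdr_phys,
	u64_t buf_phys)
{
	/* Descriptor 0: request header, read-only for the device,
	 * continues into descriptor 1. */
	desc[0].addr = hdr_phys;
	desc[0].len = 16;
	desc[0].flags = VRING_DESC_F_NEXT;
	desc[0].next = 1;

	/* Descriptor 1: data buffer the device writes to; last in the chain. */
	desc[1].addr = buf_phys;
	desc[1].len = 512;
	desc[1].flags = VRING_DESC_F_WRITE;
	desc[1].next = 0;	/* ignored without VRING_DESC_F_NEXT */
}
#endif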

struct vring_avail {
	u16_t flags;
	u16_t idx;
	u16_t ring[];
};

/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
	/* Index of start of used descriptor chain. */
	u32_t id;
	/* Total length of the descriptor chain which was used (written to) */
	u32_t len;
};

struct vring_used {
	u16_t flags;
	u16_t idx;
	struct vring_used_elem ring[];
};

struct vring {
	unsigned int num;

	struct vring_desc *desc;

	struct vring_avail *avail;

	struct vring_used *used;
};

/* The standard layout for the ring is a contiguous chunk of memory which
 * looks like this.  We assume num is a power of 2.
 *
 * struct vring
 * {
 *	// The actual descriptors (16 bytes each)
 *	struct vring_desc desc[num];
 *
 *	// A ring of available descriptor heads with free-running index.
 *	u16_t avail_flags;
 *	u16_t avail_idx;
 *	u16_t available[num];
 *	u16_t used_event_idx;
 *
 *	// Padding to the next align boundary.
 *	char pad[];
 *
 *	// A ring of used descriptor heads with free-running index.
 *	u16_t used_flags;
 *	u16_t used_idx;
 *	struct vring_used_elem used[num];
 *	u16_t avail_event_idx;
 * };
 */
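/* Worked example with hypothetical parameters: for num = 8 and align = 4096,
 * desc[] takes 8 * 16 = 128 bytes at offset 0, the avail part (flags, idx,
 * ring[8], used_event_idx) takes 2 + 2 + 16 + 2 = 22 bytes ending at offset
 * 150, the used part starts at the next 4096-byte boundary, and
 * vring_size(8, 4096) below works out to 4096 + (2 + 2 + 8 * 8 + 2) = 4166
 * bytes. */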
/* We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility. */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(u16_t *)&(vr)->used->ring[(vr)->num])
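
/* Hedged usage sketch, not part of the original header: with
 * VIRTIO_RING_F_EVENT_IDX negotiated, a guest that has handled used entries
 * up to some index last_used might re-arm interrupts with
 *
 *	vring_used_event(vr) = last_used;
 *
 * while the host publishes, via vring_avail_event(vr), the avail index after
 * which it wants to be kicked again; both sides then apply the
 * vring_need_event() test further down instead of the flags fields. */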

static inline void vring_init(struct vring *vr, unsigned int num, void *p,
			      unsigned long align)
{
	vr->num = num;
	vr->desc = p;
	vr->avail = p + num*sizeof(struct vring_desc);
	vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(u16_t)
		+ align-1) & ~(align - 1));
}

static inline unsigned vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num + sizeof(u16_t) * (3 + num)
		 + align - 1) & ~(align - 1))
		+ sizeof(u16_t) * 3 + sizeof(struct vring_used_elem) * num;
}
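
#if 0
/* Illustrative sketch only, not part of the original header: one way a driver
 * might combine vring_size() and vring_init().  alloc_contig_aligned() and
 * PAGE_SIZE are hypothetical stand-ins for the platform's contiguous-memory
 * allocator (e.g. alloc_contig() on MINIX) and its page size; memset()
 * assumes <string.h>. */
static int example_setup_ring(struct vring *vr, unsigned int queue_size)
{
	unsigned size = vring_size(queue_size, PAGE_SIZE);
	void *p = alloc_contig_aligned(size, PAGE_SIZE);	/* hypothetical */

	if (p == NULL)
		return -1;

	memset(p, 0, size);			/* the ring must start zeroed */
	vring_init(vr, queue_size, p, PAGE_SIZE);
	return 0;
}
#endif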

#if 0
/* The following is used with the used_event and avail_event indices
 * (VIRTIO_RING_F_EVENT_IDX). */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(u16_t event_idx, u16_t new_idx, u16_t old)
{
	/* Note: Xen has similar logic for notification hold-off
	 * in include/xen/interface/io/ring.h with req_event and req_prod
	 * corresponding to event_idx + 1 and new_idx respectively.
	 * Note also that req_event and req_prod in Xen start at 1,
	 * event indexes in virtio start at 0. */
	return (u16_t)(new_idx - event_idx - 1) < (u16_t)(new_idx - old);
}
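
/* Worked example with hypothetical values: if the other side set
 * event_idx = 6 and we have just moved our index from old = 5 to new_idx = 8,
 * then (u16_t)(8 - 6 - 1) = 1 < (u16_t)(8 - 5) = 3, so an event is needed;
 * with event_idx = 2 the left-hand side is 5, which is not less than 3, so
 * the notification is suppressed. */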

#ifdef __KERNEL__
#include <linux/irqreturn.h>
struct virtio_device;
struct virtqueue;

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
void vring_del_virtqueue(struct virtqueue *vq);
/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

irqreturn_t vring_interrupt(int irq, void *_vq);
#endif /* __KERNEL__ */
#endif /* 0 */
#endif /* _LINUX_VIRTIO_RING_H */