/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _VIRTIO_RING_H_
#define _VIRTIO_RING_H_

#include <stdint.h>

#include <rte_common.h>

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4

/* This flag means the descriptor was made available by the driver */
#define VRING_PACKED_DESC_F_AVAIL (1 << 7)
/* This flag means the descriptor was used by the device */
#define VRING_PACKED_DESC_F_USED (1 << 15)

/* Frequently used combinations */
#define VRING_PACKED_DESC_F_AVAIL_USED (VRING_PACKED_DESC_F_AVAIL | \
					VRING_PACKED_DESC_F_USED)

/* The Host uses this in used->flags to advise the Guest: don't kick me
 * when you add a buffer. It's unreliable, so it's simply an
 * optimization. Guest will still kick if it's out of buffers.
 */
#define VRING_USED_F_NO_NOTIFY 1
/* The Guest uses this in avail->flags to advise the Host: don't
 * interrupt me when you consume a buffer. It's unreliable, so it's
 * simply an optimization.
 */
#define VRING_AVAIL_F_NO_INTERRUPT 1

/* VirtIO ring descriptors: 16 bytes.
 * These can chain together via "next".
 */
struct vring_desc {
	uint64_t addr;  /* Address (guest-physical). */
	uint32_t len;   /* Length. */
	uint16_t flags; /* The flags as indicated above. */
	uint16_t next;  /* We chain unused descriptors via this. */
};

struct vring_avail {
	uint16_t flags;
	uint16_t idx;
	uint16_t ring[0];
};

/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */
struct vring_used_elem {
	/* Index of start of used descriptor chain. */
	uint32_t id;
	/* Total length of the descriptor chain which was written to. */
	uint32_t len;
};

struct vring_used {
	uint16_t flags;
	volatile uint16_t idx;
	struct vring_used_elem ring[0];
};

/* For support of packed virtqueues in Virtio 1.1 the format of descriptors
 * looks like this.
 */
struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

#define RING_EVENT_FLAGS_ENABLE 0x0
#define RING_EVENT_FLAGS_DISABLE 0x1
#define RING_EVENT_FLAGS_DESC 0x2
struct vring_packed_desc_event {
	uint16_t desc_event_off_wrap;
	uint16_t desc_event_flags;
};

struct vring_packed {
	unsigned int num;
	struct vring_packed_desc *desc;
	struct vring_packed_desc_event *driver;
	struct vring_packed_desc_event *device;
};

struct vring {
	unsigned int num;
	struct vring_desc *desc;
	struct vring_avail *avail;
	struct vring_used *used;
};

/* The standard layout for the ring is a continuous chunk of memory which
 * looks like this. We assume num is a power of 2.
 *
 * struct vring {
 *	// The actual descriptors (16 bytes each)
 *	struct vring_desc desc[num];
 *
 *	// A ring of available descriptor heads with free-running index.
 *	__u16 avail_flags;
 *	__u16 avail_idx;
 *	__u16 available[num];
 *	__u16 used_event_idx;
 *
 *	// Padding to the next align boundary.
 *	char pad[];
 *
 *	// A ring of used descriptor heads with free-running index.
 *	__u16 used_flags;
 *	__u16 used_idx;
 *	struct vring_used_elem used[num];
 *	__u16 avail_event_idx;
 * };
 *
 * NOTE: for VirtIO PCI, align is 4096.
 */

/*
 * We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility.
 */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])

static inline size_t
vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
{
	size_t size;

	if (vtpci_packed_queue(hw)) {
		size = num * sizeof(struct vring_packed_desc);
		size += sizeof(struct vring_packed_desc_event);
		size = RTE_ALIGN_CEIL(size, align);
		size += sizeof(struct vring_packed_desc_event);
		return size;
	}

	size = num * sizeof(struct vring_desc);
	size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
	size = RTE_ALIGN_CEIL(size, align);
	size += sizeof(struct vring_used) +
		(num * sizeof(struct vring_used_elem));
	return size;
}

static inline void
vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
		 unsigned int num)
{
	vr->num = num;
	vr->desc = (struct vring_desc *) p;
	vr->avail = (struct vring_avail *) (p +
		num * sizeof(struct vring_desc));
	vr->used = (void *)
		RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
}

static inline void
vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
		  unsigned int num)
{
	vr->num = num;
	vr->desc = (struct vring_packed_desc *)p;
	vr->driver = (struct vring_packed_desc_event *)(p +
		vr->num * sizeof(struct vring_packed_desc));
	vr->device = (struct vring_packed_desc_event *)
		RTE_ALIGN_CEIL(((uintptr_t)vr->driver +
			sizeof(struct vring_packed_desc_event)), align);
}

/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event?
 */
static inline int
vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
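
/*
 * Illustrative sketch, not part of the upstream header: with
 * VIRTIO_RING_F_EVENT_IDX negotiated, the driver reads the event index the
 * device publishes via vring_avail_event() and feeds it to vring_need_event()
 * to decide whether a kick is needed after exposing new buffers. The helper
 * name below is hypothetical, and the memory barrier a real driver would
 * issue before reading the event index is omitted for brevity.
 */
static inline int
vring_example_needs_kick(struct vring *vr, uint16_t old_avail_idx)
{
	/* The device stores the avail index it wants to be notified about in
	 * the spare uint16_t at the end of the used ring. */
	uint16_t event_idx = vring_avail_event(vr);

	/* True when event_idx lies in [old_avail_idx, vr->avail->idx - 1],
	 * i.e. the update from old to new stepped over the requested index. */
	return vring_need_event(event_idx, vr->avail->idx, old_avail_idx);
}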

#endif /* _VIRTIO_RING_H_ */