/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
 */

#ifndef _VIRTIO_H
#define	_VIRTIO_H

/*
 * VIRTIO FRAMEWORK
 *
 * This framework handles the initialisation and operation common to all Virtio
 * device types; e.g., Virtio Block (vioblk), Virtio Network (vioif), etc.  The
 * framework presently provides for what is now described as a "legacy" driver
 * in the current issue of the "Virtual I/O Device (VIRTIO) Version 1.1"
 * specification.  Though several new specifications have been released, legacy
 * devices are still the most widely available on current hypervisor platforms.
 * Legacy devices make use of the native byte order of the host system.
 *
 * FRAMEWORK INITIALISATION: STARTING
 *
 * Client drivers will, in their attach(9E) routine, make an early call to
 * virtio_init().  This causes the framework to allocate some base resources
 * and begin initialising the device.  This routine confirms that the device
 * will operate in the supported legacy mode as per the specification.  A
 * failure here means that we cannot presently support this device.
 *
 * Once virtio_init() returns, the initialisation phase has begun and the
 * driver can examine negotiated features and set up virtqueues.  The
 * initialisation phase ends when the driver calls either
 * virtio_init_complete() or virtio_fini().
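 *
 * As an illustration, the start of an attach(9E) implementation might look
 * like this.  This is a minimal sketch only: the "mydev" names and the
 * feature mask are hypothetical, and the final boolean_t argument is merely
 * shown with a representative value:
 *
 *	static int
 *	mydev_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 *	{
 *		virtio_t *vio;
 *
 *		if (cmd != DDI_ATTACH)
 *			return (DDI_FAILURE);
 *
 *		if ((vio = virtio_init(dip, MYDEV_WANTED_FEATURES,
 *		    B_TRUE)) == NULL) {
 *			return (DDI_FAILURE);
 *		}
 *		...
 *	}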
 *
 * FRAMEWORK INITIALISATION: FEATURE NEGOTIATION
 *
 * The virtio_init() call accepts a bitmask of desired features that the driver
 * supports.  The framework will negotiate the common set of features supported
 * by both the driver and the device.  The presence of any individual feature
 * can be tested after the initialisation phase has begun using
 * virtio_feature_present().
 *
 * The framework will additionally negotiate some set of features that are not
 * specific to a device type on behalf of the client driver; e.g., support for
 * indirect descriptors.
 *
 * Some features allow the driver to read additional configuration values from
 * the device-specific regions of the device register space.  These can be
 * accessed via the virtio_dev_get*() and virtio_dev_put*() family of
 * functions.
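 *
 * A brief sketch of both facilities; the feature bit and configuration space
 * offset named here are hypothetical, device-specific values:
 *
 *	if (virtio_feature_present(vio, MYDEV_F_FANCY)) {
 *		uint16_t val = virtio_dev_get16(vio, MYDEV_CONFIG_FANCY_OFF);
 *		...
 *	}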
 *
 * FRAMEWORK INITIALISATION: VIRTQUEUE CONFIGURATION
 *
 * During the initialisation phase, the client driver may configure some number
 * of virtqueues with virtio_queue_alloc().  Once initialisation has been
 * completed, no further queues can be configured without destroying the
 * framework object and beginning again from scratch.
 *
 * When configuring a queue, the driver must know the queue index number.  This
 * generally comes from the section of the specification describing the
 * specific device type; e.g., Virtio Network devices have a receive queue at
 * index 0, and a transmit queue at index 1.  The name given to the queue is
 * informational and has no impact on device operation.
 *
 * Most queues will require an interrupt handler function.  When a queue
 * notification interrupt is received, the provided handler will be called with
 * two arguments: first, the provided user data argument; and second, a pointer
 * to the "virtio_t" object for this instance.
 *
 * A maximum segment count must be selected for each queue.  This count is the
 * upper bound on the number of scatter-gather cookies that will be accepted,
 * and applies to both direct and indirect descriptor based queues.  This cap
 * is usually either negotiated with the device, or determined structurally
 * based on the shape of the buffers required for device operation.
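 *
 * A sketch of configuring a receive queue at index 0 for a hypothetical
 * device; the handler, soft state, segment cap, and the value chosen for the
 * remaining boolean_t argument are all illustrative assumptions:
 *
 *	static uint_t
 *	mydev_rx_handler(caddr_t arg0, caddr_t arg1)
 *	{
 *		mydev_t *md = (mydev_t *)arg0;
 *		virtio_t *vio = (virtio_t *)arg1;
 *		...
 *		return (DDI_INTR_CLAIMED);
 *	}
 *
 *	md->md_rxq = virtio_queue_alloc(vio, 0, "rx", mydev_rx_handler, md,
 *	    B_FALSE, MYDEV_MAX_SEGS);
 *	if (md->md_rxq == NULL) {
 *		virtio_fini(vio, B_TRUE);
 *		return (DDI_FAILURE);
 *	}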
 *
 * FRAMEWORK INITIALISATION: CONFIGURATION SPACE CHANGE HANDLER
 *
 * During the initialisation phase, the client driver may register a handler
 * function for receiving device configuration space change events.  Once
 * initialisation has been completed, this cannot be changed without destroying
 * the framework object and beginning again from scratch.
 *
 * When a configuration space change interrupt is received, the provided
 * handler will be called with two arguments: first, the provided user data
 * argument; and second, a pointer to the "virtio_t" object for this instance.
 * The handler is called in an interrupt context.
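 *
 * A minimal sketch, assuming a hypothetical "mydev_cfgchange" handler with
 * the same shape as a queue interrupt handler:
 *
 *	virtio_register_cfgchange_handler(vio, mydev_cfgchange, md);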
 *
 * FRAMEWORK INITIALISATION: FINISHING
 *
 * Once queue configuration has been completed, the client driver calls
 * virtio_init_complete() to finalise resource allocation and set the device to
 * the running state (DRIVER_OK).  The framework will allocate any interrupts
 * needed for queue notifications at this time.
 *
 * If the client driver cannot complete initialisation, the instance may
 * instead be torn down with virtio_fini().  Signalling failure to this routine
 * will report failure to the device instead of resetting it, which may be
 * reported by the hypervisor as a fault.
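 *
 * A sketch of the end of the initialisation phase; VIRTIO_ANY_INTR_TYPE is
 * defined at the end of this header:
 *
 *	if (virtio_init_complete(vio, VIRTIO_ANY_INTR_TYPE) != DDI_SUCCESS) {
 *		virtio_fini(vio, B_TRUE);
 *		return (DDI_FAILURE);
 *	}
 *
 * Here, passing B_TRUE to virtio_fini() signals failure to the device as
 * described above.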
 *
 * DESCRIPTOR CHAINS
 *
 * Most devices accept I/O requests from the driver through at least one queue.
 * Some devices are operated by submission of synchronous requests.  The device
 * is expected to process the request and return some kind of status; e.g., a
 * block device accepts write requests from the file system and signals when
 * they have completed or failed.
 *
 * Other devices operate by asynchronous delivery of I/O requests to the
 * driver; e.g., a network device may receive incoming frames at any time.
 * Inbound asynchronous delivery is usually achieved by populating a queue with
 * a series of memory buffers where the incoming data will be written by the
 * device at some later time.
 *
 * Whether for inbound or outbound transfers, buffers are inserted into the
 * ring through chains of one or more descriptors.  Each descriptor has a
 * transfer direction (to or from the device), and a physical address and
 * length (i.e., a DMA cookie).  The framework automatically manages the slight
 * differences in operation between direct and indirect descriptor usage on
 * behalf of the client driver.
 *
 * A chain of descriptors is allocated by calling virtio_chain_alloc() against
 * a particular queue.  This function accepts a kmem flag as per
 * kmem_alloc(9F).  A client driver specific void pointer may be attached to
 * the chain with virtio_chain_data_set() and read back later with
 * virtio_chain_data(); e.g., after it is returned by a call to
 * virtio_queue_poll().
 *
 * Cookies are added to a chain by calling virtio_chain_append() with the
 * appropriate physical address and transfer direction.  This function may fail
 * if the chain is already using the maximum number of cookies for this queue.
 * Client drivers are responsible for appropriate use of virtio_dma_sync()
 * or ddi_dma_sync(9F) on any memory appended to a descriptor chain prior to
 * chain submission.
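 *
 * A sketch of building a chain for a single outbound buffer backed by a
 * "virtio_dma_t" (see DMA MEMORY MANAGEMENT below); the queue, request
 * pointer, and error handling are illustrative, and the device reads an
 * outbound buffer, hence VIRTIO_DIR_DEVICE_READS:
 *
 *	virtio_chain_t *vic;
 *
 *	if ((vic = virtio_chain_alloc(md->md_txq, KM_SLEEP)) == NULL)
 *		return (ENOMEM);
 *	virtio_chain_data_set(vic, req);
 *
 *	if (virtio_chain_append(vic, virtio_dma_cookie_pa(dma, 0),
 *	    virtio_dma_cookie_size(dma, 0),
 *	    VIRTIO_DIR_DEVICE_READS) != DDI_SUCCESS) {
 *		virtio_chain_free(vic);
 *		return (ENOMEM);
 *	}
 *	virtio_dma_sync(dma, DDI_DMA_SYNC_FORDEV);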
 *
 * Once fully constructed and synced, a chain can be submitted to the device by
 * calling virtio_chain_submit().  The caller may choose to flush the queue
 * contents to the device on each submission, or to batch notifications until
 * later to amortise the notification cost over more requests.  If batching
 * notifications, outstanding submissions can be flushed with a call to
 * virtio_queue_flush().  Note that the framework will insert an appropriate
 * memory barrier to ensure writes by the driver complete before making the
 * submitted descriptor visible to the device.
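 *
 * A sketch of batched submission: passing B_FALSE defers the notification,
 * and a single virtio_queue_flush() then notifies the device once for the
 * whole batch:
 *
 *	virtio_chain_submit(vic0, B_FALSE);
 *	virtio_chain_submit(vic1, B_FALSE);
 *	virtio_queue_flush(md->md_txq);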
 *
 * A chain may be reset for reuse with new cookies by calling
 * virtio_chain_clear().  The chain may be freed completely by calling
 * virtio_chain_free().
 *
 * When a descriptor chain is returned to the driver by the device, it may
 * include a received data length value.  This value can be accessed via
 * virtio_chain_received_length().  There is some suggestion in more recent
 * Virtio specifications that, depending on the device type and the
 * hypervisor, this value may not always be accurate or useful.
 *
 * VIRTQUEUE OPERATION
 *
 * The queue size (i.e., the number of direct descriptor entries) can be
 * found with virtio_queue_size().  This value is static over the lifetime
 * of the queue.
 *
 * The number of descriptor chains presently submitted to the device and not
 * yet returned can be obtained via virtio_queue_nactive().
 *
 * Over time the device will return descriptor chains to the driver in response
 * to device activity.  Any newly returned chains may be retrieved by the
 * driver by calling virtio_queue_poll().  See the DESCRIPTOR CHAINS section
 * for more detail about managing descriptor chain objects.  Note that the
 * framework will insert an appropriate memory barrier to ensure that writes by
 * the host are complete before returning the chain to the client driver.
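 *
 * A sketch of a completion path draining returned chains; the request type
 * and its DMA member are hypothetical, and the data pointer is the one
 * attached earlier with virtio_chain_data_set():
 *
 *	virtio_chain_t *vic;
 *
 *	while ((vic = virtio_queue_poll(md->md_rxq)) != NULL) {
 *		mydev_req_t *req = virtio_chain_data(vic);
 *		size_t len = virtio_chain_received_length(vic);
 *
 *		virtio_dma_sync(req->mr_dma, DDI_DMA_SYNC_FORCPU);
 *		...
 *		virtio_chain_free(vic);
 *	}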
 *
 * The NO_INTERRUPT flag on a queue may be set or cleared with
 * virtio_queue_no_interrupt().  Note that this flag is purely advisory, and
 * may not actually stop interrupts from the device in a timely fashion.
 *
 * INTERRUPT MANAGEMENT
 *
 * A mutex used within an interrupt handler must be initialised with the
 * correct interrupt priority.  After the initialisation phase is complete, the
 * client should use virtio_intr_pri() to get a value suitable to pass to
 * mutex_init(9F).
 *
 * When the driver is ready to receive notifications from the device, the
 * virtio_interrupts_enable() routine may be called.  Interrupts may be
 * disabled again by calling virtio_interrupts_disable().  Interrupt resources
 * will be deallocated as part of a subsequent call to virtio_fini().
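 *
 * A minimal sketch of both steps:
 *
 *	mutex_init(&md->md_mutex, NULL, MUTEX_DRIVER, virtio_intr_pri(vio));
 *
 *	if (virtio_interrupts_enable(vio) != DDI_SUCCESS) {
 *		...
 *	}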
 *
 * DMA MEMORY MANAGEMENT: ALLOCATION AND FREE
 *
 * Client drivers may allocate memory suitable for communication with the
 * device by using virtio_dma_alloc().  This function accepts an allocation
 * size, a DMA attribute template, a set of DMA flags, and a kmem flag.
 * A "virtio_dma_t" object is returned to track and manage the allocation.
 *
 * The DMA flags value will be a combination of direction flags (e.g.,
 * DDI_DMA_READ or DDI_DMA_WRITE) and mapping flags (e.g., DDI_DMA_CONSISTENT
 * or DDI_DMA_STREAMING).  The kmem flag is either KM_SLEEP or KM_NOSLEEP,
 * as described in kmem_alloc(9F).
 *
 * Memory that is no longer required can be freed using virtio_dma_free().
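 *
 * A sketch of allocating a buffer for the device to write into; the size and
 * the "mydev_dma_attr" attribute template are illustrative:
 *
 *	virtio_dma_t *dma;
 *
 *	if ((dma = virtio_dma_alloc(vio, 4096, &mydev_dma_attr,
 *	    DDI_DMA_READ | DDI_DMA_CONSISTENT, KM_SLEEP)) == NULL) {
 *		return (ENOMEM);
 *	}
 *	...
 *	virtio_dma_free(dma);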
 *
 * DMA MEMORY MANAGEMENT: BINDING WITHOUT ALLOCATION
 *
 * If another subsystem has loaned memory to your client driver, you may need
 * to allocate and bind a handle without additional backing memory.  The
 * virtio_dma_alloc_nomem() function can be used for this purpose, returning a
 * "virtio_dma_t" object.
 *
 * Once allocated, an arbitrary kernel memory location can be bound for DMA
 * with virtio_dma_bind().  The binding can be subsequently undone with
 * virtio_dma_unbind(), allowing the "virtio_dma_t" object to be reused for
 * another binding.
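 *
 * A sketch of binding loaned memory, assuming "va" and "len" describe the
 * loaned region and that the trailing int arguments are the same DMA flags
 * and kmem flag described above:
 *
 *	virtio_dma_t *dma;
 *
 *	if ((dma = virtio_dma_alloc_nomem(vio, &mydev_dma_attr,
 *	    KM_SLEEP)) == NULL)
 *		return (ENOMEM);
 *
 *	if (virtio_dma_bind(dma, va, len,
 *	    DDI_DMA_WRITE | DDI_DMA_STREAMING, KM_SLEEP) != DDI_SUCCESS) {
 *		virtio_dma_free(dma);
 *		return (EIO);
 *	}
 *	...
 *	virtio_dma_unbind(dma);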
 *
 * DMA MEMORY MANAGEMENT: VIRTUAL AND PHYSICAL ADDRESSES
 *
 * The total size of a mapping (with or without its own backing memory) can be
 * found with virtio_dma_size().  A void pointer to a kernel virtual address
 * within the buffer can be obtained via virtio_dma_va(); this function accepts
 * a linear offset into the VA range and performs bounds checking.
 *
 * The number of physical memory addresses (DMA cookies) can be found with
 * virtio_dma_ncookies().  The physical address and length of each cookie can
 * be found with virtio_dma_cookie_pa() and virtio_dma_cookie_size(); these
 * functions are keyed on the zero-indexed cookie number.
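 *
 * A sketch of walking each cookie in a binding to append it to a descriptor
 * chain destined for the device to write:
 *
 *	for (uint_t n = 0; n < virtio_dma_ncookies(dma); n++) {
 *		if (virtio_chain_append(vic,
 *		    virtio_dma_cookie_pa(dma, n),
 *		    virtio_dma_cookie_size(dma, n),
 *		    VIRTIO_DIR_DEVICE_WRITES) != DDI_SUCCESS) {
 *			...
 *		}
 *	}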
 *
 * DMA MEMORY MANAGEMENT: SYNCHRONISATION
 *
 * When passing memory to the device, or reading memory returned from the
 * device, DMA synchronisation must be performed in case it is required by the
 * underlying platform.  A convenience wrapper exists: virtio_dma_sync().  This
 * routine synchronises the entire binding and accepts the same synchronisation
 * type values as ddi_dma_sync(9F).
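 *
 * For example, before submitting a buffer the device will read:
 *
 *	virtio_dma_sync(dma, DDI_DMA_SYNC_FORDEV);
 *
 * and before reading a buffer the device has written:
 *
 *	virtio_dma_sync(dma, DDI_DMA_SYNC_FORCPU);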
 *
 * QUIESCE
 *
 * As quiesce(9E) merely requires that the device come to a complete stop, most
 * client drivers will be able to call virtio_quiesce() without additional
 * actions.  This will reset the device, immediately halting all queue
 * activity, and return a value suitable for returning from the client driver
 * quiesce(9E) entrypoint.  This routine must only be called from quiesce
 * context as it performs no synchronisation with other threads.
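 *
 * A minimal sketch of such a quiesce(9E) entrypoint; retrieval of the soft
 * state is elided:
 *
 *	static int
 *	mydev_quiesce(dev_info_t *dip)
 *	{
 *		mydev_t *md = ...;
 *
 *		return (virtio_quiesce(md->md_virtio));
 *	}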
 *
 * DETACH
 *
 * Some devices are effectively long-polled; that is, they submit some number
 * of descriptor chains to the device that are not returned to the driver until
 * some asynchronous event occurs such as the receipt of an incoming packet or
 * a device hot plug event.  When detaching the device the return of these
 * outstanding buffers must be arranged.  Some device types may have task
 * management commands that can force the orderly return of these chains, but
 * the only way to do so uniformly is to reset the device and claw back the
 * memory.
 *
 * If the client driver has outstanding descriptors and needs a hard stop on
 * device activity it can call virtio_shutdown().  This routine will bring
 * queue processing to an orderly stop and then reset the device, causing it to
 * cease use of any DMA resources.  Once this function returns, the driver may
 * call virtio_queue_evacuate() on each queue to retrieve any previously
 * submitted chains.
 *
 * To tear down resources (e.g., interrupts and allocated memory) the client
 * driver must finally call virtio_fini().  If virtio_shutdown() was not
 * needed, this routine will also reset the device.
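 *
 * A sketch of that detach(9E) sequence; reclamation of the buffer attached
 * to each chain is elided:
 *
 *	virtio_shutdown(vio);
 *	while ((vic = virtio_queue_evacuate(md->md_rxq)) != NULL) {
 *		...
 *		virtio_chain_free(vic);
 *	}
 *	virtio_fini(vio, B_FALSE);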
 */

#ifdef __cplusplus
extern "C" {
#endif

typedef struct virtio virtio_t;
typedef struct virtio_queue virtio_queue_t;
typedef struct virtio_chain virtio_chain_t;
typedef struct virtio_dma virtio_dma_t;

typedef enum virtio_direction {
	/*
	 * In the base specification, a descriptor is either set up to be
	 * written by the device or to be read by the device, but not both.
	 */
	VIRTIO_DIR_DEVICE_WRITES = 1,
	VIRTIO_DIR_DEVICE_READS
} virtio_direction_t;

void virtio_fini(virtio_t *, boolean_t);
virtio_t *virtio_init(dev_info_t *, uint64_t, boolean_t);
int virtio_init_complete(virtio_t *, int);
int virtio_quiesce(virtio_t *);
void virtio_shutdown(virtio_t *);

void virtio_register_cfgchange_handler(virtio_t *, ddi_intr_handler_t *,
    void *);

void *virtio_intr_pri(virtio_t *);

void virtio_device_reset(virtio_t *);

uint8_t virtio_dev_get8(virtio_t *, uintptr_t);
uint16_t virtio_dev_get16(virtio_t *, uintptr_t);
uint32_t virtio_dev_get32(virtio_t *, uintptr_t);
uint64_t virtio_dev_get64(virtio_t *, uintptr_t);

void virtio_dev_put8(virtio_t *, uintptr_t, uint8_t);
void virtio_dev_put16(virtio_t *, uintptr_t, uint16_t);
void virtio_dev_put32(virtio_t *, uintptr_t, uint32_t);

boolean_t virtio_feature_present(virtio_t *, uint64_t);

virtio_queue_t *virtio_queue_alloc(virtio_t *, uint16_t, const char *,
    ddi_intr_handler_t *, void *, boolean_t, uint_t);

virtio_chain_t *virtio_queue_poll(virtio_queue_t *);
virtio_chain_t *virtio_queue_evacuate(virtio_queue_t *);
void virtio_queue_flush(virtio_queue_t *);
void virtio_queue_no_interrupt(virtio_queue_t *, boolean_t);
uint_t virtio_queue_nactive(virtio_queue_t *);
uint_t virtio_queue_size(virtio_queue_t *);

virtio_chain_t *virtio_chain_alloc(virtio_queue_t *, int);
void virtio_chain_clear(virtio_chain_t *);
void virtio_chain_free(virtio_chain_t *);
int virtio_chain_append(virtio_chain_t *, uint64_t, size_t,
    virtio_direction_t);

void *virtio_chain_data(virtio_chain_t *);
void virtio_chain_data_set(virtio_chain_t *, void *);

void virtio_chain_submit(virtio_chain_t *, boolean_t);
size_t virtio_chain_received_length(virtio_chain_t *);

int virtio_interrupts_enable(virtio_t *);
void virtio_interrupts_disable(virtio_t *);

virtio_dma_t *virtio_dma_alloc(virtio_t *, size_t, const ddi_dma_attr_t *, int,
    int);
virtio_dma_t *virtio_dma_alloc_nomem(virtio_t *, const ddi_dma_attr_t *, int);
void virtio_dma_free(virtio_dma_t *);
int virtio_dma_bind(virtio_dma_t *, void *, size_t, int, int);
void virtio_dma_unbind(virtio_dma_t *);
void virtio_dma_sync(virtio_dma_t *, int);

void *virtio_dma_va(virtio_dma_t *, size_t);
size_t virtio_dma_size(virtio_dma_t *);
uint_t virtio_dma_ncookies(virtio_dma_t *);
uint64_t virtio_dma_cookie_pa(virtio_dma_t *, uint_t);
size_t virtio_dma_cookie_size(virtio_dma_t *, uint_t);

/*
 * virtio_init_complete() accepts a mask of allowed interrupt types using the
 * DDI_INTR_TYPE_* family of constants.  If no specific interrupt type is
 * required, pass VIRTIO_ANY_INTR_TYPE instead:
 */
#define	VIRTIO_ANY_INTR_TYPE	0

#ifdef __cplusplus
}
#endif

#endif /* _VIRTIO_H */