1*64439ec0SJoshua M. Clulow /*
2*64439ec0SJoshua M. Clulow * CDDL HEADER START
3*64439ec0SJoshua M. Clulow *
4*64439ec0SJoshua M. Clulow * The contents of this file are subject to the terms of the
5*64439ec0SJoshua M. Clulow * Common Development and Distribution License (the "License").
6*64439ec0SJoshua M. Clulow * You may not use this file except in compliance with the License.
7*64439ec0SJoshua M. Clulow *
8*64439ec0SJoshua M. Clulow * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*64439ec0SJoshua M. Clulow * or http://www.opensolaris.org/os/licensing.
10*64439ec0SJoshua M. Clulow * See the License for the specific language governing permissions
11*64439ec0SJoshua M. Clulow * and limitations under the License.
12*64439ec0SJoshua M. Clulow *
13*64439ec0SJoshua M. Clulow * When distributing Covered Code, include this CDDL HEADER in each
14*64439ec0SJoshua M. Clulow * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*64439ec0SJoshua M. Clulow * If applicable, add the following below this CDDL HEADER, with the
16*64439ec0SJoshua M. Clulow * fields enclosed by brackets "[]" replaced with your own identifying
17*64439ec0SJoshua M. Clulow * information: Portions Copyright [yyyy] [name of copyright owner]
18*64439ec0SJoshua M. Clulow *
19*64439ec0SJoshua M. Clulow * CDDL HEADER END
20*64439ec0SJoshua M. Clulow */
21*64439ec0SJoshua M. Clulow
22*64439ec0SJoshua M. Clulow /*
23*64439ec0SJoshua M. Clulow * Copyright 2022 Oxide Computer Company
24*64439ec0SJoshua M. Clulow */
25*64439ec0SJoshua M. Clulow
26*64439ec0SJoshua M. Clulow /*
27*64439ec0SJoshua M. Clulow * VIRTIO 9P DRIVER
28*64439ec0SJoshua M. Clulow *
29*64439ec0SJoshua M. Clulow * This driver provides support for Virtio 9P devices. Each driver instance
30*64439ec0SJoshua M. Clulow * attaches to a single underlying 9P channel. A 9P file system will use LDI
31*64439ec0SJoshua M. Clulow * to open this device.
32*64439ec0SJoshua M. Clulow */
33*64439ec0SJoshua M. Clulow
34*64439ec0SJoshua M. Clulow #include <sys/modctl.h>
35*64439ec0SJoshua M. Clulow #include <sys/types.h>
36*64439ec0SJoshua M. Clulow #include <sys/file.h>
37*64439ec0SJoshua M. Clulow #include <sys/errno.h>
38*64439ec0SJoshua M. Clulow #include <sys/param.h>
39*64439ec0SJoshua M. Clulow #include <sys/stropts.h>
40*64439ec0SJoshua M. Clulow #include <sys/stream.h>
41*64439ec0SJoshua M. Clulow #include <sys/strsubr.h>
42*64439ec0SJoshua M. Clulow #include <sys/kmem.h>
43*64439ec0SJoshua M. Clulow #include <sys/ddi.h>
44*64439ec0SJoshua M. Clulow #include <sys/sunddi.h>
45*64439ec0SJoshua M. Clulow #include <sys/conf.h>
46*64439ec0SJoshua M. Clulow #include <sys/devops.h>
47*64439ec0SJoshua M. Clulow #include <sys/ksynch.h>
48*64439ec0SJoshua M. Clulow #include <sys/stat.h>
49*64439ec0SJoshua M. Clulow #include <sys/modctl.h>
50*64439ec0SJoshua M. Clulow #include <sys/debug.h>
51*64439ec0SJoshua M. Clulow #include <sys/pci.h>
52*64439ec0SJoshua M. Clulow #include <sys/containerof.h>
53*64439ec0SJoshua M. Clulow #include <sys/ctype.h>
54*64439ec0SJoshua M. Clulow #include <sys/stdbool.h>
55*64439ec0SJoshua M. Clulow #include <sys/sysmacros.h>
56*64439ec0SJoshua M. Clulow #include <sys/list.h>
57*64439ec0SJoshua M. Clulow
58*64439ec0SJoshua M. Clulow #include "virtio.h"
59*64439ec0SJoshua M. Clulow #include "vio9p_impl.h"
60*64439ec0SJoshua M. Clulow
/*
 * Per-instance soft state (vio9p_t), managed with the ddi_soft_state_*()
 * routines:
 */
static void *vio9p_state;

/*
 * Forward declarations for the interrupt handler, autoconfiguration entry
 * points, character device entry points, and internal request management
 * routines:
 */
uint_t vio9p_int_handler(caddr_t, caddr_t);
static uint_t vio9p_poll(vio9p_t *);
static int vio9p_quiesce(dev_info_t *);
static int vio9p_attach(dev_info_t *, ddi_attach_cmd_t);
static int vio9p_teardown(vio9p_t *, vio9p_teardown_style_t);
static int vio9p_detach(dev_info_t *, ddi_detach_cmd_t);
static int vio9p_open(dev_t *, int, int, cred_t *);
static int vio9p_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vio9p_close(dev_t, int, int, cred_t *);
static int vio9p_read(dev_t, uio_t *, cred_t *);
static int vio9p_write(dev_t, uio_t *, cred_t *);
static vio9p_req_t *vio9p_req_alloc_impl(vio9p_t *, int);
static void vio9p_req_free_impl(vio9p_t *, vio9p_req_t *);
76*64439ec0SJoshua M. Clulow
/*
 * Character device entry points.  Only open, close, read, write, and ioctl
 * are implemented; the remaining members are nodev/nochpoll stubs.  D_MP
 * declares the driver safe for concurrent entry.
 */
static struct cb_ops vio9p_cb_ops = {
	.cb_rev =			CB_REV,
	.cb_flag =			D_NEW | D_MP,

	.cb_open =			vio9p_open,
	.cb_close =			vio9p_close,
	.cb_read =			vio9p_read,
	.cb_write =			vio9p_write,
	.cb_ioctl =			vio9p_ioctl,

	.cb_strategy =			nodev,
	.cb_print =			nodev,
	.cb_dump =			nodev,
	.cb_devmap =			nodev,
	.cb_mmap =			nodev,
	.cb_segmap =			nodev,
	.cb_chpoll =			nochpoll,
	.cb_prop_op =			ddi_prop_op,
	.cb_str =			NULL,
	.cb_aread =			nodev,
	.cb_awrite =			nodev,
};
99*64439ec0SJoshua M. Clulow
/*
 * Autoconfiguration entry points: attach, detach, and fast-reboot quiesce.
 */
static struct dev_ops vio9p_dev_ops = {
	.devo_rev =			DEVO_REV,
	.devo_refcnt =			0,

	.devo_attach =			vio9p_attach,
	.devo_detach =			vio9p_detach,
	.devo_quiesce =			vio9p_quiesce,

	.devo_cb_ops =			&vio9p_cb_ops,

	.devo_getinfo =			ddi_no_info,
	.devo_identify =		nulldev,
	.devo_probe =			nulldev,
	.devo_reset =			nodev,
	.devo_bus_ops =			NULL,
	.devo_power =			NULL,
};
117*64439ec0SJoshua M. Clulow
/*
 * Module linkage structures used to register this driver with the kernel
 * module framework.
 */
static struct modldrv vio9p_modldrv = {
	.drv_modops =			&mod_driverops,
	.drv_linkinfo =			"VIRTIO 9P driver",
	.drv_dev_ops =			&vio9p_dev_ops
};

static struct modlinkage vio9p_modlinkage = {
	.ml_rev =			MODREV_1,
	.ml_linkage =			{ &vio9p_modldrv, NULL }
};
128*64439ec0SJoshua M. Clulow
/*
 * DMA attribute template used for the outbound and inbound request buffers
 * allocated in vio9p_req_alloc_impl().  The scatter-gather list length
 * (dma_attr_sgllen) is capped at VIRTIO_9P_MAX_SGL, which is also the limit
 * asserted when cookies are appended to a virtqueue chain.
 */
static const ddi_dma_attr_t vio9p_dma_attr = {
	.dma_attr_version =		DMA_ATTR_V0,
	.dma_attr_addr_lo =		0x0000000000000000,
	.dma_attr_addr_hi =		0xFFFFFFFFFFFFFFFF,
	.dma_attr_count_max =		0x00000000FFFFFFFF,
	.dma_attr_align =		1,
	.dma_attr_burstsizes =		1,
	.dma_attr_minxfer =		1,
	.dma_attr_maxxfer =		0x00000000FFFFFFFF,
	.dma_attr_seg =			0x00000000FFFFFFFF,
	.dma_attr_sgllen =		VIRTIO_9P_MAX_SGL,
	.dma_attr_granular =		1,
	.dma_attr_flags =		0
};
146*64439ec0SJoshua M. Clulow
147*64439ec0SJoshua M. Clulow uint_t
vio9p_int_handler(caddr_t arg0,caddr_t arg1)148*64439ec0SJoshua M. Clulow vio9p_int_handler(caddr_t arg0, caddr_t arg1)
149*64439ec0SJoshua M. Clulow {
150*64439ec0SJoshua M. Clulow vio9p_t *vin = (vio9p_t *)arg0;
151*64439ec0SJoshua M. Clulow
152*64439ec0SJoshua M. Clulow mutex_enter(&vin->vin_mutex);
153*64439ec0SJoshua M. Clulow uint_t count = vio9p_poll(vin);
154*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
155*64439ec0SJoshua M. Clulow
156*64439ec0SJoshua M. Clulow return (count > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
157*64439ec0SJoshua M. Clulow }
158*64439ec0SJoshua M. Clulow
159*64439ec0SJoshua M. Clulow static void
vio9p_req_freelist_put(vio9p_t * vin,vio9p_req_t * vnr)160*64439ec0SJoshua M. Clulow vio9p_req_freelist_put(vio9p_t *vin, vio9p_req_t *vnr)
161*64439ec0SJoshua M. Clulow {
162*64439ec0SJoshua M. Clulow VERIFY(!list_link_active(&vnr->vnr_link_complete));
163*64439ec0SJoshua M. Clulow VERIFY(!list_link_active(&vnr->vnr_link_free));
164*64439ec0SJoshua M. Clulow
165*64439ec0SJoshua M. Clulow vin->vin_generation = 0;
166*64439ec0SJoshua M. Clulow list_insert_head(&vin->vin_req_freelist, vnr);
167*64439ec0SJoshua M. Clulow
168*64439ec0SJoshua M. Clulow if (vin->vin_open) {
169*64439ec0SJoshua M. Clulow /*
170*64439ec0SJoshua M. Clulow * Wake any callers waiting in vio9p_req_alloc() for an entry:
171*64439ec0SJoshua M. Clulow */
172*64439ec0SJoshua M. Clulow cv_broadcast(&vin->vin_cv);
173*64439ec0SJoshua M. Clulow }
174*64439ec0SJoshua M. Clulow }
175*64439ec0SJoshua M. Clulow
176*64439ec0SJoshua M. Clulow static void
vio9p_req_free(vio9p_t * vin,vio9p_req_t * vnr)177*64439ec0SJoshua M. Clulow vio9p_req_free(vio9p_t *vin, vio9p_req_t *vnr)
178*64439ec0SJoshua M. Clulow {
179*64439ec0SJoshua M. Clulow VERIFY(MUTEX_HELD(&vin->vin_mutex));
180*64439ec0SJoshua M. Clulow
181*64439ec0SJoshua M. Clulow if (list_link_active(&vnr->vnr_link_complete)) {
182*64439ec0SJoshua M. Clulow list_remove(&vin->vin_completes, vnr);
183*64439ec0SJoshua M. Clulow }
184*64439ec0SJoshua M. Clulow
185*64439ec0SJoshua M. Clulow vio9p_req_freelist_put(vin, vnr);
186*64439ec0SJoshua M. Clulow }
187*64439ec0SJoshua M. Clulow
/*
 * Fully destroy a request: release its virtqueue chain and DMA buffers (any
 * of which may be absent on a partially constructed request), unlink it from
 * the device-wide request list, and free the memory.  The request must not
 * be on the completion or free lists.
 */
static void
vio9p_req_free_impl(vio9p_t *vin, vio9p_req_t *vnr)
{
	if (vnr->vnr_chain != NULL) {
		virtio_chain_free(vnr->vnr_chain);
		vnr->vnr_chain = NULL;
	}
	if (vnr->vnr_dma_in != NULL) {
		virtio_dma_free(vnr->vnr_dma_in);
		vnr->vnr_dma_in = NULL;
	}
	if (vnr->vnr_dma_out != NULL) {
		virtio_dma_free(vnr->vnr_dma_out);
		vnr->vnr_dma_out = NULL;
	}

	VERIFY(!list_link_active(&vnr->vnr_link_complete));
	VERIFY(!list_link_active(&vnr->vnr_link_free));

	/*
	 * Drop the request from the all-requests list and keep the count in
	 * sync with it:
	 */
	list_remove(&vin->vin_reqs, vnr);
	VERIFY3U(vin->vin_nreqs, >, 0);
	vin->vin_nreqs--;

	kmem_free(vnr, sizeof (*vnr));
}
213*64439ec0SJoshua M. Clulow
214*64439ec0SJoshua M. Clulow /*
215*64439ec0SJoshua M. Clulow * Allocate a request for a transaction. If one is not available and this is
216*64439ec0SJoshua M. Clulow * for a blocking request, wait for one to become available.
217*64439ec0SJoshua M. Clulow */
218*64439ec0SJoshua M. Clulow static vio9p_req_t *
vio9p_req_alloc(vio9p_t * vin,bool wait)219*64439ec0SJoshua M. Clulow vio9p_req_alloc(vio9p_t *vin, bool wait)
220*64439ec0SJoshua M. Clulow {
221*64439ec0SJoshua M. Clulow vio9p_req_t *vnr;
222*64439ec0SJoshua M. Clulow
223*64439ec0SJoshua M. Clulow VERIFY(MUTEX_HELD(&vin->vin_mutex));
224*64439ec0SJoshua M. Clulow
225*64439ec0SJoshua M. Clulow again:
226*64439ec0SJoshua M. Clulow /*
227*64439ec0SJoshua M. Clulow * Try the free list first:
228*64439ec0SJoshua M. Clulow */
229*64439ec0SJoshua M. Clulow if ((vnr = list_remove_head(&vin->vin_req_freelist)) != NULL) {
230*64439ec0SJoshua M. Clulow return (vnr);
231*64439ec0SJoshua M. Clulow }
232*64439ec0SJoshua M. Clulow
233*64439ec0SJoshua M. Clulow /*
234*64439ec0SJoshua M. Clulow * Failing that, try to allocate more memory if we are under our
235*64439ec0SJoshua M. Clulow * request cap:
236*64439ec0SJoshua M. Clulow */
237*64439ec0SJoshua M. Clulow if ((vnr = vio9p_req_alloc_impl(vin, KM_NOSLEEP_LAZY)) != NULL) {
238*64439ec0SJoshua M. Clulow return (vnr);
239*64439ec0SJoshua M. Clulow }
240*64439ec0SJoshua M. Clulow
241*64439ec0SJoshua M. Clulow /*
242*64439ec0SJoshua M. Clulow * If this is a blocking request, wait for an entry to become available
243*64439ec0SJoshua M. Clulow * on the free list:
244*64439ec0SJoshua M. Clulow */
245*64439ec0SJoshua M. Clulow if (wait) {
246*64439ec0SJoshua M. Clulow if (cv_wait_sig(&vin->vin_cv, &vin->vin_mutex) == 0) {
247*64439ec0SJoshua M. Clulow return (NULL);
248*64439ec0SJoshua M. Clulow }
249*64439ec0SJoshua M. Clulow
250*64439ec0SJoshua M. Clulow goto again;
251*64439ec0SJoshua M. Clulow }
252*64439ec0SJoshua M. Clulow
253*64439ec0SJoshua M. Clulow return (NULL);
254*64439ec0SJoshua M. Clulow }
255*64439ec0SJoshua M. Clulow
/*
 * Construct a new request: the tracking object itself, a virtqueue chain,
 * and DMA buffers for both directions of the 9P conversation.  Returns NULL
 * if the device-wide request cap has been reached or if any allocation
 * fails; on failure, partially constructed state is released via
 * vio9p_req_free_impl().
 */
static vio9p_req_t *
vio9p_req_alloc_impl(vio9p_t *vin, int kmflag)
{
	dev_info_t *dip = vin->vin_dip;
	vio9p_req_t *vnr;

	if (vin->vin_nreqs >= VIRTIO_9P_MAX_REQS) {
		/*
		 * We have reached the limit of requests that we are willing to
		 * allocate for the whole device.
		 */
		return (NULL);
	}

	/*
	 * Note that the request object has various list link fields which are
	 * initialised to zero here and which we check at various points later.
	 */
	if ((vnr = kmem_zalloc(sizeof (*vnr), kmflag)) == NULL) {
		return (NULL);
	}
	list_insert_tail(&vin->vin_reqs, vnr);
	vin->vin_nreqs++;

	if ((vnr->vnr_chain = virtio_chain_alloc(vin->vin_vq, kmflag)) ==
	    NULL) {
		dev_err(vin->vin_dip, CE_WARN, "!chain alloc failure");
		goto fail;
	}
	/*
	 * Stash a backpointer so that the request can be recovered from a
	 * completed chain (see vio9p_poll()):
	 */
	virtio_chain_data_set(vnr->vnr_chain, vnr);

	/*
	 * Allocate outbound request buffer.  The device reads this buffer,
	 * so its cookies are appended with VIRTIO_DIR_DEVICE_READS:
	 */
	if ((vnr->vnr_dma_out = virtio_dma_alloc(vin->vin_virtio,
	    VIRTIO_9P_REQ_SIZE, &vio9p_dma_attr,
	    DDI_DMA_CONSISTENT | DDI_DMA_WRITE, kmflag)) == NULL) {
		dev_err(dip, CE_WARN, "!DMA out alloc failure");
		goto fail;
	}
	VERIFY3U(virtio_dma_ncookies(vnr->vnr_dma_out), <=, VIRTIO_9P_MAX_SGL);

	for (uint_t n = 0; n < virtio_dma_ncookies(vnr->vnr_dma_out); n++) {
		if (virtio_chain_append(vnr->vnr_chain,
		    virtio_dma_cookie_pa(vnr->vnr_dma_out, n),
		    virtio_dma_cookie_size(vnr->vnr_dma_out, n),
		    VIRTIO_DIR_DEVICE_READS) != DDI_SUCCESS) {
			dev_err(dip, CE_WARN, "!chain append out failure");
			goto fail;
		}
	}

	/*
	 * Allocate inbound request buffer.  The device writes the response
	 * here, so its cookies are appended with VIRTIO_DIR_DEVICE_WRITES:
	 */
	if ((vnr->vnr_dma_in = virtio_dma_alloc(vin->vin_virtio,
	    VIRTIO_9P_REQ_SIZE, &vio9p_dma_attr,
	    DDI_DMA_CONSISTENT | DDI_DMA_READ, kmflag)) == NULL) {
		dev_err(dip, CE_WARN, "!DMA in alloc failure");
		goto fail;
	}
	VERIFY3U(virtio_dma_ncookies(vnr->vnr_dma_in), <=, VIRTIO_9P_MAX_SGL);

	for (uint_t n = 0; n < virtio_dma_ncookies(vnr->vnr_dma_in); n++) {
		if (virtio_chain_append(vnr->vnr_chain,
		    virtio_dma_cookie_pa(vnr->vnr_dma_in, n),
		    virtio_dma_cookie_size(vnr->vnr_dma_in, n),
		    VIRTIO_DIR_DEVICE_WRITES) != DDI_SUCCESS) {
			dev_err(dip, CE_WARN, "!chain append in failure");
			goto fail;
		}
	}

	return (vnr);

fail:
	/*
	 * vio9p_req_free_impl() tolerates a partially constructed request;
	 * kmem_zalloc() above guarantees the unallocated pointers are NULL.
	 */
	vio9p_req_free_impl(vin, vnr);
	return (NULL);
}
335*64439ec0SJoshua M. Clulow
336*64439ec0SJoshua M. Clulow static uint_t
vio9p_poll(vio9p_t * vin)337*64439ec0SJoshua M. Clulow vio9p_poll(vio9p_t *vin)
338*64439ec0SJoshua M. Clulow {
339*64439ec0SJoshua M. Clulow virtio_chain_t *vic;
340*64439ec0SJoshua M. Clulow uint_t count = 0;
341*64439ec0SJoshua M. Clulow bool wakeup = false;
342*64439ec0SJoshua M. Clulow
343*64439ec0SJoshua M. Clulow VERIFY(MUTEX_HELD(&vin->vin_mutex));
344*64439ec0SJoshua M. Clulow
345*64439ec0SJoshua M. Clulow while ((vic = virtio_queue_poll(vin->vin_vq)) != NULL) {
346*64439ec0SJoshua M. Clulow vio9p_req_t *vnr = virtio_chain_data(vic);
347*64439ec0SJoshua M. Clulow
348*64439ec0SJoshua M. Clulow count++;
349*64439ec0SJoshua M. Clulow
350*64439ec0SJoshua M. Clulow virtio_dma_sync(vnr->vnr_dma_in, DDI_DMA_SYNC_FORCPU);
351*64439ec0SJoshua M. Clulow
352*64439ec0SJoshua M. Clulow if (!vin->vin_open ||
353*64439ec0SJoshua M. Clulow vnr->vnr_generation != vin->vin_generation) {
354*64439ec0SJoshua M. Clulow /*
355*64439ec0SJoshua M. Clulow * Either the device is not open, or the device has
356*64439ec0SJoshua M. Clulow * been closed and opened again since this request was
357*64439ec0SJoshua M. Clulow * submitted. Just free the memory and drive on.
358*64439ec0SJoshua M. Clulow */
359*64439ec0SJoshua M. Clulow vio9p_req_free(vin, vnr);
360*64439ec0SJoshua M. Clulow continue;
361*64439ec0SJoshua M. Clulow }
362*64439ec0SJoshua M. Clulow
363*64439ec0SJoshua M. Clulow list_insert_tail(&vin->vin_completes, vnr);
364*64439ec0SJoshua M. Clulow wakeup = true;
365*64439ec0SJoshua M. Clulow }
366*64439ec0SJoshua M. Clulow
367*64439ec0SJoshua M. Clulow if (wakeup) {
368*64439ec0SJoshua M. Clulow cv_broadcast(&vin->vin_cv);
369*64439ec0SJoshua M. Clulow }
370*64439ec0SJoshua M. Clulow
371*64439ec0SJoshua M. Clulow return (count);
372*64439ec0SJoshua M. Clulow }
373*64439ec0SJoshua M. Clulow
/*
 * Attach a driver instance: initialise the Virtio framework, read the
 * optional mount tag from device configuration space, allocate the request
 * virtqueue, and hang out a minor node.  On failure, vio9p_teardown() is
 * invoked with a style that tells it which resources already exist
 * (PRE_MUTEX before the mutex/cv are initialised, ATTACH afterwards).
 */
static int
vio9p_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	virtio_t *vio;
	vio9p_req_t *vnr;

	if (cmd != DDI_ATTACH) {
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(vio9p_state, instance) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	if ((vio = virtio_init(dip, VIRTIO_9P_WANTED_FEATURES, B_TRUE)) ==
	    NULL) {
		ddi_soft_state_free(vio9p_state, instance);
		dev_err(dip, CE_WARN, "failed to start Virtio init");
		return (DDI_FAILURE);
	}

	vio9p_t *vin = ddi_get_soft_state(vio9p_state, instance);
	vin->vin_dip = dip;
	vin->vin_virtio = vio;
	ddi_set_driver_private(dip, vin);
	list_create(&vin->vin_reqs, sizeof (vio9p_req_t),
	    offsetof(vio9p_req_t, vnr_link));
	list_create(&vin->vin_completes, sizeof (vio9p_req_t),
	    offsetof(vio9p_req_t, vnr_link_complete));
	list_create(&vin->vin_req_freelist, sizeof (vio9p_req_t),
	    offsetof(vio9p_req_t, vnr_link_free));

	/*
	 * If the device advertises a mount tag, copy it (clamped to our
	 * buffer size) out of configuration space one byte at a time:
	 */
	if (virtio_feature_present(vio, VIRTIO_9P_F_MOUNT_TAG)) {
		uint16_t len = virtio_dev_get16(vio, VIRTIO_9P_CONFIG_TAG_SZ);
		if (len > VIRTIO_9P_TAGLEN) {
			len = VIRTIO_9P_TAGLEN;
		}

		/*
		 * This array is one byte longer than VIRTIO_9P_TAGLEN, and is
		 * thus always NUL-terminated by the use of
		 * ddi_soft_state_zalloc() above.
		 */
		for (uint16_t n = 0; n < len; n++) {
			vin->vin_tag[n] = virtio_dev_get8(vio,
			    VIRTIO_9P_CONFIG_TAG + n);
		}
	}

	/*
	 * When allocating the request queue, we include enough slots for a
	 * full set of cookies (based on our DMA attributes) in both the in and
	 * the out direction.
	 */
	if ((vin->vin_vq = virtio_queue_alloc(vio, VIRTIO_9P_VIRTQ_REQUESTS,
	    "requests", vio9p_int_handler, vin, B_FALSE,
	    2 * VIRTIO_9P_MAX_SGL)) == NULL) {
		return (vio9p_teardown(vin, VIRTIO_9P_TEARDOWN_PRE_MUTEX));
	}

	if (virtio_init_complete(vio, VIRTIO_ANY_INTR_TYPE) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "failed to complete Virtio init");
		return (vio9p_teardown(vin, VIRTIO_9P_TEARDOWN_PRE_MUTEX));
	}

	/*
	 * The mutex must interoperate with the interrupt priority established
	 * by virtio_init_complete() above:
	 */
	cv_init(&vin->vin_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&vin->vin_mutex, NULL, MUTEX_DRIVER, virtio_intr_pri(vio));

	/*
	 * Make sure the free list contains at least one request at attach time
	 * so that the device is always somewhat useable:
	 */
	if ((vnr = vio9p_req_alloc_impl(vin, KM_SLEEP)) == NULL) {
		dev_err(dip, CE_WARN, "failed to allocate first request");
		return (vio9p_teardown(vin, VIRTIO_9P_TEARDOWN_ATTACH));
	}
	vio9p_req_freelist_put(vin, vnr);

	if (virtio_interrupts_enable(vio) != DDI_SUCCESS) {
		return (vio9p_teardown(vin, VIRTIO_9P_TEARDOWN_ATTACH));
	}

	/*
	 * Hang out a minor node so that we can be opened.
	 */
	int minor = ddi_get_instance(dip);
	if (ddi_create_minor_node(dip, "9p", S_IFCHR, minor, DDI_PSEUDO,
	    0) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "could not create minor node");
		return (vio9p_teardown(vin, VIRTIO_9P_TEARDOWN_ATTACH));
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);
}
471*64439ec0SJoshua M. Clulow
472*64439ec0SJoshua M. Clulow static int
vio9p_teardown(vio9p_t * vin,vio9p_teardown_style_t style)473*64439ec0SJoshua M. Clulow vio9p_teardown(vio9p_t *vin, vio9p_teardown_style_t style)
474*64439ec0SJoshua M. Clulow {
475*64439ec0SJoshua M. Clulow dev_info_t *dip = vin->vin_dip;
476*64439ec0SJoshua M. Clulow
477*64439ec0SJoshua M. Clulow if (style != VIRTIO_9P_TEARDOWN_PRE_MUTEX) {
478*64439ec0SJoshua M. Clulow /*
479*64439ec0SJoshua M. Clulow * Make sure we do not hold the mutex across interrupt disable.
480*64439ec0SJoshua M. Clulow */
481*64439ec0SJoshua M. Clulow VERIFY(MUTEX_NOT_HELD(&vin->vin_mutex));
482*64439ec0SJoshua M. Clulow }
483*64439ec0SJoshua M. Clulow
484*64439ec0SJoshua M. Clulow ddi_remove_minor_node(dip, NULL);
485*64439ec0SJoshua M. Clulow
486*64439ec0SJoshua M. Clulow if (vin->vin_virtio != NULL) {
487*64439ec0SJoshua M. Clulow /*
488*64439ec0SJoshua M. Clulow * Disable interrupts so that we can be sure our handler does
489*64439ec0SJoshua M. Clulow * not run again while we free things.
490*64439ec0SJoshua M. Clulow */
491*64439ec0SJoshua M. Clulow virtio_interrupts_disable(vin->vin_virtio);
492*64439ec0SJoshua M. Clulow }
493*64439ec0SJoshua M. Clulow
494*64439ec0SJoshua M. Clulow /*
495*64439ec0SJoshua M. Clulow * Empty the free list:
496*64439ec0SJoshua M. Clulow */
497*64439ec0SJoshua M. Clulow for (;;) {
498*64439ec0SJoshua M. Clulow vio9p_req_t *vnr = list_remove_head(&vin->vin_req_freelist);
499*64439ec0SJoshua M. Clulow if (vnr == NULL) {
500*64439ec0SJoshua M. Clulow break;
501*64439ec0SJoshua M. Clulow }
502*64439ec0SJoshua M. Clulow vio9p_req_free_impl(vin, vnr);
503*64439ec0SJoshua M. Clulow }
504*64439ec0SJoshua M. Clulow VERIFY(list_is_empty(&vin->vin_req_freelist));
505*64439ec0SJoshua M. Clulow list_destroy(&vin->vin_req_freelist);
506*64439ec0SJoshua M. Clulow
507*64439ec0SJoshua M. Clulow /*
508*64439ec0SJoshua M. Clulow * Any active requests should have been freed in vio9p_detach(), so
509*64439ec0SJoshua M. Clulow * there should be no other requests left at this point.
510*64439ec0SJoshua M. Clulow */
511*64439ec0SJoshua M. Clulow VERIFY0(vin->vin_nreqs);
512*64439ec0SJoshua M. Clulow VERIFY(list_is_empty(&vin->vin_reqs));
513*64439ec0SJoshua M. Clulow list_destroy(&vin->vin_reqs);
514*64439ec0SJoshua M. Clulow
515*64439ec0SJoshua M. Clulow VERIFY(list_is_empty(&vin->vin_completes));
516*64439ec0SJoshua M. Clulow list_destroy(&vin->vin_completes);
517*64439ec0SJoshua M. Clulow
518*64439ec0SJoshua M. Clulow /*
519*64439ec0SJoshua M. Clulow * Tear down the Virtio framework.
520*64439ec0SJoshua M. Clulow */
521*64439ec0SJoshua M. Clulow if (vin->vin_virtio != NULL) {
522*64439ec0SJoshua M. Clulow boolean_t failed = (style != VIRTIO_9P_TEARDOWN_DETACH);
523*64439ec0SJoshua M. Clulow virtio_fini(vin->vin_virtio, failed);
524*64439ec0SJoshua M. Clulow }
525*64439ec0SJoshua M. Clulow
526*64439ec0SJoshua M. Clulow if (style != VIRTIO_9P_TEARDOWN_PRE_MUTEX) {
527*64439ec0SJoshua M. Clulow mutex_destroy(&vin->vin_mutex);
528*64439ec0SJoshua M. Clulow cv_destroy(&vin->vin_cv);
529*64439ec0SJoshua M. Clulow }
530*64439ec0SJoshua M. Clulow
531*64439ec0SJoshua M. Clulow ddi_set_driver_private(dip, NULL);
532*64439ec0SJoshua M. Clulow ddi_soft_state_free(vio9p_state, ddi_get_instance(dip));
533*64439ec0SJoshua M. Clulow
534*64439ec0SJoshua M. Clulow return (style == VIRTIO_9P_TEARDOWN_DETACH ? DDI_SUCCESS : DDI_FAILURE);
535*64439ec0SJoshua M. Clulow }
536*64439ec0SJoshua M. Clulow
537*64439ec0SJoshua M. Clulow static int
vio9p_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)538*64439ec0SJoshua M. Clulow vio9p_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
539*64439ec0SJoshua M. Clulow {
540*64439ec0SJoshua M. Clulow vio9p_t *vin = ddi_get_driver_private(dip);
541*64439ec0SJoshua M. Clulow
542*64439ec0SJoshua M. Clulow if (cmd != DDI_DETACH) {
543*64439ec0SJoshua M. Clulow return (DDI_FAILURE);
544*64439ec0SJoshua M. Clulow }
545*64439ec0SJoshua M. Clulow
546*64439ec0SJoshua M. Clulow mutex_enter(&vin->vin_mutex);
547*64439ec0SJoshua M. Clulow
548*64439ec0SJoshua M. Clulow /*
549*64439ec0SJoshua M. Clulow * Detach will only be called once we are no longer held open.
550*64439ec0SJoshua M. Clulow */
551*64439ec0SJoshua M. Clulow VERIFY(!vin->vin_open);
552*64439ec0SJoshua M. Clulow
553*64439ec0SJoshua M. Clulow /*
554*64439ec0SJoshua M. Clulow * If a request was submitted to the hypervisor but never completed, it
555*64439ec0SJoshua M. Clulow * may still be active even though the device has been closed.
556*64439ec0SJoshua M. Clulow */
557*64439ec0SJoshua M. Clulow bool shutdown = false;
558*64439ec0SJoshua M. Clulow for (vio9p_req_t *vnr = list_head(&vin->vin_reqs);
559*64439ec0SJoshua M. Clulow vnr != NULL; vnr = list_next(&vin->vin_reqs, vnr)) {
560*64439ec0SJoshua M. Clulow if (!list_link_active(&vnr->vnr_link_free)) {
561*64439ec0SJoshua M. Clulow /*
562*64439ec0SJoshua M. Clulow * There is at least one active request. We need to
563*64439ec0SJoshua M. Clulow * reset the device to claw back the DMA memory.
564*64439ec0SJoshua M. Clulow */
565*64439ec0SJoshua M. Clulow shutdown = true;
566*64439ec0SJoshua M. Clulow break;
567*64439ec0SJoshua M. Clulow }
568*64439ec0SJoshua M. Clulow }
569*64439ec0SJoshua M. Clulow
570*64439ec0SJoshua M. Clulow if (shutdown) {
571*64439ec0SJoshua M. Clulow virtio_chain_t *vic;
572*64439ec0SJoshua M. Clulow
573*64439ec0SJoshua M. Clulow virtio_shutdown(vin->vin_virtio);
574*64439ec0SJoshua M. Clulow while ((vic = virtio_queue_evacuate(vin->vin_vq)) != NULL) {
575*64439ec0SJoshua M. Clulow vio9p_req_t *vnr = virtio_chain_data(vic);
576*64439ec0SJoshua M. Clulow
577*64439ec0SJoshua M. Clulow virtio_dma_sync(vnr->vnr_dma_in, DDI_DMA_SYNC_FORCPU);
578*64439ec0SJoshua M. Clulow
579*64439ec0SJoshua M. Clulow vio9p_req_free_impl(vin, vnr);
580*64439ec0SJoshua M. Clulow }
581*64439ec0SJoshua M. Clulow }
582*64439ec0SJoshua M. Clulow
583*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
584*64439ec0SJoshua M. Clulow
585*64439ec0SJoshua M. Clulow return (vio9p_teardown(vin, VIRTIO_9P_TEARDOWN_DETACH));
586*64439ec0SJoshua M. Clulow }
587*64439ec0SJoshua M. Clulow
588*64439ec0SJoshua M. Clulow static int
vio9p_quiesce(dev_info_t * dip)589*64439ec0SJoshua M. Clulow vio9p_quiesce(dev_info_t *dip)
590*64439ec0SJoshua M. Clulow {
591*64439ec0SJoshua M. Clulow vio9p_t *vin;
592*64439ec0SJoshua M. Clulow
593*64439ec0SJoshua M. Clulow if ((vin = ddi_get_driver_private(dip)) == NULL) {
594*64439ec0SJoshua M. Clulow return (DDI_FAILURE);
595*64439ec0SJoshua M. Clulow }
596*64439ec0SJoshua M. Clulow
597*64439ec0SJoshua M. Clulow return (virtio_quiesce(vin->vin_virtio));
598*64439ec0SJoshua M. Clulow }
599*64439ec0SJoshua M. Clulow
600*64439ec0SJoshua M. Clulow static int
vio9p_open(dev_t * dev,int flag,int otyp,cred_t * cred)601*64439ec0SJoshua M. Clulow vio9p_open(dev_t *dev, int flag, int otyp, cred_t *cred)
602*64439ec0SJoshua M. Clulow {
603*64439ec0SJoshua M. Clulow if (otyp != OTYP_CHR) {
604*64439ec0SJoshua M. Clulow return (EINVAL);
605*64439ec0SJoshua M. Clulow }
606*64439ec0SJoshua M. Clulow
607*64439ec0SJoshua M. Clulow /*
608*64439ec0SJoshua M. Clulow * This device represents a request-response communication channel
609*64439ec0SJoshua M. Clulow * between the host and the hypervisor; as such we insist that it be
610*64439ec0SJoshua M. Clulow * opened exclusively, and for both read and write access.
611*64439ec0SJoshua M. Clulow */
612*64439ec0SJoshua M. Clulow if (!(flag & FEXCL) || !(flag & FREAD) || !(flag & FWRITE)) {
613*64439ec0SJoshua M. Clulow return (EINVAL);
614*64439ec0SJoshua M. Clulow }
615*64439ec0SJoshua M. Clulow
616*64439ec0SJoshua M. Clulow vio9p_t *vin = ddi_get_soft_state(vio9p_state, getminor(*dev));
617*64439ec0SJoshua M. Clulow if (vin == NULL) {
618*64439ec0SJoshua M. Clulow return (ENXIO);
619*64439ec0SJoshua M. Clulow }
620*64439ec0SJoshua M. Clulow
621*64439ec0SJoshua M. Clulow mutex_enter(&vin->vin_mutex);
622*64439ec0SJoshua M. Clulow if (vin->vin_open) {
623*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
624*64439ec0SJoshua M. Clulow return (EBUSY);
625*64439ec0SJoshua M. Clulow }
626*64439ec0SJoshua M. Clulow vin->vin_open = true;
627*64439ec0SJoshua M. Clulow
628*64439ec0SJoshua M. Clulow vin->vin_generation++;
629*64439ec0SJoshua M. Clulow if (vin->vin_generation == 0) {
630*64439ec0SJoshua M. Clulow vin->vin_generation++;
631*64439ec0SJoshua M. Clulow }
632*64439ec0SJoshua M. Clulow
633*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
634*64439ec0SJoshua M. Clulow return (0);
635*64439ec0SJoshua M. Clulow }
636*64439ec0SJoshua M. Clulow
637*64439ec0SJoshua M. Clulow static int
vio9p_close(dev_t dev,int flag,int otyp,cred_t * cred)638*64439ec0SJoshua M. Clulow vio9p_close(dev_t dev, int flag, int otyp, cred_t *cred)
639*64439ec0SJoshua M. Clulow {
640*64439ec0SJoshua M. Clulow if (otyp != OTYP_CHR) {
641*64439ec0SJoshua M. Clulow return (EINVAL);
642*64439ec0SJoshua M. Clulow }
643*64439ec0SJoshua M. Clulow
644*64439ec0SJoshua M. Clulow vio9p_t *vin = ddi_get_soft_state(vio9p_state, getminor(dev));
645*64439ec0SJoshua M. Clulow if (vin == NULL) {
646*64439ec0SJoshua M. Clulow return (ENXIO);
647*64439ec0SJoshua M. Clulow }
648*64439ec0SJoshua M. Clulow
649*64439ec0SJoshua M. Clulow mutex_enter(&vin->vin_mutex);
650*64439ec0SJoshua M. Clulow if (!vin->vin_open) {
651*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
652*64439ec0SJoshua M. Clulow return (EIO);
653*64439ec0SJoshua M. Clulow }
654*64439ec0SJoshua M. Clulow
655*64439ec0SJoshua M. Clulow /*
656*64439ec0SJoshua M. Clulow * Free all completed requests that have not yet been read:
657*64439ec0SJoshua M. Clulow */
658*64439ec0SJoshua M. Clulow vio9p_req_t *vnr;
659*64439ec0SJoshua M. Clulow while ((vnr = list_remove_head(&vin->vin_completes)) != NULL) {
660*64439ec0SJoshua M. Clulow vio9p_req_free(vin, vnr);
661*64439ec0SJoshua M. Clulow }
662*64439ec0SJoshua M. Clulow
663*64439ec0SJoshua M. Clulow vin->vin_open = false;
664*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
665*64439ec0SJoshua M. Clulow return (0);
666*64439ec0SJoshua M. Clulow }
667*64439ec0SJoshua M. Clulow
668*64439ec0SJoshua M. Clulow static int
vio9p_ioctl(dev_t dev,int cmd,intptr_t arg,int mode,cred_t * cred,int * rvalp)669*64439ec0SJoshua M. Clulow vio9p_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred,
670*64439ec0SJoshua M. Clulow int *rvalp)
671*64439ec0SJoshua M. Clulow {
672*64439ec0SJoshua M. Clulow vio9p_t *vin = ddi_get_soft_state(vio9p_state, getminor(dev));
673*64439ec0SJoshua M. Clulow if (vin == NULL) {
674*64439ec0SJoshua M. Clulow return (ENXIO);
675*64439ec0SJoshua M. Clulow }
676*64439ec0SJoshua M. Clulow
677*64439ec0SJoshua M. Clulow switch (cmd) {
678*64439ec0SJoshua M. Clulow case VIO9P_IOC_MOUNT_TAG:
679*64439ec0SJoshua M. Clulow if (ddi_copyout(vin->vin_tag, (void *)arg,
680*64439ec0SJoshua M. Clulow sizeof (vin->vin_tag), mode) != 0) {
681*64439ec0SJoshua M. Clulow return (EFAULT);
682*64439ec0SJoshua M. Clulow }
683*64439ec0SJoshua M. Clulow return (0);
684*64439ec0SJoshua M. Clulow
685*64439ec0SJoshua M. Clulow default:
686*64439ec0SJoshua M. Clulow return (ENOTTY);
687*64439ec0SJoshua M. Clulow }
688*64439ec0SJoshua M. Clulow }
689*64439ec0SJoshua M. Clulow
690*64439ec0SJoshua M. Clulow static int
vio9p_read(dev_t dev,struct uio * uio,cred_t * cred)691*64439ec0SJoshua M. Clulow vio9p_read(dev_t dev, struct uio *uio, cred_t *cred)
692*64439ec0SJoshua M. Clulow {
693*64439ec0SJoshua M. Clulow bool blocking = (uio->uio_fmode & (FNDELAY | FNONBLOCK)) == 0;
694*64439ec0SJoshua M. Clulow vio9p_req_t *vnr;
695*64439ec0SJoshua M. Clulow vio9p_t *vin;
696*64439ec0SJoshua M. Clulow
697*64439ec0SJoshua M. Clulow if ((vin = ddi_get_soft_state(vio9p_state, getminor(dev))) == NULL) {
698*64439ec0SJoshua M. Clulow return (ENXIO);
699*64439ec0SJoshua M. Clulow }
700*64439ec0SJoshua M. Clulow
701*64439ec0SJoshua M. Clulow mutex_enter(&vin->vin_mutex);
702*64439ec0SJoshua M. Clulow again:
703*64439ec0SJoshua M. Clulow if ((vnr = list_remove_head(&vin->vin_completes)) == NULL) {
704*64439ec0SJoshua M. Clulow if (!blocking) {
705*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
706*64439ec0SJoshua M. Clulow return (EAGAIN);
707*64439ec0SJoshua M. Clulow }
708*64439ec0SJoshua M. Clulow
709*64439ec0SJoshua M. Clulow /*
710*64439ec0SJoshua M. Clulow * There is nothing to read right now. Wait for something:
711*64439ec0SJoshua M. Clulow */
712*64439ec0SJoshua M. Clulow if (cv_wait_sig(&vin->vin_cv, &vin->vin_mutex) == 0) {
713*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
714*64439ec0SJoshua M. Clulow return (EINTR);
715*64439ec0SJoshua M. Clulow }
716*64439ec0SJoshua M. Clulow goto again;
717*64439ec0SJoshua M. Clulow }
718*64439ec0SJoshua M. Clulow
719*64439ec0SJoshua M. Clulow /*
720*64439ec0SJoshua M. Clulow * Determine the size of the response message using the initial size[4]
721*64439ec0SJoshua M. Clulow * field of the response. The various specifying documents that exist
722*64439ec0SJoshua M. Clulow * suggest this is an unsigned integer in little-endian order.
723*64439ec0SJoshua M. Clulow */
724*64439ec0SJoshua M. Clulow uint32_t msz;
725*64439ec0SJoshua M. Clulow bcopy(virtio_dma_va(vnr->vnr_dma_in, 0), &msz, sizeof (msz));
726*64439ec0SJoshua M. Clulow msz = LE_32(msz);
727*64439ec0SJoshua M. Clulow if (msz > virtio_dma_size(vnr->vnr_dma_in)) {
728*64439ec0SJoshua M. Clulow msz = virtio_dma_size(vnr->vnr_dma_in);
729*64439ec0SJoshua M. Clulow }
730*64439ec0SJoshua M. Clulow
731*64439ec0SJoshua M. Clulow if (msz > uio->uio_resid) {
732*64439ec0SJoshua M. Clulow /*
733*64439ec0SJoshua M. Clulow * Tell the consumer they are going to need a bigger
734*64439ec0SJoshua M. Clulow * buffer.
735*64439ec0SJoshua M. Clulow */
736*64439ec0SJoshua M. Clulow list_insert_head(&vin->vin_completes, vnr);
737*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
738*64439ec0SJoshua M. Clulow return (EOVERFLOW);
739*64439ec0SJoshua M. Clulow }
740*64439ec0SJoshua M. Clulow
741*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
742*64439ec0SJoshua M. Clulow int e = uiomove(virtio_dma_va(vnr->vnr_dma_in, 0), msz, UIO_READ, uio);
743*64439ec0SJoshua M. Clulow mutex_enter(&vin->vin_mutex);
744*64439ec0SJoshua M. Clulow
745*64439ec0SJoshua M. Clulow if (e == 0) {
746*64439ec0SJoshua M. Clulow vio9p_req_free(vin, vnr);
747*64439ec0SJoshua M. Clulow } else {
748*64439ec0SJoshua M. Clulow /*
749*64439ec0SJoshua M. Clulow * Put the response back in the list for another try, so that
750*64439ec0SJoshua M. Clulow * we do not drop any messages:
751*64439ec0SJoshua M. Clulow */
752*64439ec0SJoshua M. Clulow list_insert_head(&vin->vin_completes, vnr);
753*64439ec0SJoshua M. Clulow }
754*64439ec0SJoshua M. Clulow
755*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
756*64439ec0SJoshua M. Clulow return (e);
757*64439ec0SJoshua M. Clulow }
758*64439ec0SJoshua M. Clulow
759*64439ec0SJoshua M. Clulow static int
vio9p_write(dev_t dev,struct uio * uio,cred_t * cred)760*64439ec0SJoshua M. Clulow vio9p_write(dev_t dev, struct uio *uio, cred_t *cred)
761*64439ec0SJoshua M. Clulow {
762*64439ec0SJoshua M. Clulow bool blocking = (uio->uio_fmode & (FNDELAY | FNONBLOCK)) == 0;
763*64439ec0SJoshua M. Clulow
764*64439ec0SJoshua M. Clulow size_t wsz = uio->uio_resid;
765*64439ec0SJoshua M. Clulow if (wsz < 7) {
766*64439ec0SJoshua M. Clulow /*
767*64439ec0SJoshua M. Clulow * Requests should be well-formed 9P messages. They must
768*64439ec0SJoshua M. Clulow * contain at least 7 bytes: msize[4] + type[1] + tag[2].
769*64439ec0SJoshua M. Clulow */
770*64439ec0SJoshua M. Clulow return (EINVAL);
771*64439ec0SJoshua M. Clulow } else if (wsz > VIRTIO_9P_REQ_SIZE) {
772*64439ec0SJoshua M. Clulow return (EMSGSIZE);
773*64439ec0SJoshua M. Clulow }
774*64439ec0SJoshua M. Clulow
775*64439ec0SJoshua M. Clulow vio9p_t *vin = ddi_get_soft_state(vio9p_state, getminor(dev));
776*64439ec0SJoshua M. Clulow if (vin == NULL) {
777*64439ec0SJoshua M. Clulow return (ENXIO);
778*64439ec0SJoshua M. Clulow }
779*64439ec0SJoshua M. Clulow
780*64439ec0SJoshua M. Clulow mutex_enter(&vin->vin_mutex);
781*64439ec0SJoshua M. Clulow vio9p_req_t *vnr = vio9p_req_alloc(vin, blocking);
782*64439ec0SJoshua M. Clulow if (vnr == NULL) {
783*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
784*64439ec0SJoshua M. Clulow return (blocking ? ENOMEM : EAGAIN);
785*64439ec0SJoshua M. Clulow }
786*64439ec0SJoshua M. Clulow vnr->vnr_generation = vin->vin_generation;
787*64439ec0SJoshua M. Clulow VERIFY3U(wsz, <=, virtio_dma_size(vnr->vnr_dma_out));
788*64439ec0SJoshua M. Clulow
789*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
790*64439ec0SJoshua M. Clulow int e = uiomove(virtio_dma_va(vnr->vnr_dma_out, 0), wsz, UIO_WRITE,
791*64439ec0SJoshua M. Clulow uio);
792*64439ec0SJoshua M. Clulow mutex_enter(&vin->vin_mutex);
793*64439ec0SJoshua M. Clulow
794*64439ec0SJoshua M. Clulow if (e == 0) {
795*64439ec0SJoshua M. Clulow virtio_dma_sync(vnr->vnr_dma_out, DDI_DMA_SYNC_FORDEV);
796*64439ec0SJoshua M. Clulow virtio_chain_submit(vnr->vnr_chain, B_TRUE);
797*64439ec0SJoshua M. Clulow } else {
798*64439ec0SJoshua M. Clulow vio9p_req_free(vin, vnr);
799*64439ec0SJoshua M. Clulow }
800*64439ec0SJoshua M. Clulow
801*64439ec0SJoshua M. Clulow mutex_exit(&vin->vin_mutex);
802*64439ec0SJoshua M. Clulow return (e);
803*64439ec0SJoshua M. Clulow }
804*64439ec0SJoshua M. Clulow
805*64439ec0SJoshua M. Clulow int
_init(void)806*64439ec0SJoshua M. Clulow _init(void)
807*64439ec0SJoshua M. Clulow {
808*64439ec0SJoshua M. Clulow int r;
809*64439ec0SJoshua M. Clulow
810*64439ec0SJoshua M. Clulow if ((r = ddi_soft_state_init(&vio9p_state, sizeof (vio9p_t), 0)) != 0) {
811*64439ec0SJoshua M. Clulow return (r);
812*64439ec0SJoshua M. Clulow }
813*64439ec0SJoshua M. Clulow
814*64439ec0SJoshua M. Clulow if ((r = mod_install(&vio9p_modlinkage)) != 0) {
815*64439ec0SJoshua M. Clulow ddi_soft_state_fini(&vio9p_state);
816*64439ec0SJoshua M. Clulow }
817*64439ec0SJoshua M. Clulow
818*64439ec0SJoshua M. Clulow return (r);
819*64439ec0SJoshua M. Clulow }
820*64439ec0SJoshua M. Clulow
821*64439ec0SJoshua M. Clulow int
_fini(void)822*64439ec0SJoshua M. Clulow _fini(void)
823*64439ec0SJoshua M. Clulow {
824*64439ec0SJoshua M. Clulow int r;
825*64439ec0SJoshua M. Clulow
826*64439ec0SJoshua M. Clulow if ((r = mod_remove(&vio9p_modlinkage)) != 0) {
827*64439ec0SJoshua M. Clulow return (r);
828*64439ec0SJoshua M. Clulow }
829*64439ec0SJoshua M. Clulow
830*64439ec0SJoshua M. Clulow ddi_soft_state_fini(&vio9p_state);
831*64439ec0SJoshua M. Clulow
832*64439ec0SJoshua M. Clulow return (r);
833*64439ec0SJoshua M. Clulow }
834*64439ec0SJoshua M. Clulow
835*64439ec0SJoshua M. Clulow int
_info(struct modinfo * modinfop)836*64439ec0SJoshua M. Clulow _info(struct modinfo *modinfop)
837*64439ec0SJoshua M. Clulow {
838*64439ec0SJoshua M. Clulow return (mod_info(&vio9p_modlinkage, modinfop));
839*64439ec0SJoshua M. Clulow }
840