/*	$NetBSD: vio9p.c,v 1.11 2023/03/23 03:55:11 yamaguchi Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vio9p.c,v 1.11 2023/03/23 03:55:11 yamaguchi Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/device.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/kmem.h>

#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/uio.h>

#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#include "ioconf.h"
//#define VIO9P_DEBUG 1
//#define VIO9P_DUMP 1
#ifdef VIO9P_DEBUG
#define DLOG(fmt, args...) \
	do { log(LOG_DEBUG, "%s: " fmt "\n", __func__, ##args); } while (0)
#else
#define DLOG(fmt, args...) __nothing
#endif

/* Device-specific feature bits */
#define VIO9P_F_MOUNT_TAG	(UINT64_C(1) << 0) /* mount tag specified */

/* Configuration registers */
#define VIO9P_CONFIG_TAG_LEN	0 /* 16bit */
#define VIO9P_CONFIG_TAG	2

#define VIO9P_FLAG_BITS			\
	VIRTIO_COMMON_FLAG_BITS		\
	"b\x00" "MOUNT_TAG\0"


// Must be the same as P9P_DEFREQLEN of usr.sbin/puffs/mount_9p/ninepuffs.h
#define VIO9P_MAX_REQLEN	(16 * 1024)
#define VIO9P_SEGSIZE		PAGE_SIZE
#define VIO9P_N_SEGMENTS	(VIO9P_MAX_REQLEN / VIO9P_SEGSIZE)
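/*
 * Each buffer is thus carved into page-sized DMA segments; e.g. with a
 * 4 KiB PAGE_SIZE this is 16384 / 4096 = 4 segments per direction, which
 * is why the virtqueue is allocated with VIO9P_N_SEGMENTS * 2 segments
 * (Tx plus Rx) per request in vio9p_attach() below.
 */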

/*
 * QEMU defines this as 32 but includes the final zero byte in the
 * limit.  The code below counts the final zero byte separately, so
 * adjust this define to match.
 */
#define P9_MAX_TAG_LEN	31

CTASSERT((PAGE_SIZE) == (VIRTIO_PAGE_SIZE)); /* XXX */

struct vio9p_softc {
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[1];

	uint16_t		sc_taglen;
	uint8_t			sc_tag[P9_MAX_TAG_LEN + 1];

	int			sc_flags;
#define VIO9P_INUSE		__BIT(0)

	int			sc_state;
#define VIO9P_S_INIT		0
#define VIO9P_S_REQUESTING	1
#define VIO9P_S_REPLIED		2
#define VIO9P_S_CONSUMING	3
	kcondvar_t		sc_wait;
	struct selinfo		sc_sel;
	kmutex_t		sc_lock;

	bus_dmamap_t		sc_dmamap_tx;
	bus_dmamap_t		sc_dmamap_rx;
	char			*sc_buf_tx;
	char			*sc_buf_rx;
	size_t			sc_buf_rx_len;
	off_t			sc_buf_rx_offset;
};

/*
 * Locking notes:
 * - sc_state, sc_wait and sc_sel are protected by sc_lock
 *
 * The state machine (sc_state):
 * - INIT       =(write from client)=> REQUESTING
 * - REQUESTING =(reply from host)=>   REPLIED
 * - REPLIED    =(read from client)=>  CONSUMING
 * - CONSUMING  =(read completed(*))=> INIT
 *
 * (*) a read may not finish in a single read(2) call; in that case
 *     the state remains CONSUMING.
 */
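/*
 * Userland view of the state machine, as an illustrative sketch only
 * (the device node name and the message buffers are assumptions, not
 * part of this driver):
 *
 *	int fd = open("/dev/vio9p0", O_RDWR);	// exclusive; EBUSY if taken
 *	write(fd, tmsg, tlen);			// INIT -> REQUESTING
 *	// wait for EVFILT_READ via kevent(2), or just read(2) and block
 *	n = read(fd, rmsg, rlen);		// REPLIED -> CONSUMING
 *	// rlen must not exceed the pending reply length; a partial read
 *	// leaves the state at CONSUMING, and draining the reply returns
 *	// the state to INIT
 */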

static int	vio9p_match(device_t, cfdata_t, void *);
static void	vio9p_attach(device_t, device_t, void *);
static void	vio9p_read_config(struct vio9p_softc *);
static int	vio9p_request_done(struct virtqueue *);

static int	vio9p_read(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	vio9p_write(struct file *, off_t *, struct uio *,
		    kauth_cred_t, int);
static int	vio9p_ioctl(struct file *, u_long, void *);
static int	vio9p_close(struct file *);
static int	vio9p_kqfilter(struct file *, struct knote *);

static const struct fileops vio9p_fileops = {
	.fo_name = "vio9p",
	.fo_read = vio9p_read,
	.fo_write = vio9p_write,
	.fo_ioctl = vio9p_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = vio9p_close,
	.fo_kqfilter = vio9p_kqfilter,
	.fo_restart = fnullop_restart,
};

static dev_type_open(vio9p_dev_open);

const struct cdevsw vio9p_cdevsw = {
	.d_open = vio9p_dev_open,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE,
};

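/*
 * Open the character device.  Only one client may use the device at a
 * time; the client gets a cloned file descriptor bound to this softc.
 */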
static int
vio9p_dev_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct vio9p_softc *sc;
	struct file *fp;
	int error, fd;

	sc = device_lookup_private(&vio9p_cd, minor(dev));
	if (sc == NULL)
		return ENXIO;

	/* FIXME TOCTOU */
	if (ISSET(sc->sc_flags, VIO9P_INUSE))
		return EBUSY;

	/* falloc() will fill in the descriptor for us. */
	error = fd_allocfile(&fp, &fd);
	if (error != 0)
		return error;

	sc->sc_flags |= VIO9P_INUSE;

	return fd_clone(fp, fd, flag, &vio9p_fileops, sc);
}

static int
vio9p_ioctl(struct file *fp, u_long cmd, void *addr)
{
	int error = 0;

	switch (cmd) {
	case FIONBIO:
		break;
	default:
		error = EINVAL;
		break;
	}

	return error;
}

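/*
 * read(2) handler: wait for the host's reply to the outstanding request,
 * then copy it out to the client, possibly over several calls.  A read
 * longer than the pending reply fails with EINVAL.
 */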
static int
vio9p_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct vio9p_softc *sc = fp->f_data;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	int error, slot, len;

	DLOG("enter");

	mutex_enter(&sc->sc_lock);

	if (sc->sc_state == VIO9P_S_INIT) {
		DLOG("%s: not requested", device_xname(sc->sc_dev));
		error = EAGAIN;
		goto out;
	}

	if (sc->sc_state == VIO9P_S_CONSUMING) {
		KASSERT(sc->sc_buf_rx_len > 0);
		/* We already have some remaining, consume it. */
		len = sc->sc_buf_rx_len - sc->sc_buf_rx_offset;
		goto consume;
	}

#if 0
	if (uio->uio_resid != VIO9P_MAX_REQLEN)
		return EINVAL;
#else
	if (uio->uio_resid > VIO9P_MAX_REQLEN) {
		error = EINVAL;
		goto out;
	}
#endif

	error = 0;
	while (sc->sc_state == VIO9P_S_REQUESTING) {
		error = cv_timedwait_sig(&sc->sc_wait, &sc->sc_lock, hz);
		if (error != 0)
			break;
	}
	if (sc->sc_state == VIO9P_S_REPLIED)
		sc->sc_state = VIO9P_S_CONSUMING;

	if (error != 0)
		goto out;

	error = virtio_dequeue(vsc, vq, &slot, &len);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_dequeue failed: %d\n",
		    device_xname(sc->sc_dev), error);
		goto out;
	}
	DLOG("len=%d", len);
	sc->sc_buf_rx_len = len;
	sc->sc_buf_rx_offset = 0;
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_tx, 0, VIO9P_MAX_REQLEN,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_rx, 0, VIO9P_MAX_REQLEN,
	    BUS_DMASYNC_POSTREAD);
	virtio_dequeue_commit(vsc, vq, slot);
#ifdef VIO9P_DUMP
	int i;
	log(LOG_DEBUG, "%s: buf: ", __func__);
	for (i = 0; i < len; i++) {
		log(LOG_DEBUG, "%c", (char)sc->sc_buf_rx[i]);
	}
	log(LOG_DEBUG, "\n");
#endif

consume:
	DLOG("uio_resid=%lu", uio->uio_resid);
	if (len < uio->uio_resid) {
		error = EINVAL;
		goto out;
	}
	len = uio->uio_resid;
	error = uiomove(sc->sc_buf_rx + sc->sc_buf_rx_offset, len, uio);
	if (error != 0)
		goto out;

	sc->sc_buf_rx_offset += len;
	if (sc->sc_buf_rx_offset == sc->sc_buf_rx_len) {
		sc->sc_buf_rx_len = 0;
		sc->sc_buf_rx_offset = 0;

		sc->sc_state = VIO9P_S_INIT;
		selnotify(&sc->sc_sel, 0, 1);
	}

out:
	mutex_exit(&sc->sc_lock);
	return error;
}

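/*
 * write(2) handler: copy a single 9P request from the client into the Tx
 * buffer and enqueue it together with the Rx buffer for the host's reply.
 */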
static int
vio9p_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct vio9p_softc *sc = fp->f_data;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	int error, slot;
	size_t len;

	DLOG("enter");

	mutex_enter(&sc->sc_lock);

	if (sc->sc_state != VIO9P_S_INIT) {
		DLOG("already requesting");
		error = EAGAIN;
		goto out;
	}

	if (uio->uio_resid == 0) {
		error = 0;
		goto out;
	}

	if (uio->uio_resid > VIO9P_MAX_REQLEN) {
		error = EINVAL;
		goto out;
	}

	len = uio->uio_resid;
	error = uiomove(sc->sc_buf_tx, len, uio);
	if (error != 0)
		goto out;

	DLOG("len=%lu", len);
#ifdef VIO9P_DUMP
	int i;
	log(LOG_DEBUG, "%s: buf: ", __func__);
	for (i = 0; i < len; i++) {
		log(LOG_DEBUG, "%c", (char)sc->sc_buf_tx[i]);
	}
	log(LOG_DEBUG, "\n");
#endif

	error = virtio_enqueue_prep(vsc, vq, &slot);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_enqueue_prep failed\n",
		    device_xname(sc->sc_dev));
		goto out;
	}
	DLOG("slot=%d", slot);
	error = virtio_enqueue_reserve(vsc, vq, slot,
	    sc->sc_dmamap_tx->dm_nsegs + sc->sc_dmamap_rx->dm_nsegs);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_enqueue_reserve failed\n",
		    device_xname(sc->sc_dev));
		goto out;
	}

	/* Tx */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_tx, 0,
	    len, BUS_DMASYNC_PREWRITE);
	virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_tx, true);
	/* Rx */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_rx, 0,
	    VIO9P_MAX_REQLEN, BUS_DMASYNC_PREREAD);
	virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_rx, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	sc->sc_state = VIO9P_S_REQUESTING;
out:
	mutex_exit(&sc->sc_lock);
	return error;
}

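/* Release the exclusive-use slot taken in vio9p_dev_open(). */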
static int
vio9p_close(struct file *fp)
{
	struct vio9p_softc *sc = fp->f_data;

	KASSERT(ISSET(sc->sc_flags, VIO9P_INUSE));
	sc->sc_flags &= ~VIO9P_INUSE;

	return 0;
}

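/*
 * kqueue filters: the read filter fires while a request is in flight or
 * a reply is pending, the write filter once the device is idle again.
 */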
static void
filt_vio9p_detach(struct knote *kn)
{
	struct vio9p_softc *sc = kn->kn_hook;

	mutex_enter(&sc->sc_lock);
	selremove_knote(&sc->sc_sel, kn);
	mutex_exit(&sc->sc_lock);
}

static int
filt_vio9p_read(struct knote *kn, long hint)
{
	struct vio9p_softc *sc = kn->kn_hook;
	int rv;

	kn->kn_data = sc->sc_buf_rx_len;
	/* XXX need sc_lock? */
	rv = (kn->kn_data > 0) || sc->sc_state != VIO9P_S_INIT;

	return rv;
}

static const struct filterops vio9p_read_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_vio9p_detach,
	.f_event = filt_vio9p_read,
};

static int
filt_vio9p_write(struct knote *kn, long hint)
{
	struct vio9p_softc *sc = kn->kn_hook;

	/* XXX need sc_lock? */
	return sc->sc_state == VIO9P_S_INIT;
}

static const struct filterops vio9p_write_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_vio9p_detach,
	.f_event = filt_vio9p_write,
};

static int
vio9p_kqfilter(struct file *fp, struct knote *kn)
{
	struct vio9p_softc *sc = fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vio9p_read_filtops;
		break;

	case EVFILT_WRITE:
		kn->kn_fop = &vio9p_write_filtops;
		break;

	default:
		log(LOG_ERR, "%s: kn_filter=%u\n", __func__, kn->kn_filter);
		return EINVAL;
	}

	kn->kn_hook = sc;

	mutex_enter(&sc->sc_lock);
	selrecord_knote(&sc->sc_sel, kn);
	mutex_exit(&sc->sc_lock);

	return 0;
}

CFATTACH_DECL_NEW(vio9p, sizeof(struct vio9p_softc),
    vio9p_match, vio9p_attach, NULL, NULL);

static int
vio9p_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == VIRTIO_DEVICE_ID_9P)
		return 1;

	return 0;
}

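/*
 * Attach: negotiate the mount-tag feature, allocate the single virtqueue
 * and the Tx/Rx buffers with their DMA maps, and fetch the mount tag
 * from the device's configuration space.
 */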
static void
vio9p_attach(device_t parent, device_t self, void *aux)
{
	struct vio9p_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	uint64_t features;
	int error;

	if (virtio_child(vsc) != NULL) {
		aprint_normal(": child already attached for %s; "
		    "something wrong...\n", device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;

	virtio_child_attach_start(vsc, self, IPL_VM,
	    VIO9P_F_MOUNT_TAG, VIO9P_FLAG_BITS);

	features = virtio_features(vsc);
	if ((features & VIO9P_F_MOUNT_TAG) == 0)
		goto err_none;

	virtio_init_vq_vqdone(vsc, &sc->sc_vq[0], 0, vio9p_request_done);
	error = virtio_alloc_vq(vsc, &sc->sc_vq[0], VIO9P_MAX_REQLEN,
	    VIO9P_N_SEGMENTS * 2, "vio9p");
	if (error != 0)
		goto err_none;

	sc->sc_buf_tx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP);
	sc->sc_buf_rx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP);

	error = bus_dmamap_create(virtio_dmat(vsc), VIO9P_MAX_REQLEN,
	    VIO9P_N_SEGMENTS, VIO9P_SEGSIZE, 0, BUS_DMA_WAITOK,
	    &sc->sc_dmamap_tx);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
		    error);
		goto err_vq;
	}
	error = bus_dmamap_create(virtio_dmat(vsc), VIO9P_MAX_REQLEN,
	    VIO9P_N_SEGMENTS, VIO9P_SEGSIZE, 0, BUS_DMA_WAITOK,
	    &sc->sc_dmamap_rx);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
		    error);
		goto err_vq;
	}

	error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dmamap_tx,
	    sc->sc_buf_tx, VIO9P_MAX_REQLEN, NULL,
	    BUS_DMA_WAITOK | BUS_DMA_WRITE);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
		    error);
		goto err_dmamap;
	}
	error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dmamap_rx,
	    sc->sc_buf_rx, VIO9P_MAX_REQLEN, NULL,
	    BUS_DMA_WAITOK | BUS_DMA_READ);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
		    error);
		goto err_dmamap;
	}

	sc->sc_state = VIO9P_S_INIT;
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_wait, "vio9p");

	vio9p_read_config(sc);
	aprint_normal_dev(self, "tagged as %s\n", sc->sc_tag);

	error = virtio_child_attach_finish(vsc, sc->sc_vq,
	    __arraycount(sc->sc_vq), NULL,
	    VIRTIO_F_INTR_MPSAFE | VIRTIO_F_INTR_SOFTINT);
	if (error != 0)
		goto err_mutex;

	return;

err_mutex:
	cv_destroy(&sc->sc_wait);
	mutex_destroy(&sc->sc_lock);
err_dmamap:
	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dmamap_tx);
	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dmamap_rx);
err_vq:
	virtio_free_vq(vsc, &sc->sc_vq[0]);
err_none:
	virtio_child_attach_failed(vsc);
	return;
}

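/*
 * Read the mount tag from the configuration space, trimming it to
 * P9_MAX_TAG_LEN bytes and NUL-terminating the copy.
 */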
static void
vio9p_read_config(struct vio9p_softc *sc)
{
	device_t dev = sc->sc_dev;
	uint8_t reg;
	int i;

	/* these values are explicitly specified as little-endian */
	sc->sc_taglen = virtio_read_device_config_le_2(sc->sc_virtio,
	    VIO9P_CONFIG_TAG_LEN);

	if (sc->sc_taglen > P9_MAX_TAG_LEN) {
		aprint_error_dev(dev, "warning: tag is trimmed from %u to %u\n",
		    sc->sc_taglen, P9_MAX_TAG_LEN);
		sc->sc_taglen = P9_MAX_TAG_LEN;
	}

	for (i = 0; i < sc->sc_taglen; i++) {
		reg = virtio_read_device_config_1(sc->sc_virtio,
		    VIO9P_CONFIG_TAG + i);
		sc->sc_tag[i] = reg;
	}
	sc->sc_tag[i] = '\0';
}

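/*
 * Virtqueue completion handler, run when the host has produced a reply:
 * advance the state machine and wake the reader and any kevent waiters.
 */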
static int
vio9p_request_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vio9p_softc *sc = device_private(virtio_child(vsc));

	DLOG("enter");

	mutex_enter(&sc->sc_lock);
	sc->sc_state = VIO9P_S_REPLIED;
	cv_broadcast(&sc->sc_wait);
	selnotify(&sc->sc_sel, 0, 1);
	mutex_exit(&sc->sc_lock);

	return 1;
}

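/* Module glue: register the device switch and autoconf data on load. */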
MODULE(MODULE_CLASS_DRIVER, vio9p, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vio9p_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		devsw_attach(vio9p_cd.cd_name, NULL, &bmajor,
		    &vio9p_cdevsw, &cmajor);
		error = config_init_component(cfdriver_ioconf_vio9p,
		    cfattach_ioconf_vio9p, cfdata_ioconf_vio9p);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_vio9p,
		    cfattach_ioconf_vio9p, cfdata_ioconf_vio9p);
		devsw_detach(NULL, &vio9p_cdevsw);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}