1e97ad33aSDoug Rabson /*- 2e97ad33aSDoug Rabson * Copyright (c) 2017 Juniper Networks, Inc. 3e97ad33aSDoug Rabson * All rights reserved. 4e97ad33aSDoug Rabson * 5e97ad33aSDoug Rabson * Redistribution and use in source and binary forms, with or without 6e97ad33aSDoug Rabson * modification, are permitted provided that the following conditions 7e97ad33aSDoug Rabson * are met: 8e97ad33aSDoug Rabson * 1. Redistributions of source code must retain the above copyright 9e97ad33aSDoug Rabson * notice, this list of conditions and the following disclaimer. 10e97ad33aSDoug Rabson * 2. Redistributions in binary form must reproduce the above copyright 11e97ad33aSDoug Rabson * notice, this list of conditions and the following disclaimer in the 12e97ad33aSDoug Rabson * documentation and/or other materials provided with the distribution. 13e97ad33aSDoug Rabson * 14e97ad33aSDoug Rabson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15e97ad33aSDoug Rabson * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16e97ad33aSDoug Rabson * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17e97ad33aSDoug Rabson * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18e97ad33aSDoug Rabson * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19e97ad33aSDoug Rabson * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20e97ad33aSDoug Rabson * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21e97ad33aSDoug Rabson * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22e97ad33aSDoug Rabson * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23e97ad33aSDoug Rabson * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24e97ad33aSDoug Rabson * 25e97ad33aSDoug Rabson */ 26e97ad33aSDoug Rabson /* 27e97ad33aSDoug Rabson * The Virtio 9P transport driver. 
This file contains all functions related to
 * the virtqueue infrastructure which include creating the virtqueue, host
 * interactions, interrupts etc.
 */

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/kthread.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <fs/p9fs/p9_client.h>
#include <fs/p9fs/p9_debug.h>
#include <fs/p9fs/p9_protocol.h>
#include <fs/p9fs/p9_transport.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>
#include <dev/virtio/p9fs/virtio_p9fs.h>

/* Per-channel mutex protecting the virtqueue, the sglist and submit_cv. */
#define VT9P_MTX(_sc) (&(_sc)->vt9p_mtx)
#define VT9P_LOCK(_sc) mtx_lock(VT9P_MTX(_sc))
#define VT9P_UNLOCK(_sc) mtx_unlock(VT9P_MTX(_sc))
#define VT9P_LOCK_INIT(_sc) mtx_init(VT9P_MTX(_sc), \
    "VIRTIO 9P CHAN lock", NULL, MTX_DEF)
#define VT9P_LOCK_DESTROY(_sc) mtx_destroy(VT9P_MTX(_sc))
/* Maximum scatter/gather segments per request (request + response). */
#define MAX_SUPPORTED_SGS 20
static MALLOC_DEFINE(M_P9FS_MNTTAG, "p9fs_mount_tag", "P9fs Mounttag");

/*
 * Software context of one virtio 9P channel.  There is one instance per
 * virtio device and it backs at most one mount point at a time.
 */
struct vt9p_softc {
	device_t vt9p_dev;		/* backing virtio device */
	struct mtx vt9p_mtx;		/* serializes virtqueue access */
	struct sglist *vt9p_sglist;	/* scratch sglist, reused per request */
	struct cv submit_cv;		/* waiters for free virtqueue slots */
	bool busy;			/* channel claimed by a client */
	struct virtqueue *vt9p_vq;	/* single request virtqueue */
	int max_nsegs;			/* capacity of vt9p_sglist */
	uint16_t mount_tag_len;		/* length of mount_tag incl. NUL */
	char *mount_tag;		/* tag identifying this channel */
	STAILQ_ENTRY(vt9p_softc) chan_next;	/* global_chan_list linkage */
};

/* Global channel list, Each channel will correspond to a mount point */
static STAILQ_HEAD( ,vt9p_softc) global_chan_list =
    STAILQ_HEAD_INITIALIZER(global_chan_list);
struct mtx global_chan_list_mtx;
MTX_SYSINIT(global_chan_list_mtx, &global_chan_list_mtx, "9pglobal", MTX_DEF);

/* Feature name table handed to the virtio layer for pretty-printing. */
static struct virtio_feature_desc virtio_9p_feature_desc[] = {
	{ VIRTIO_9PNET_F_MOUNT_TAG, "9PMountTag" },
	{ 0, NULL }
};

/* We don't currently allow canceling of virtio requests */
static int
vt9p_cancel(void *handle, struct p9_req_t *req)
{
	return (1);
}

SYSCTL_NODE(_vfs, OID_AUTO, 9p, CTLFLAG_RW, 0, "9P File System Protocol");

/*
 * Maximum number of seconds vt9p_request thread sleep waiting for an
 * ack from the host, before exiting
 */
static unsigned int vt9p_ackmaxidle = 120;
SYSCTL_UINT(_vfs_9p, OID_AUTO, ackmaxidle, CTLFLAG_RW, &vt9p_ackmaxidle, 0,
    "Maximum time request thread waits for ack from host");

/*
 * Wait for completion of a p9 request.
 *
 * This routine will sleep and release the chan mtx during the period.
 * chan mtx will be acquired again upon return.
 *
 * Returns 0 on completion or EIO when the host did not answer within
 * vt9p_ackmaxidle seconds.
 */
static int
vt9p_req_wait(struct vt9p_softc *chan, struct p9_req_t *req)
{
	/*
	 * The interrupt handler copies tc->tag into rc->tag when a request
	 * completes, so equal tags here would mean it already finished.
	 */
	KASSERT(req->tc->tag != req->rc->tag,
	    ("%s: request %p already completed", __func__, req));

	/*
	 * msleep() drops the channel mutex while sleeping and reacquires
	 * it before returning; a non-zero return means the timeout fired.
	 */
	if (msleep(req, VT9P_MTX(chan), 0, "chan lock", vt9p_ackmaxidle * hz)) {
		/*
		 * Waited for 120s. No response from host.
		 * Can't wait for ever..
		 */
		P9_DEBUG(ERROR, "Timeout after waiting %u seconds"
		    "for an ack from host\n", vt9p_ackmaxidle);
		return (EIO);
	}
	KASSERT(req->tc->tag == req->rc->tag,
	    ("%s spurious event on request %p", __func__, req));
	return (0);
}

/*
 * Request handler. This is called for every request submitted to the host
 * It basically maps the tc/rc buffers to sg lists and submits the requests
 * into the virtqueue. Since we have implemented a synchronous version, the
 * submission thread sleeps until the ack in the interrupt wakes it up. Once
 * it wakes up, it returns back to the P9fs layer. The rc buffer is then
 * processed and completed to its upper layers.
 */
static int
vt9p_request(void *handle, struct p9_req_t *req)
{
	int error;
	struct vt9p_softc *chan;
	int readable, writable;
	struct sglist *sg;
	struct virtqueue *vq;

	chan = handle;
	sg = chan->vt9p_sglist;
	vq = chan->vt9p_vq;

	P9_DEBUG(TRANS, "%s: req=%p\n", __func__, req);

	/* Grab the channel lock*/
	VT9P_LOCK(chan);
req_retry:
	/* The shared sglist is protected by the channel lock. */
	sglist_reset(sg);
	/* Handle out VirtIO ring buffers */
	error = sglist_append(sg, req->tc->sdata, req->tc->size);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
		VT9P_UNLOCK(chan);
		return (error);
	}
	/* Segments appended so far are host-readable (the request). */
	readable = sg->sg_nseg;

	/* The response buffer follows as host-writable segments. */
	error = sglist_append(sg, req->rc->sdata, req->rc->capacity);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
		VT9P_UNLOCK(chan);
		return (error);
	}
	writable = sg->sg_nseg - readable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error != 0) {
		if (error == ENOSPC) {
			/*
			 * Condvar for the submit queue. Unlock the chan
			 * since wakeup needs one.
			 * vt9p_intr_complete() signals submit_cv when a
			 * slot frees up; then rebuild the sglist and retry.
			 */
			cv_wait(&chan->submit_cv, VT9P_MTX(chan));
			P9_DEBUG(TRANS, "%s: retry virtio request\n", __func__);
			goto req_retry;
		} else {
			P9_DEBUG(ERROR, "%s: virtio enuqueue failed \n", __func__);
			VT9P_UNLOCK(chan);
			return (EIO);
		}
	}

	/* We have to notify */
	virtqueue_notify(vq);

	/* Sleeps (dropping the lock) until the completion interrupt. */
	error = vt9p_req_wait(chan, req);
	if (error != 0) {
		VT9P_UNLOCK(chan);
		return (error);
	}

	VT9P_UNLOCK(chan);

	P9_DEBUG(TRANS, "%s: virtio request kicked\n", __func__);

	return (0);
}

/*
 * Completion of the request from the virtqueue. This interrupt handler is
 * setup at initialization and is called for every completing request. It
 * just wakes up the sleeping submission requests.
 */
static void
vt9p_intr_complete(void *xsc)
{
	struct vt9p_softc *chan;
	struct virtqueue *vq;
	struct p9_req_t *curreq;

	chan = (struct vt9p_softc *)xsc;
	vq = chan->vt9p_vq;

	P9_DEBUG(TRANS, "%s: completing\n", __func__);

	VT9P_LOCK(chan);
again:
	/* Drain every completed request and wake its submitting thread. */
	while ((curreq = virtqueue_dequeue(vq, NULL)) != NULL) {
		/* Matching tags mark completion; see vt9p_req_wait(). */
		curreq->rc->tag = curreq->tc->tag;
		wakeup_one(curreq);
	}
	/*
	 * Re-enable interrupts.  A non-zero return means more buffers
	 * completed in the window before interrupts were re-armed, so
	 * disable them again and re-drain to avoid losing completions.
	 */
	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		goto again;
	}
	/* A ring slot freed up; wake one waiter in vt9p_request(). */
	cv_signal(&chan->submit_cv);
	VT9P_UNLOCK(chan);
}

/*
 * Allocation of the virtqueue with interrupt complete routines.
240e97ad33aSDoug Rabson */ 241e97ad33aSDoug Rabson static int 242e97ad33aSDoug Rabson vt9p_alloc_virtqueue(struct vt9p_softc *sc) 243e97ad33aSDoug Rabson { 244e97ad33aSDoug Rabson struct vq_alloc_info vq_info; 245e97ad33aSDoug Rabson device_t dev; 246e97ad33aSDoug Rabson 247e97ad33aSDoug Rabson dev = sc->vt9p_dev; 248e97ad33aSDoug Rabson 249e97ad33aSDoug Rabson VQ_ALLOC_INFO_INIT(&vq_info, sc->max_nsegs, 250e97ad33aSDoug Rabson vt9p_intr_complete, sc, &sc->vt9p_vq, 251e97ad33aSDoug Rabson "%s request", device_get_nameunit(dev)); 252e97ad33aSDoug Rabson 253e97ad33aSDoug Rabson return (virtio_alloc_virtqueues(dev, 1, &vq_info)); 254e97ad33aSDoug Rabson } 255e97ad33aSDoug Rabson 256e97ad33aSDoug Rabson /* Probe for existence of 9P virtio channels */ 257e97ad33aSDoug Rabson static int 258e97ad33aSDoug Rabson vt9p_probe(device_t dev) 259e97ad33aSDoug Rabson { 260e97ad33aSDoug Rabson 261e97ad33aSDoug Rabson /* If the virtio device type is a 9P device, then we claim and attach it */ 262e97ad33aSDoug Rabson if (virtio_get_device_type(dev) != VIRTIO_ID_9P) 263e97ad33aSDoug Rabson return (ENXIO); 264e97ad33aSDoug Rabson device_set_desc(dev, "VirtIO 9P Transport"); 265e97ad33aSDoug Rabson 266e97ad33aSDoug Rabson return (BUS_PROBE_DEFAULT); 267e97ad33aSDoug Rabson } 268e97ad33aSDoug Rabson 269e97ad33aSDoug Rabson static void 270e97ad33aSDoug Rabson vt9p_stop(struct vt9p_softc *sc) 271e97ad33aSDoug Rabson { 272e97ad33aSDoug Rabson 273e97ad33aSDoug Rabson /* Device specific stops .*/ 274e97ad33aSDoug Rabson virtqueue_disable_intr(sc->vt9p_vq); 275e97ad33aSDoug Rabson virtio_stop(sc->vt9p_dev); 276e97ad33aSDoug Rabson } 277e97ad33aSDoug Rabson 278e97ad33aSDoug Rabson /* Detach the 9P virtio PCI device */ 279e97ad33aSDoug Rabson static int 280e97ad33aSDoug Rabson vt9p_detach(device_t dev) 281e97ad33aSDoug Rabson { 282e97ad33aSDoug Rabson struct vt9p_softc *sc; 283e97ad33aSDoug Rabson 284e97ad33aSDoug Rabson sc = device_get_softc(dev); 285e97ad33aSDoug Rabson VT9P_LOCK(sc); 
286e97ad33aSDoug Rabson vt9p_stop(sc); 287e97ad33aSDoug Rabson VT9P_UNLOCK(sc); 288e97ad33aSDoug Rabson 289e97ad33aSDoug Rabson if (sc->vt9p_sglist) { 290e97ad33aSDoug Rabson sglist_free(sc->vt9p_sglist); 291e97ad33aSDoug Rabson sc->vt9p_sglist = NULL; 292e97ad33aSDoug Rabson } 293e97ad33aSDoug Rabson if (sc->mount_tag) { 294e97ad33aSDoug Rabson free(sc->mount_tag, M_P9FS_MNTTAG); 295e97ad33aSDoug Rabson sc->mount_tag = NULL; 296e97ad33aSDoug Rabson } 297e97ad33aSDoug Rabson mtx_lock(&global_chan_list_mtx); 298e97ad33aSDoug Rabson STAILQ_REMOVE(&global_chan_list, sc, vt9p_softc, chan_next); 299e97ad33aSDoug Rabson mtx_unlock(&global_chan_list_mtx); 300e97ad33aSDoug Rabson 301e97ad33aSDoug Rabson VT9P_LOCK_DESTROY(sc); 302e97ad33aSDoug Rabson cv_destroy(&sc->submit_cv); 303e97ad33aSDoug Rabson 304e97ad33aSDoug Rabson return (0); 305e97ad33aSDoug Rabson } 306e97ad33aSDoug Rabson 307e97ad33aSDoug Rabson /* Attach the 9P virtio PCI device */ 308e97ad33aSDoug Rabson static int 309e97ad33aSDoug Rabson vt9p_attach(device_t dev) 310e97ad33aSDoug Rabson { 311e97ad33aSDoug Rabson struct sysctl_ctx_list *ctx; 312e97ad33aSDoug Rabson struct sysctl_oid *tree; 313e97ad33aSDoug Rabson struct vt9p_softc *chan; 314e97ad33aSDoug Rabson char *mount_tag; 315e97ad33aSDoug Rabson int error; 316e97ad33aSDoug Rabson uint16_t mount_tag_len; 317e97ad33aSDoug Rabson 318e97ad33aSDoug Rabson chan = device_get_softc(dev); 319e97ad33aSDoug Rabson chan->vt9p_dev = dev; 320e97ad33aSDoug Rabson 321e97ad33aSDoug Rabson /* Init the channel lock. 
*/ 322e97ad33aSDoug Rabson VT9P_LOCK_INIT(chan); 323e97ad33aSDoug Rabson /* Initialize the condition variable */ 324e97ad33aSDoug Rabson cv_init(&chan->submit_cv, "Conditional variable for submit queue" ); 325e97ad33aSDoug Rabson chan->max_nsegs = MAX_SUPPORTED_SGS; 326e6b88237SEd Maste chan->vt9p_sglist = sglist_alloc(chan->max_nsegs, M_WAITOK); 327e97ad33aSDoug Rabson 328e97ad33aSDoug Rabson /* Negotiate the features from the host */ 329e97ad33aSDoug Rabson virtio_set_feature_desc(dev, virtio_9p_feature_desc); 330e97ad33aSDoug Rabson virtio_negotiate_features(dev, VIRTIO_9PNET_F_MOUNT_TAG); 331e97ad33aSDoug Rabson 332e97ad33aSDoug Rabson /* 333e97ad33aSDoug Rabson * If mount tag feature is supported read the mount tag 334e97ad33aSDoug Rabson * from device config 335e97ad33aSDoug Rabson */ 336e97ad33aSDoug Rabson if (virtio_with_feature(dev, VIRTIO_9PNET_F_MOUNT_TAG)) 337e97ad33aSDoug Rabson mount_tag_len = virtio_read_dev_config_2(dev, 338e97ad33aSDoug Rabson offsetof(struct virtio_9pnet_config, mount_tag_len)); 339e97ad33aSDoug Rabson else { 340e97ad33aSDoug Rabson error = EINVAL; 341e97ad33aSDoug Rabson P9_DEBUG(ERROR, "%s: Mount tag feature not supported by host\n", __func__); 342e97ad33aSDoug Rabson goto out; 343e97ad33aSDoug Rabson } 344e97ad33aSDoug Rabson mount_tag = malloc(mount_tag_len + 1, M_P9FS_MNTTAG, 345e97ad33aSDoug Rabson M_WAITOK | M_ZERO); 346e97ad33aSDoug Rabson 347814bf1fdSDanilo Egea Gondolfo virtio_read_device_config_array(dev, 348e97ad33aSDoug Rabson offsetof(struct virtio_9pnet_config, mount_tag), 349814bf1fdSDanilo Egea Gondolfo mount_tag, 1, mount_tag_len); 350e97ad33aSDoug Rabson 351e97ad33aSDoug Rabson device_printf(dev, "Mount tag: %s\n", mount_tag); 352e97ad33aSDoug Rabson 353e97ad33aSDoug Rabson mount_tag_len++; 354e97ad33aSDoug Rabson chan->mount_tag_len = mount_tag_len; 355e97ad33aSDoug Rabson chan->mount_tag = mount_tag; 356e97ad33aSDoug Rabson 357e97ad33aSDoug Rabson ctx = device_get_sysctl_ctx(dev); 358e97ad33aSDoug Rabson tree 
= device_get_sysctl_tree(dev); 359e97ad33aSDoug Rabson SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "p9fs_mount_tag", 360e97ad33aSDoug Rabson CTLFLAG_RD, chan->mount_tag, 0, "Mount tag"); 361e97ad33aSDoug Rabson 362e97ad33aSDoug Rabson /* We expect one virtqueue, for requests. */ 363e97ad33aSDoug Rabson error = vt9p_alloc_virtqueue(chan); 364e97ad33aSDoug Rabson if (error != 0) { 365e97ad33aSDoug Rabson P9_DEBUG(ERROR, "%s: Allocating the virtqueue failed \n", __func__); 366e97ad33aSDoug Rabson goto out; 367e97ad33aSDoug Rabson } 368e97ad33aSDoug Rabson error = virtio_setup_intr(dev, INTR_TYPE_MISC|INTR_MPSAFE); 369e97ad33aSDoug Rabson if (error != 0) { 370e97ad33aSDoug Rabson P9_DEBUG(ERROR, "%s: Cannot setup virtqueue interrupt\n", __func__); 371e97ad33aSDoug Rabson goto out; 372e97ad33aSDoug Rabson } 373e97ad33aSDoug Rabson error = virtqueue_enable_intr(chan->vt9p_vq); 374e97ad33aSDoug Rabson if (error != 0) { 375e97ad33aSDoug Rabson P9_DEBUG(ERROR, "%s: Cannot enable virtqueue interrupt\n", __func__); 376e97ad33aSDoug Rabson goto out; 377e97ad33aSDoug Rabson } 378e97ad33aSDoug Rabson 379e97ad33aSDoug Rabson mtx_lock(&global_chan_list_mtx); 380e97ad33aSDoug Rabson /* Insert the channel in global channel list */ 381e97ad33aSDoug Rabson STAILQ_INSERT_HEAD(&global_chan_list, chan, chan_next); 382e97ad33aSDoug Rabson mtx_unlock(&global_chan_list_mtx); 383e97ad33aSDoug Rabson 384e97ad33aSDoug Rabson return (0); 385e97ad33aSDoug Rabson out: 386e97ad33aSDoug Rabson /* Something went wrong, detach the device */ 387e97ad33aSDoug Rabson vt9p_detach(dev); 388e97ad33aSDoug Rabson return (error); 389e97ad33aSDoug Rabson } 390e97ad33aSDoug Rabson 391e97ad33aSDoug Rabson /* 392e97ad33aSDoug Rabson * Allocate a new virtio channel. 
This sets up a transport channel 393e97ad33aSDoug Rabson * for 9P communication 394e97ad33aSDoug Rabson */ 395e97ad33aSDoug Rabson static int 396e97ad33aSDoug Rabson vt9p_create(const char *mount_tag, void **handlep) 397e97ad33aSDoug Rabson { 398e97ad33aSDoug Rabson struct vt9p_softc *sc, *chan; 399e97ad33aSDoug Rabson 400e97ad33aSDoug Rabson chan = NULL; 401e97ad33aSDoug Rabson 402e97ad33aSDoug Rabson /* 403e97ad33aSDoug Rabson * Find out the corresponding channel for a client from global list 404e97ad33aSDoug Rabson * of channels based on mount tag and attach it to client 405e97ad33aSDoug Rabson */ 406e97ad33aSDoug Rabson mtx_lock(&global_chan_list_mtx); 407e97ad33aSDoug Rabson STAILQ_FOREACH(sc, &global_chan_list, chan_next) { 408e97ad33aSDoug Rabson if (!strcmp(sc->mount_tag, mount_tag)) { 409e97ad33aSDoug Rabson chan = sc; 410e97ad33aSDoug Rabson break; 411e97ad33aSDoug Rabson } 412e97ad33aSDoug Rabson } 413e97ad33aSDoug Rabson mtx_unlock(&global_chan_list_mtx); 414e97ad33aSDoug Rabson 415e97ad33aSDoug Rabson /* 416e97ad33aSDoug Rabson * If chan is already attached to a client then it cannot be used for 417e97ad33aSDoug Rabson * another client. 
418e97ad33aSDoug Rabson */ 419e97ad33aSDoug Rabson if (chan && chan->busy) { 420e97ad33aSDoug Rabson //p9_debug(TRANS, "Channel busy: used by clnt=%p\n", chan->client); 421e97ad33aSDoug Rabson return (EBUSY); 422e97ad33aSDoug Rabson } 423e97ad33aSDoug Rabson 424e97ad33aSDoug Rabson /* If we dont have one, for now bail out.*/ 425e97ad33aSDoug Rabson if (chan) { 426e97ad33aSDoug Rabson *handlep = (void *)chan; 427b9500cbdSMark Johnston chan->busy = true; 428e97ad33aSDoug Rabson } else { 429e97ad33aSDoug Rabson P9_DEBUG(TRANS, "%s: No Global channel with mount_tag=%s\n", 430e97ad33aSDoug Rabson __func__, mount_tag); 431e97ad33aSDoug Rabson return (EINVAL); 432e97ad33aSDoug Rabson } 433e97ad33aSDoug Rabson 434e97ad33aSDoug Rabson return (0); 435e97ad33aSDoug Rabson } 436e97ad33aSDoug Rabson 437e97ad33aSDoug Rabson static void 438e97ad33aSDoug Rabson vt9p_close(void *handle) 439e97ad33aSDoug Rabson { 440e97ad33aSDoug Rabson struct vt9p_softc *chan = handle; 441b9500cbdSMark Johnston 442b9500cbdSMark Johnston chan->busy = false; 443e97ad33aSDoug Rabson } 444e97ad33aSDoug Rabson 445e97ad33aSDoug Rabson static struct p9_trans_module vt9p_trans = { 446e97ad33aSDoug Rabson .name = "virtio", 447e97ad33aSDoug Rabson .create = vt9p_create, 448e97ad33aSDoug Rabson .close = vt9p_close, 449e97ad33aSDoug Rabson .request = vt9p_request, 450e97ad33aSDoug Rabson .cancel = vt9p_cancel, 451e97ad33aSDoug Rabson }; 452e97ad33aSDoug Rabson 453e97ad33aSDoug Rabson static device_method_t vt9p_mthds[] = { 454e97ad33aSDoug Rabson /* Device methods. 
 */
	DEVMETHOD(device_probe, vt9p_probe),
	DEVMETHOD(device_attach, vt9p_attach),
	DEVMETHOD(device_detach, vt9p_detach),
	DEVMETHOD_END
};

static driver_t vt9p_drv = {
	"virtio_p9fs",
	vt9p_mthds,
	sizeof(struct vt9p_softc)
};

/*
 * Module event handler: registers the virtio transport with the p9fs
 * client layer on load, and destroys the request zones on unload.
 */
static int
vt9p_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
		p9_init_zones();
		p9_register_trans(&vt9p_trans);
		break;
	case MOD_UNLOAD:
		/*
		 * NOTE(review): the transport registered at MOD_LOAD is not
		 * unregistered here -- confirm whether p9_register_trans()
		 * has a matching unregister call that should be made.
		 */
		p9_destroy_zones();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

DRIVER_MODULE(virtio_p9fs, virtio_pci, vt9p_drv, vt9p_modevent, 0);
MODULE_VERSION(virtio_p9fs, 1);
MODULE_DEPEND(virtio_p9fs, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_p9fs, p9fs, 1, 1, 1);