/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * xdf.c - Xen Virtual Block Device Driver
 * TODO:
 *	- support alternate block size (currently only DEV_BSIZE supported)
 *	- revalidate geometry for removable devices
 *
 * This driver exports Solaris disk device nodes, accepts IO requests from
 * those nodes, and services those requests by talking to a backend device
 * in another domain.
 *
 * Communication with the backend device is done via a ringbuffer (which is
 * managed via xvdi interfaces) and dma memory (which is managed via ddi
 * interfaces).
 *
 * Communication with the backend device is dependent upon establishing a
 * connection to the backend device.  This connection process involves
 * reading device configuration information from xenbus and publishing
 * some frontend runtime configuration parameters via the xenbus (for
 * consumption by the backend).  Once we've published runtime configuration
 * information via the xenbus, the backend device can enter the connected
 * state and we'll enter the XD_CONNECTED state.  But before we can allow
 * random IO to begin, we need to do IO to the backend device to determine
 * the device label and if flush operations are supported.  Once this is
 * done we enter the XD_READY state and can process any IO operations.
 *
 * We receive notifications of xenbus state changes for the backend device
 * (aka, the "other end") via the xdf_oe_change() callback.  This callback
 * is single threaded, meaning that we can't receive new notifications of
 * other end state changes while we're processing an outstanding
 * notification of an other end state change.  Therefore we can't do any
 * blocking operations from the xdf_oe_change() callback.  This is why we
 * have a separate taskq (xdf_ready_tq) which exists to do the necessary
 * IO to get us from the XD_CONNECTED to the XD_READY state.  All IO
 * generated by the xdf_ready_tq thread (xdf_ready_tq_thread) will go
 * through xdf_lb_rdwr(), which is a synchronous IO interface.  IOs
 * generated by the xdf_ready_tq_thread thread have priority over all
 * other IO requests.
 *
 * We also communicate with the backend device via the xenbus "media-req"
 * (XBP_MEDIA_REQ) property.  For more information on this see the
 * comments in blkif.h.
 */

#include <io/xdf.h>

#include <sys/conf.h>
#include <sys/dkio.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/kstat.h>
#include <sys/mach_mmu.h>
#ifdef XPV_HVM_DRIVER
#include <sys/xpv_support.h>
#include <sys/sunndi.h>
#else /* !XPV_HVM_DRIVER */
#include <sys/evtchn_impl.h>
#endif /* !XPV_HVM_DRIVER */
#include <public/io/xenbus.h>
#include <xen/sys/xenbus_impl.h>
#include <sys/scsi/generic/inquiry.h>
#include <xen/io/blkif_impl.h>
#include <sys/fdio.h>
#include <sys/cdio.h>

/*
 * DEBUG_EVAL can be used to include debug only statements without
 * having to use '#ifdef DEBUG' statements
 */
#ifdef DEBUG
#define	DEBUG_EVAL(x)	(x)
#else /* !DEBUG */
#define	DEBUG_EVAL(x)
#endif /* !DEBUG */

#define	XDF_DRAIN_MSEC_DELAY	(50*1000)	/* 00.05 sec */
#define	XDF_DRAIN_RETRY_COUNT	200		/* 10.00 sec */

#define	INVALID_DOMID	((domid_t)-1)
#define	FLUSH_DISKCACHE	0x1
#define	WRITE_BARRIER	0x2
#define	DEFAULT_FLUSH_BLOCK	156 /* block to write to cause a cache flush */
#define	USE_WRITE_BARRIER(vdp)						\
	((vdp)->xdf_feature_barrier && !(vdp)->xdf_flush_supported)
#define	USE_FLUSH_DISKCACHE(vdp)					\
	((vdp)->xdf_feature_barrier && (vdp)->xdf_flush_supported)
#define	IS_WRITE_BARRIER(vdp, bp)					\
	(!IS_READ(bp) && USE_WRITE_BARRIER(vdp) &&			\
	((bp)->b_un.b_addr == (vdp)->xdf_cache_flush_block))
#define	IS_FLUSH_DISKCACHE(bp)						\
	(!IS_READ(bp) && USE_FLUSH_DISKCACHE(vdp) && ((bp)->b_bcount == 0))

#define	VREQ_DONE(vreq)							\
	VOID2BOOLEAN(((vreq)->v_status == VREQ_DMAWIN_DONE) &&		\
	    (((vreq)->v_flush_diskcache == FLUSH_DISKCACHE) ||		\
	    (((vreq)->v_dmaw + 1) == (vreq)->v_ndmaws)))

#define	BP_VREQ(bp)		((v_req_t *)((bp)->av_back))
#define	BP_VREQ_SET(bp, vreq)	(((bp)->av_back = (buf_t *)(vreq)))

extern int	do_polled_io;

/* run-time tunables that we don't want the compiler to optimize away */
volatile int	xdf_debug = 0;
volatile boolean_t xdf_barrier_flush_disable = B_FALSE;

/* per module globals */
major_t	xdf_major;
static void	*xdf_ssp;
static kmem_cache_t *xdf_vreq_cache;
static kmem_cache_t *xdf_gs_cache;
static int	xdf_maxphys = XB_MAXPHYS;
static diskaddr_t xdf_flush_block = DEFAULT_FLUSH_BLOCK;
static int	xdf_fbrewrites;	/* flush block re-write count */

/* misc public functions (used by xdf_shell.c) */
int xdf_lb_rdwr(dev_info_t *, uchar_t, void *, diskaddr_t, size_t, void *);
int xdf_lb_getinfo(dev_info_t *, int, void *, void *);

/* misc private functions */
static void xdf_io_start(xdf_t *);

/* callbacks from common label */
static cmlb_tg_ops_t xdf_lb_ops = {
	TG_DK_OPS_VERSION_1,
	xdf_lb_rdwr,
	xdf_lb_getinfo
};

/*
 * I/O buffer DMA attributes
 * Make sure: one DMA window contains BLKIF_MAX_SEGMENTS_PER_REQUEST at most
 */
static ddi_dma_attr_t xb_dma_attr = {
	DMA_ATTR_V0,
	(uint64_t)0,			/* lowest address */
	(uint64_t)0xffffffffffffffff,	/* highest usable address */
	(uint64_t)0xffffff,		/* DMA counter limit max */
	(uint64_t)XB_BSIZE,		/* alignment in bytes */
	XB_BSIZE - 1,			/* bitmap of burst sizes */
	XB_BSIZE,			/* min transfer */
	(uint64_t)XB_MAX_XFER,		/* maximum transfer */
	(uint64_t)PAGEOFFSET,		/* 1 page segment length */
	BLKIF_MAX_SEGMENTS_PER_REQUEST,	/* maximum number of segments */
	XB_BSIZE,			/* granularity */
	0,				/* flags (reserved) */
};

static ddi_device_acc_attr_t xc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

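/*
 * Timeout handler used to retry I/O after a transient resource
 * shortage (e.g., a failed KM_NOSLEEP allocation).  It clears the
 * pending timeout id and restarts the I/O path so queued requests
 * get another chance to make progress.
 */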
static void
xdf_timeout_handler(void *arg)
{
	xdf_t *vdp = arg;

	mutex_enter(&vdp->xdf_dev_lk);
	vdp->xdf_timeout_id = 0;
	mutex_exit(&vdp->xdf_dev_lk);

	/* new timeout thread could be re-scheduled */
	xdf_io_start(vdp);
}

/*
 * callback func when DMA/GTE resources are available
 *
 * Note: we only register one callback function to grant table subsystem
 * since we only have one 'struct gnttab_free_callback' in xdf_t.
 */
static int
xdf_dmacallback(caddr_t arg)
{
	xdf_t *vdp = (xdf_t *)arg;
	ASSERT(vdp != NULL);

	DPRINTF(DMA_DBG, ("xdf@%s: DMA callback started\n",
	    vdp->xdf_addr));

	ddi_trigger_softintr(vdp->xdf_softintr_id);
	return (DDI_DMA_CALLBACK_DONE);
}

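/*
 * Allocate a grant table entry slot (ge_slot_t) for one ring request.
 * If grant references or slot memory aren't available, arrange for the
 * I/O to be retried later (via the grant table free callback or a
 * timeout) and return NULL.
 */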
static ge_slot_t *
gs_get(xdf_t *vdp, int isread)
{
	grant_ref_t gh;
	ge_slot_t *gs;

	/* try to alloc GTEs needed in this slot, first */
	if (gnttab_alloc_grant_references(
	    BLKIF_MAX_SEGMENTS_PER_REQUEST, &gh) == -1) {
		if (vdp->xdf_gnt_callback.next == NULL) {
			SETDMACBON(vdp);
			gnttab_request_free_callback(
			    &vdp->xdf_gnt_callback,
			    (void (*)(void *))xdf_dmacallback,
			    (void *)vdp,
			    BLKIF_MAX_SEGMENTS_PER_REQUEST);
		}
		return (NULL);
	}

	gs = kmem_cache_alloc(xdf_gs_cache, KM_NOSLEEP);
	if (gs == NULL) {
		gnttab_free_grant_references(gh);
		if (vdp->xdf_timeout_id == 0)
			/* restart I/O after one second */
			vdp->xdf_timeout_id =
			    timeout(xdf_timeout_handler, vdp, hz);
		return (NULL);
	}

	/* init gs_slot */
	gs->gs_oeid = vdp->xdf_peer;
	gs->gs_isread = isread;
	gs->gs_ghead = gh;
	gs->gs_ngrefs = 0;

	return (gs);
}

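/*
 * Release a ge_slot_t: end foreign access on every grant used by the
 * slot, return the unclaimed grant references, and unlink the slot
 * from its owning vreq.
 */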
static void
gs_free(ge_slot_t *gs)
{
	int i;

	/* release all grant table entry resources used in this slot */
	for (i = 0; i < gs->gs_ngrefs; i++)
		gnttab_end_foreign_access(gs->gs_ge[i], !gs->gs_isread, 0);
	gnttab_free_grant_references(gs->gs_ghead);
	list_remove(&gs->gs_vreq->v_gs, gs);
	kmem_cache_free(xdf_gs_cache, gs);
}

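/*
 * Claim the next free grant reference from the slot and grant the
 * backend access to the page at mfn; for reads the backend must fill
 * our buffer, so the grant is writable, otherwise it is read-only.
 */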
static grant_ref_t
gs_grant(ge_slot_t *gs, mfn_t mfn)
{
	grant_ref_t gr = gnttab_claim_grant_reference(&gs->gs_ghead);

	ASSERT(gr != -1);
	ASSERT(gs->gs_ngrefs < BLKIF_MAX_SEGMENTS_PER_REQUEST);
	gs->gs_ge[gs->gs_ngrefs++] = gr;
	gnttab_grant_foreign_access_ref(gr, gs->gs_oeid, mfn, !gs->gs_isread);

	return (gr);
}

/*
 * Alloc a vreq for this bp
 * bp->av_back contains the pointer to the vreq upon return
 */
static v_req_t *
vreq_get(xdf_t *vdp, buf_t *bp)
{
	v_req_t *vreq = NULL;

	ASSERT(BP_VREQ(bp) == NULL);

	vreq = kmem_cache_alloc(xdf_vreq_cache, KM_NOSLEEP);
	if (vreq == NULL) {
		if (vdp->xdf_timeout_id == 0)
			/* restart I/O after one second */
			vdp->xdf_timeout_id =
			    timeout(xdf_timeout_handler, vdp, hz);
		return (NULL);
	}
	bzero(vreq, sizeof (v_req_t));
	list_create(&vreq->v_gs, sizeof (ge_slot_t),
	    offsetof(ge_slot_t, gs_vreq_link));
	vreq->v_buf = bp;
	vreq->v_status = VREQ_INIT;
	vreq->v_runq = B_FALSE;
	BP_VREQ_SET(bp, vreq);
	/* init of other fields in vreq is up to the caller */

	list_insert_head(&vdp->xdf_vreq_act, (void *)vreq);

	return (vreq);
}

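/*
 * Free a vreq, tearing down whatever DMA state was established for it.
 * The v_status switch deliberately falls through so that each setup
 * stage reached in vreq_setup() is undone in reverse order.
 */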
static void
vreq_free(xdf_t *vdp, v_req_t *vreq)
{
	buf_t *bp = vreq->v_buf;

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(BP_VREQ(bp) == vreq);

	list_remove(&vdp->xdf_vreq_act, vreq);

	if (vreq->v_flush_diskcache == FLUSH_DISKCACHE)
		goto done;

	switch (vreq->v_status) {
	case VREQ_DMAWIN_DONE:
	case VREQ_GS_ALLOCED:
	case VREQ_DMABUF_BOUND:
		(void) ddi_dma_unbind_handle(vreq->v_dmahdl);
		/*FALLTHRU*/
	case VREQ_DMAMEM_ALLOCED:
		if (!ALIGNED_XFER(bp)) {
			ASSERT(vreq->v_abuf != NULL);
			if (!IS_ERROR(bp) && IS_READ(bp))
				bcopy(vreq->v_abuf, bp->b_un.b_addr,
				    bp->b_bcount);
			ddi_dma_mem_free(&vreq->v_align);
		}
		/*FALLTHRU*/
	case VREQ_MEMDMAHDL_ALLOCED:
		if (!ALIGNED_XFER(bp))
			ddi_dma_free_handle(&vreq->v_memdmahdl);
		/*FALLTHRU*/
	case VREQ_DMAHDL_ALLOCED:
		ddi_dma_free_handle(&vreq->v_dmahdl);
		break;
	default:
		break;
	}
done:
	ASSERT(!vreq->v_runq);
	list_destroy(&vreq->v_gs);
	kmem_cache_free(xdf_vreq_cache, vreq);
}

/*
 * Snarf new data if our flush block was re-written
 */
static void
check_fbwrite(xdf_t *vdp, buf_t *bp, daddr_t blkno)
{
	int nblks;
	boolean_t mapin;

	if (IS_WRITE_BARRIER(vdp, bp))
		return; /* write was a flush write */

	mapin = B_FALSE;
	nblks = bp->b_bcount >> DEV_BSHIFT;
	if (xdf_flush_block >= blkno && xdf_flush_block < (blkno + nblks)) {
		xdf_fbrewrites++;
		if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
			mapin = B_TRUE;
			bp_mapin(bp);
		}
		bcopy(bp->b_un.b_addr +
		    ((xdf_flush_block - blkno) << DEV_BSHIFT),
		    vdp->xdf_cache_flush_block, DEV_BSIZE);
		if (mapin)
			bp_mapout(bp);
	}
}

/*
 * Initialize the DMA and grant table resources for the buf
 */
static int
vreq_setup(xdf_t *vdp, v_req_t *vreq)
{
	int rc;
	ddi_dma_attr_t dmaattr;
	uint_t ndcs, ndws;
	ddi_dma_handle_t dh;
	ddi_dma_handle_t mdh;
	ddi_dma_cookie_t dc;
	ddi_acc_handle_t abh;
	caddr_t aba;
	ge_slot_t *gs;
	size_t bufsz;
	off_t off;
	size_t sz;
	buf_t *bp = vreq->v_buf;
	int dma_flags = (IS_READ(bp) ? DDI_DMA_READ : DDI_DMA_WRITE) |
	    DDI_DMA_STREAMING | DDI_DMA_PARTIAL;

	switch (vreq->v_status) {
	case VREQ_INIT:
		if (IS_FLUSH_DISKCACHE(bp)) {
			if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
				DPRINTF(DMA_DBG, ("xdf@%s: "
				    "get ge_slot failed\n", vdp->xdf_addr));
				return (DDI_FAILURE);
			}
			vreq->v_blkno = 0;
			vreq->v_nslots = 1;
			vreq->v_flush_diskcache = FLUSH_DISKCACHE;
			vreq->v_status = VREQ_GS_ALLOCED;
			gs->gs_vreq = vreq;
			list_insert_head(&vreq->v_gs, gs);
			return (DDI_SUCCESS);
		}

		if (IS_WRITE_BARRIER(vdp, bp))
			vreq->v_flush_diskcache = WRITE_BARRIER;
		vreq->v_blkno = bp->b_blkno +
		    (diskaddr_t)(uintptr_t)bp->b_private;
		/* See if we wrote new data to our flush block */
		if (!IS_READ(bp) && USE_WRITE_BARRIER(vdp))
			check_fbwrite(vdp, bp, vreq->v_blkno);
		vreq->v_status = VREQ_INIT_DONE;
		/*FALLTHRU*/

	case VREQ_INIT_DONE:
		/*
		 * alloc DMA handle
		 */
		rc = ddi_dma_alloc_handle(vdp->xdf_dip, &xb_dma_attr,
		    xdf_dmacallback, (caddr_t)vdp, &dh);
		if (rc != DDI_SUCCESS) {
			SETDMACBON(vdp);
			DPRINTF(DMA_DBG, ("xdf@%s: DMA handle alloc failed\n",
			    vdp->xdf_addr));
			return (DDI_FAILURE);
		}

		vreq->v_dmahdl = dh;
		vreq->v_status = VREQ_DMAHDL_ALLOCED;
		/*FALLTHRU*/

	case VREQ_DMAHDL_ALLOCED:
		/*
		 * alloc dma handle for 512-byte aligned buf
		 */
		if (!ALIGNED_XFER(bp)) {
			/*
			 * XXPV: we need to temporarily enlarge the seg
			 * boundary and s/g length to work round CR6381968
			 */
			dmaattr = xb_dma_attr;
			dmaattr.dma_attr_seg = (uint64_t)-1;
			dmaattr.dma_attr_sgllen = INT_MAX;
			rc = ddi_dma_alloc_handle(vdp->xdf_dip, &dmaattr,
			    xdf_dmacallback, (caddr_t)vdp, &mdh);
			if (rc != DDI_SUCCESS) {
				SETDMACBON(vdp);
				DPRINTF(DMA_DBG, ("xdf@%s: "
				    "unaligned buf DMAhandle alloc failed\n",
				    vdp->xdf_addr));
				return (DDI_FAILURE);
			}
			vreq->v_memdmahdl = mdh;
			vreq->v_status = VREQ_MEMDMAHDL_ALLOCED;
		}
		/*FALLTHRU*/

	case VREQ_MEMDMAHDL_ALLOCED:
		/*
		 * alloc 512-byte aligned buf
		 */
		if (!ALIGNED_XFER(bp)) {
			if (bp->b_flags & (B_PAGEIO | B_PHYS))
				bp_mapin(bp);
			rc = ddi_dma_mem_alloc(vreq->v_memdmahdl,
			    roundup(bp->b_bcount, XB_BSIZE), &xc_acc_attr,
			    DDI_DMA_STREAMING, xdf_dmacallback, (caddr_t)vdp,
			    &aba, &bufsz, &abh);
			if (rc != DDI_SUCCESS) {
				SETDMACBON(vdp);
				DPRINTF(DMA_DBG, ("xdf@%s: "
				    "DMA mem allocation failed\n",
				    vdp->xdf_addr));
				return (DDI_FAILURE);
			}

			vreq->v_abuf = aba;
			vreq->v_align = abh;
			vreq->v_status = VREQ_DMAMEM_ALLOCED;

			ASSERT(bufsz >= bp->b_bcount);
			if (!IS_READ(bp))
				bcopy(bp->b_un.b_addr, vreq->v_abuf,
				    bp->b_bcount);
		}
		/*FALLTHRU*/

	case VREQ_DMAMEM_ALLOCED:
		/*
		 * dma bind
		 */
		if (ALIGNED_XFER(bp)) {
			rc = ddi_dma_buf_bind_handle(vreq->v_dmahdl, bp,
			    dma_flags, xdf_dmacallback, (caddr_t)vdp,
			    &dc, &ndcs);
		} else {
			rc = ddi_dma_addr_bind_handle(vreq->v_dmahdl,
			    NULL, vreq->v_abuf, bp->b_bcount, dma_flags,
			    xdf_dmacallback, (caddr_t)vdp, &dc, &ndcs);
		}
		if (rc == DDI_DMA_MAPPED || rc == DDI_DMA_PARTIAL_MAP) {
			/* get num of dma windows */
			if (rc == DDI_DMA_PARTIAL_MAP) {
				rc = ddi_dma_numwin(vreq->v_dmahdl, &ndws);
				ASSERT(rc == DDI_SUCCESS);
			} else {
				ndws = 1;
			}
		} else {
			SETDMACBON(vdp);
			DPRINTF(DMA_DBG, ("xdf@%s: DMA bind failed\n",
			    vdp->xdf_addr));
			return (DDI_FAILURE);
		}

		vreq->v_dmac = dc;
		vreq->v_dmaw = 0;
		vreq->v_ndmacs = ndcs;
		vreq->v_ndmaws = ndws;
		vreq->v_nslots = ndws;
		vreq->v_status = VREQ_DMABUF_BOUND;
		/*FALLTHRU*/

	case VREQ_DMABUF_BOUND:
		/*
		 * get ge_slot, callback is set upon failure from gs_get(),
		 * if not set previously
		 */
		if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
			DPRINTF(DMA_DBG, ("xdf@%s: get ge_slot failed\n",
			    vdp->xdf_addr));
			return (DDI_FAILURE);
		}

		vreq->v_status = VREQ_GS_ALLOCED;
		gs->gs_vreq = vreq;
		list_insert_head(&vreq->v_gs, gs);
		break;

	case VREQ_GS_ALLOCED:
		/* nothing need to be done */
		break;

	case VREQ_DMAWIN_DONE:
		/*
		 * move to the next dma window
		 */
		ASSERT((vreq->v_dmaw + 1) < vreq->v_ndmaws);

		/* get a ge_slot for this DMA window */
		if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
			DPRINTF(DMA_DBG, ("xdf@%s: get ge_slot failed\n",
			    vdp->xdf_addr));
			return (DDI_FAILURE);
		}

		vreq->v_dmaw++;
		VERIFY(ddi_dma_getwin(vreq->v_dmahdl, vreq->v_dmaw, &off, &sz,
		    &vreq->v_dmac, &vreq->v_ndmacs) == DDI_SUCCESS);
		vreq->v_status = VREQ_GS_ALLOCED;
		gs->gs_vreq = vreq;
		list_insert_head(&vreq->v_gs, gs);
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

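/*
 * Register this device with the common disk label (cmlb) module.  The
 * device type, node type, and labeling flags depend on whether the
 * backing device is a CD-ROM and whether it is removable.
 */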
static int
xdf_cmlb_attach(xdf_t *vdp)
{
	dev_info_t	*dip = vdp->xdf_dip;

	return (cmlb_attach(dip, &xdf_lb_ops,
	    XD_IS_CD(vdp) ? DTYPE_RODIRECT : DTYPE_DIRECT,
	    XD_IS_RM(vdp),
	    B_TRUE,
	    XD_IS_CD(vdp) ? DDI_NT_CD_XVMD : DDI_NT_BLOCK_XVMD,
#if defined(XPV_HVM_DRIVER)
	    (XD_IS_CD(vdp) ? 0 : CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT) |
	    CMLB_INTERNAL_MINOR_NODES,
#else /* !XPV_HVM_DRIVER */
	    XD_IS_CD(vdp) ? 0 : CMLB_FAKE_LABEL_ONE_PARTITION,
#endif /* !XPV_HVM_DRIVER */
	    vdp->xdf_vd_lbl, NULL));
}

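/*
 * Fail a buf with the given error.  A resid of zero means nothing was
 * transferred, so the entire b_bcount is marked residual before the
 * buf is biodone()ed.
 */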
static void
xdf_io_err(buf_t *bp, int err, size_t resid)
{
	bioerror(bp, err);
	if (resid == 0)
		bp->b_resid = bp->b_bcount;
	biodone(bp);
}

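/*
 * I/O kstat accounting helpers.  A buf is counted on the runq once it
 * holds a ring buffer slot (vreq->v_runq is set) and on the waitq
 * otherwise.  All of these expect xdf_dev_lk to be held and quietly do
 * nothing if no kstat has been created yet.
 */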
static void
xdf_kstat_enter(xdf_t *vdp, buf_t *bp)
{
	v_req_t *vreq = BP_VREQ(bp);

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if (vdp->xdf_xdev_iostat == NULL)
		return;
	if ((vreq != NULL) && vreq->v_runq) {
		kstat_runq_enter(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
	} else {
		kstat_waitq_enter(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
	}
}

static void
xdf_kstat_exit(xdf_t *vdp, buf_t *bp)
{
	v_req_t *vreq = BP_VREQ(bp);

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if (vdp->xdf_xdev_iostat == NULL)
		return;
	if ((vreq != NULL) && vreq->v_runq) {
		kstat_runq_exit(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
	} else {
		kstat_waitq_exit(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
	}
}

static void
xdf_kstat_waitq_to_runq(xdf_t *vdp, buf_t *bp)
{
	v_req_t *vreq = BP_VREQ(bp);

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(!vreq->v_runq);

	vreq->v_runq = B_TRUE;
	if (vdp->xdf_xdev_iostat == NULL)
		return;
	kstat_waitq_to_runq(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
}

static void
xdf_kstat_runq_to_waitq(xdf_t *vdp, buf_t *bp)
{
	v_req_t *vreq = BP_VREQ(bp);

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(vreq->v_runq);

	vreq->v_runq = B_FALSE;
	if (vdp->xdf_xdev_iostat == NULL)
		return;
	kstat_runq_back_to_waitq(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
}

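/*
 * Create and install the "disk" I/O kstat for this device.  Only one
 * kstat can exist at a time; the waitq/runq state of any outstanding
 * bufs is folded into the new kstat so its counters match reality.
 * Returns 0 on success and -1 on failure.
 */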
int
xdf_kstat_create(dev_info_t *dip, char *ks_module, int instance)
{
	xdf_t		*vdp = (xdf_t *)ddi_get_driver_private(dip);
	kstat_t		*kstat;
	buf_t		*bp;

	if ((kstat = kstat_create(
	    ks_module, instance, NULL, "disk",
	    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT)) == NULL)
		return (-1);

	/* See comment about locking in xdf_kstat_delete(). */
	mutex_enter(&vdp->xdf_iostat_lk);
	mutex_enter(&vdp->xdf_dev_lk);

	/* only one kstat can exist at a time */
	if (vdp->xdf_xdev_iostat != NULL) {
		mutex_exit(&vdp->xdf_dev_lk);
		mutex_exit(&vdp->xdf_iostat_lk);
		kstat_delete(kstat);
		return (-1);
	}

	vdp->xdf_xdev_iostat = kstat;
	vdp->xdf_xdev_iostat->ks_lock = &vdp->xdf_dev_lk;
	kstat_install(vdp->xdf_xdev_iostat);

	/*
	 * Now that we've created a kstat, we need to update the waitq and
	 * runq counts for the kstat to reflect our current state.
	 *
	 * For a buf_t structure to be on the runq, it must have a ring
	 * buffer slot associated with it.  To get a ring buffer slot the
	 * buf must first have a v_req_t and a ge_slot_t associated with it.
	 * Then when it is granted a ring buffer slot, v_runq will be set to
	 * true.
	 *
	 * For a buf_t structure to be on the waitq, it must not be on the
	 * runq.  So to find all the buf_t's that should be on waitq, we
	 * walk the active buf list and add any buf_t's which aren't on the
	 * runq to the waitq.
	 */
	bp = vdp->xdf_f_act;
	while (bp != NULL) {
		xdf_kstat_enter(vdp, bp);
		bp = bp->av_forw;
	}
	if (vdp->xdf_ready_tq_bp != NULL)
		xdf_kstat_enter(vdp, vdp->xdf_ready_tq_bp);

	mutex_exit(&vdp->xdf_dev_lk);
	mutex_exit(&vdp->xdf_iostat_lk);
	return (0);
}

void
xdf_kstat_delete(dev_info_t *dip)
{
	xdf_t		*vdp = (xdf_t *)ddi_get_driver_private(dip);
	kstat_t		*kstat;
	buf_t		*bp;

	/*
	 * The locking order here is xdf_iostat_lk and then xdf_dev_lk.
	 * xdf_dev_lk is used to protect the xdf_xdev_iostat pointer
	 * and the contents of our kstat.  xdf_iostat_lk is used
	 * to protect the allocation and freeing of the actual kstat.
	 * xdf_dev_lk can't be used for this purpose because kstat
	 * readers use it to access the contents of the kstat and
	 * hence it can't be held when calling kstat_delete().
	 */
	mutex_enter(&vdp->xdf_iostat_lk);
	mutex_enter(&vdp->xdf_dev_lk);

	if (vdp->xdf_xdev_iostat == NULL) {
		mutex_exit(&vdp->xdf_dev_lk);
		mutex_exit(&vdp->xdf_iostat_lk);
		return;
	}

	/*
	 * We're about to destroy the kstat structures, so it isn't really
	 * necessary to update the runq and waitq counts.  But, since this
	 * isn't a hot code path we can afford to be a little pedantic and
	 * go ahead and decrement the runq and waitq kstat counters to zero
	 * before free'ing them.  This helps us ensure that we've gotten all
	 * our accounting correct.
	 *
	 * For an explanation of how we determine which buffers go on the
	 * runq vs which go on the waitq, see the comments in
	 * xdf_kstat_create().
	 */
	bp = vdp->xdf_f_act;
	while (bp != NULL) {
		xdf_kstat_exit(vdp, bp);
		bp = bp->av_forw;
	}
	if (vdp->xdf_ready_tq_bp != NULL)
		xdf_kstat_exit(vdp, vdp->xdf_ready_tq_bp);

	kstat = vdp->xdf_xdev_iostat;
	vdp->xdf_xdev_iostat = NULL;
	mutex_exit(&vdp->xdf_dev_lk);
	kstat_delete(kstat);
	mutex_exit(&vdp->xdf_iostat_lk);
}

/*
 * Add an IO request onto the active queue.
 *
 * We have to detect IOs generated by xdf_ready_tq_thread.  These IOs
 * are used to establish a connection to the backend, so they receive
 * priority over all other IOs.  Since xdf_ready_tq_thread only does
 * synchronous IO, there can only be one xdf_ready_tq_thread request at any
 * given time and we record the buf associated with that request in
 * xdf_ready_tq_bp.
 */
static void
xdf_bp_push(xdf_t *vdp, buf_t *bp)
{
	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(bp->av_forw == NULL);

	xdf_kstat_enter(vdp, bp);

	if (curthread == vdp->xdf_ready_tq_thread) {
		/* new IO requests from the ready thread */
		ASSERT(vdp->xdf_ready_tq_bp == NULL);
		vdp->xdf_ready_tq_bp = bp;
		return;
	}

	/* this is a normal IO request */
	ASSERT(bp != vdp->xdf_ready_tq_bp);

	if (vdp->xdf_f_act == NULL) {
		/* this is the only IO on the active queue */
		ASSERT(vdp->xdf_l_act == NULL);
		ASSERT(vdp->xdf_i_act == NULL);
		vdp->xdf_f_act = vdp->xdf_l_act = vdp->xdf_i_act = bp;
		return;
	}

	/* add this IO to the tail of the active queue */
	vdp->xdf_l_act->av_forw = bp;
	vdp->xdf_l_act = bp;
	if (vdp->xdf_i_act == NULL)
		vdp->xdf_i_act = bp;
}

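/*
 * Remove a completed IO from the active queue, fixing up the first
 * (xdf_f_act), last (xdf_l_act), and index (xdf_i_act) pointers as
 * needed.
 */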
static void
xdf_bp_pop(xdf_t *vdp, buf_t *bp)
{
	buf_t	*bp_iter;

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(VREQ_DONE(BP_VREQ(bp)));

	if (vdp->xdf_ready_tq_bp == bp) {
		/* we're done with a ready thread IO request */
		ASSERT(bp->av_forw == NULL);
		vdp->xdf_ready_tq_bp = NULL;
		return;
	}

	/* we're done with a normal IO request */
	ASSERT((bp->av_forw != NULL) || (bp == vdp->xdf_l_act));
	ASSERT((bp->av_forw == NULL) || (bp != vdp->xdf_l_act));
	ASSERT(VREQ_DONE(BP_VREQ(vdp->xdf_f_act)));
	ASSERT(vdp->xdf_f_act != vdp->xdf_i_act);

	if (bp == vdp->xdf_f_act) {
		/* This IO was at the head of our active queue. */
		vdp->xdf_f_act = bp->av_forw;
		if (bp == vdp->xdf_l_act)
			vdp->xdf_l_act = NULL;
	} else {
		/* This IO finished before some other pending IOs. */
		bp_iter = vdp->xdf_f_act;
		while (bp != bp_iter->av_forw) {
			bp_iter = bp_iter->av_forw;
			ASSERT(VREQ_DONE(BP_VREQ(bp_iter)));
			ASSERT(bp_iter != vdp->xdf_i_act);
		}
		bp_iter->av_forw = bp->av_forw;
		if (bp == vdp->xdf_l_act)
			vdp->xdf_l_act = bp_iter;
	}
	bp->av_forw = NULL;
}

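/*
 * Return the next buf that needs ring processing.  In the XD_CONNECTED
 * state only the ready thread's buf is eligible; in the XD_READY state
 * we scan the active queue starting at the index pointer, skipping
 * over bufs whose vreqs have already completed.
 */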
static buf_t *
xdf_bp_next(xdf_t *vdp)
{
	v_req_t	*vreq;
	buf_t	*bp;

	if (vdp->xdf_state == XD_CONNECTED) {
		/*
		 * If we're in the XD_CONNECTED state, we only service IOs
		 * from the xdf_ready_tq_thread thread.
		 */
		if ((bp = vdp->xdf_ready_tq_bp) == NULL)
			return (NULL);
		if (((vreq = BP_VREQ(bp)) == NULL) || (!VREQ_DONE(vreq)))
			return (bp);
		return (NULL);
	}

	/* if we're not in the XD_CONNECTED or XD_READY state we can't do IO */
	if (vdp->xdf_state != XD_READY)
		return (NULL);

	ASSERT(vdp->xdf_ready_tq_bp == NULL);
	for (;;) {
		if ((bp = vdp->xdf_i_act) == NULL)
			return (NULL);
		if (((vreq = BP_VREQ(bp)) == NULL) || (!VREQ_DONE(vreq)))
			return (bp);

		/* advance the active buf index pointer */
		vdp->xdf_i_act = bp->av_forw;
	}
}

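/*
 * Complete one ring response.  The response id encodes the ge_slot_t
 * for the request; free it, and once all slots (one per DMA window)
 * for the vreq have completed, remove the buf from the active queue
 * and biodone() it, flagging partial transfers as EIO.
 */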
static void
xdf_io_fini(xdf_t *vdp, uint64_t id, int bioerr)
{
	ge_slot_t	*gs = (ge_slot_t *)(uintptr_t)id;
	v_req_t		*vreq = gs->gs_vreq;
	buf_t		*bp = vreq->v_buf;

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(BP_VREQ(bp) == vreq);

	gs_free(gs);

	if (bioerr != 0)
		bioerror(bp, bioerr);
	ASSERT(vreq->v_nslots > 0);
	if (--vreq->v_nslots > 0)
		return;

	/* remove this IO from our active queue */
	xdf_bp_pop(vdp, bp);

	ASSERT(vreq->v_runq);
	xdf_kstat_exit(vdp, bp);
	vreq->v_runq = B_FALSE;
	vreq_free(vdp, vreq);

	if (IS_ERROR(bp)) {
		xdf_io_err(bp, geterror(bp), 0);
	} else if (bp->b_resid != 0) {
		/* Partial transfers are an error */
		xdf_io_err(bp, EIO, bp->b_resid);
	} else {
		biodone(bp);
	}
}

/*
 * xdf interrupt handler
 */
static uint_t
xdf_intr_locked(xdf_t *vdp)
{
	xendev_ring_t *xbr;
	blkif_response_t *resp;
	int bioerr;
	uint64_t id;
	uint8_t op;
	uint16_t status;
	ddi_acc_handle_t acchdl;

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if ((xbr = vdp->xdf_xb_ring) == NULL)
		return (DDI_INTR_UNCLAIMED);

	acchdl = vdp->xdf_xb_ring_hdl;

	/*
	 * complete all requests which have a response
	 */
	while ((resp = xvdi_ring_get_response(xbr)) != NULL) {
		id = ddi_get64(acchdl, &resp->id);
		op = ddi_get8(acchdl, &resp->operation);
		status = ddi_get16(acchdl, (uint16_t *)&resp->status);
		DPRINTF(INTR_DBG, ("resp: op %d id %"PRIu64" status %d\n",
		    op, id, status));

		if (status != BLKIF_RSP_OKAY) {
			DPRINTF(IO_DBG, ("xdf@%s: I/O error while %s",
			    vdp->xdf_addr,
			    (op == BLKIF_OP_READ) ? "reading" : "writing"));
			bioerr = EIO;
		} else {
			bioerr = 0;
		}

		xdf_io_fini(vdp, id, bioerr);
	}
	return (DDI_INTR_CLAIMED);
}

/*
 * xdf_intr runs at PIL 5, so no one else can grab xdf_dev_lk and
 * block at a lower pil.
 */
static uint_t
xdf_intr(caddr_t arg)
{
	xdf_t *vdp = (xdf_t *)arg;
	int rv;

	mutex_enter(&vdp->xdf_dev_lk);
	rv = xdf_intr_locked(vdp);
	mutex_exit(&vdp->xdf_dev_lk);

	if (!do_polled_io)
		xdf_io_start(vdp);

	return (rv);
}

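/*
 * Push any newly queued requests out to the shared ring and, if the
 * event channel is valid, notify the backend that work is pending.
 */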
static void
xdf_ring_push(xdf_t *vdp)
{
	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if (vdp->xdf_xb_ring == NULL)
		return;

	if (xvdi_ring_push_request(vdp->xdf_xb_ring)) {
		DPRINTF(IO_DBG, (
		    "xdf@%s: xdf_ring_push: sent request(s) to backend\n",
		    vdp->xdf_addr));
	}

	if (xvdi_get_evtchn(vdp->xdf_dip) != INVALID_EVTCHN)
		xvdi_notify_oe(vdp->xdf_dip);
}

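/*
 * Poll until every outstanding request on the ring has been consumed
 * and answered by the backend, processing responses as they arrive.
 * Gives up with EIO after XDF_DRAIN_RETRY_COUNT delay iterations
 * (roughly 10 seconds total).
 */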
static int
xdf_ring_drain_locked(xdf_t *vdp)
{
	int		pollc, rv = 0;

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if (xdf_debug & SUSRES_DBG)
		xen_printf("xdf_ring_drain: start\n");

	for (pollc = 0; pollc < XDF_DRAIN_RETRY_COUNT; pollc++) {
		if (vdp->xdf_xb_ring == NULL)
			goto out;

		if (xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring))
			(void) xdf_intr_locked(vdp);
		if (!xvdi_ring_has_incomp_request(vdp->xdf_xb_ring))
			goto out;
		xdf_ring_push(vdp);

		/* file-backed devices can be slow */
		mutex_exit(&vdp->xdf_dev_lk);
#ifdef XPV_HVM_DRIVER
		(void) HYPERVISOR_yield();
#endif /* XPV_HVM_DRIVER */
		delay(drv_usectohz(XDF_DRAIN_MSEC_DELAY));
		mutex_enter(&vdp->xdf_dev_lk);
	}
	cmn_err(CE_WARN, "xdf@%s: xdf_ring_drain: timeout", vdp->xdf_addr);

out:
	if (vdp->xdf_xb_ring != NULL) {
		if (xvdi_ring_has_incomp_request(vdp->xdf_xb_ring) ||
		    xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring))
			rv = EIO;
	}
	if (xdf_debug & SUSRES_DBG)
		xen_printf("xdf@%s: xdf_ring_drain: end, err=%d\n",
		    vdp->xdf_addr, rv);
	return (rv);
}

static int
xdf_ring_drain(xdf_t *vdp)
{
	int rv;
	mutex_enter(&vdp->xdf_dev_lk);
	rv = xdf_ring_drain_locked(vdp);
	mutex_exit(&vdp->xdf_dev_lk);
	return (rv);
}

/*
 * Destroy all v_req_t, grant table entries, and our ring buffer.
 */
static void
xdf_ring_destroy(xdf_t *vdp)
{
	v_req_t *vreq;
	buf_t *bp;
	ge_slot_t *gs;

	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if ((vdp->xdf_state != XD_INIT) &&
	    (vdp->xdf_state != XD_CONNECTED) &&
	    (vdp->xdf_state != XD_READY)) {
		ASSERT(vdp->xdf_xb_ring == NULL);
		ASSERT(vdp->xdf_xb_ring_hdl == NULL);
		ASSERT(vdp->xdf_peer == INVALID_DOMID);
		ASSERT(vdp->xdf_evtchn == INVALID_EVTCHN);
		ASSERT(list_is_empty(&vdp->xdf_vreq_act));
		return;
	}

	/*
	 * We don't want to receive async notifications from the backend
	 * when it finishes processing ring entries.
	 */
#ifdef XPV_HVM_DRIVER
	ec_unbind_evtchn(vdp->xdf_evtchn);
#else /* !XPV_HVM_DRIVER */
	(void) ddi_remove_intr(vdp->xdf_dip, 0, NULL);
#endif /* !XPV_HVM_DRIVER */

	/*
	 * Drain any requests in the ring.  We need to do this before we
	 * can free grant table entries, because if active ring entries
	 * point to grants, then the backend could be trying to access
	 * those grants.
	 */
	(void) xdf_ring_drain_locked(vdp);

	/* We're done talking to the backend so free up our event channel */
	xvdi_free_evtchn(vdp->xdf_dip);
	vdp->xdf_evtchn = INVALID_EVTCHN;

	while ((vreq = list_head(&vdp->xdf_vreq_act)) != NULL) {
		bp = vreq->v_buf;
		ASSERT(BP_VREQ(bp) == vreq);

		/* Free up any grant table entries associated with this IO */
		while ((gs = list_head(&vreq->v_gs)) != NULL)
			gs_free(gs);

		/* If this IO was on the runq, move it back to the waitq. */
		if (vreq->v_runq)
			xdf_kstat_runq_to_waitq(vdp, bp);

		/*
		 * Reset any buf IO state since we're going to re-issue the
		 * IO when we reconnect.
		 */
		vreq_free(vdp, vreq);
		BP_VREQ_SET(bp, NULL);
		bioerror(bp, 0);
	}

	/* reset the active queue index pointer */
	vdp->xdf_i_act = vdp->xdf_f_act;

	/* Destroy the ring */
	xvdi_free_ring(vdp->xdf_xb_ring);
	vdp->xdf_xb_ring = NULL;
	vdp->xdf_xb_ring_hdl = NULL;
	vdp->xdf_peer = INVALID_DOMID;
}

void
xdfmin(struct buf *bp)
{
	if (bp->b_bcount > xdf_maxphys)
		bp->b_bcount = xdf_maxphys;
}
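
/*
 * xdfmin() is a minphys(9F)-style clamp on b_bcount.  As an
 * illustrative sketch (the actual entry points appear elsewhere in
 * this file), a raw read routine would typically pass it to
 * physio(9F) like this:
 *
 *	static int
 *	xdf_read(dev_t dev, struct uio *uiop, cred_t *credp)
 *	{
 *		return (physio(xdf_strategy, NULL, dev, B_READ,
 *		    xdfmin, uiop));
 *	}
 */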

/*
 * Check if we have a pending "eject" media request.
 */
static int
xdf_eject_pending(xdf_t *vdp)
{
	dev_info_t *dip = vdp->xdf_dip;
	char *xsname, *str;

	if (!vdp->xdf_media_req_supported)
		return (B_FALSE);

	if (((xsname = xvdi_get_xsname(dip)) == NULL) ||
	    (xenbus_read_str(xsname, XBP_MEDIA_REQ, &str) != 0))
		return (B_FALSE);

	if (strcmp(str, XBV_MEDIA_REQ_EJECT) != 0) {
		strfree(str);
		return (B_FALSE);
	}
	strfree(str);
	return (B_TRUE);
}

/*
 * Generate a media request.
 */
static int
xdf_media_req(xdf_t *vdp, char *req, boolean_t media_required)
{
	dev_info_t *dip = vdp->xdf_dip;
	char *xsname;

	/*
	 * We can't be holding xdf_dev_lk because xenbus_printf() can
	 * block while waiting for a PIL 1 interrupt message.  This
	 * would cause a deadlock with xdf_intr(), which needs to grab
	 * xdf_dev_lk as well and runs at PIL 5.
	 */
	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));

	if ((xsname = xvdi_get_xsname(dip)) == NULL)
		return (ENXIO);

	/* Check if we support media requests */
	if (!XD_IS_CD(vdp) || !vdp->xdf_media_req_supported)
		return (ENOTTY);

	/* If an eject is pending then don't allow any new requests */
	if (xdf_eject_pending(vdp))
		return (ENXIO);

	/* Make sure that there is media present */
	if (media_required && (vdp->xdf_xdev_nblocks == 0))
		return (ENXIO);

	/* We only allow operations when the device is ready and connected */
	if (vdp->xdf_state != XD_READY)
		return (EIO);

	if (xenbus_printf(XBT_NULL, xsname, XBP_MEDIA_REQ, "%s", req) != 0)
		return (EIO);

	return (0);
}
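
/*
 * Lock-ordering sketch for the routine above: callers must hold
 * xdf_cb_lk and must have dropped xdf_dev_lk, per the PIL deadlock
 * note in xdf_media_req().  A caller that holds both locks (as
 * xdf_connect_locked() does later in this file) follows this pattern:
 *
 *	mutex_exit(&vdp->xdf_dev_lk);
 *	(void) xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
 *	mutex_enter(&vdp->xdf_dev_lk);
 */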

/*
 * Populate a single blkif_request_t with a buf.
 */
static void
xdf_process_rreq(xdf_t *vdp, struct buf *bp, blkif_request_t *rreq)
{
	grant_ref_t gr;
	uint8_t fsect, lsect;
	size_t bcnt;
	paddr_t dma_addr;
	off_t blk_off;
	dev_info_t *dip = vdp->xdf_dip;
	blkif_vdev_t vdev = xvdi_get_vdevnum(dip);
	v_req_t *vreq = BP_VREQ(bp);
	uint64_t blkno = vreq->v_blkno;
	uint_t ndmacs = vreq->v_ndmacs;
	ddi_acc_handle_t acchdl = vdp->xdf_xb_ring_hdl;
	int seg = 0;
	int isread = IS_READ(bp);
	ge_slot_t *gs = list_head(&vreq->v_gs);

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(vreq->v_status == VREQ_GS_ALLOCED);

	if (isread)
		ddi_put8(acchdl, &rreq->operation, BLKIF_OP_READ);
	else {
		switch (vreq->v_flush_diskcache) {
		case FLUSH_DISKCACHE:
			ddi_put8(acchdl, &rreq->operation,
			    BLKIF_OP_FLUSH_DISKCACHE);
			ddi_put16(acchdl, &rreq->handle, vdev);
			ddi_put64(acchdl, &rreq->id,
			    (uint64_t)(uintptr_t)(gs));
			ddi_put8(acchdl, &rreq->nr_segments, 0);
			vreq->v_status = VREQ_DMAWIN_DONE;
			return;
		case WRITE_BARRIER:
			ddi_put8(acchdl, &rreq->operation,
			    BLKIF_OP_WRITE_BARRIER);
			break;
		default:
			if (!vdp->xdf_wce)
				ddi_put8(acchdl, &rreq->operation,
				    BLKIF_OP_WRITE_BARRIER);
			else
				ddi_put8(acchdl, &rreq->operation,
				    BLKIF_OP_WRITE);
			break;
		}
	}

	ddi_put16(acchdl, &rreq->handle, vdev);
	ddi_put64(acchdl, &rreq->sector_number, blkno);
	ddi_put64(acchdl, &rreq->id, (uint64_t)(uintptr_t)(gs));

	/*
	 * Loop until all segments are populated or there are no more
	 * DMA cookies in the buf.
	 */
	for (;;) {
		/*
		 * Each segment of a blkif request can transfer up to
		 * one 4K page of data.
		 */
		bcnt = vreq->v_dmac.dmac_size;
		dma_addr = vreq->v_dmac.dmac_laddress;
		blk_off = (uint_t)((paddr_t)XB_SEGOFFSET & dma_addr);
		fsect = blk_off >> XB_BSHIFT;
		lsect = fsect + (bcnt >> XB_BSHIFT) - 1;

		ASSERT(bcnt <= PAGESIZE);
		ASSERT((bcnt % XB_BSIZE) == 0);
		ASSERT((blk_off & XB_BMASK) == 0);
		ASSERT(fsect < XB_MAX_SEGLEN / XB_BSIZE &&
		    lsect < XB_MAX_SEGLEN / XB_BSIZE);

		gr = gs_grant(gs, PATOMA(dma_addr) >> PAGESHIFT);
		ddi_put32(acchdl, &rreq->seg[seg].gref, gr);
		ddi_put8(acchdl, &rreq->seg[seg].first_sect, fsect);
		ddi_put8(acchdl, &rreq->seg[seg].last_sect, lsect);

		DPRINTF(IO_DBG, (
		    "xdf@%s: seg%d: dmacS %lu blk_off %ld\n",
		    vdp->xdf_addr, seg, vreq->v_dmac.dmac_size, blk_off));
		DPRINTF(IO_DBG, (
		    "xdf@%s: seg%d: fs %d ls %d gr %d dma 0x%"PRIx64"\n",
		    vdp->xdf_addr, seg, fsect, lsect, gr, dma_addr));

		blkno += (bcnt >> XB_BSHIFT);
		seg++;
		ASSERT(seg <= BLKIF_MAX_SEGMENTS_PER_REQUEST);
		if (--ndmacs) {
			ddi_dma_nextcookie(vreq->v_dmahdl, &vreq->v_dmac);
			continue;
		}

		vreq->v_status = VREQ_DMAWIN_DONE;
		vreq->v_blkno = blkno;
		break;
	}
	ddi_put8(acchdl, &rreq->nr_segments, seg);
	DPRINTF(IO_DBG, (
	    "xdf@%s: xdf_process_rreq: request id=%"PRIx64" ready\n",
	    vdp->xdf_addr, rreq->id));
}
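
/*
 * Worked example of the segment math above, assuming 512-byte ring
 * sectors (XB_BSIZE == 512, so XB_BSHIFT == 9) and 4K pages: for a
 * DMA cookie with dmac_laddress 0x80031200 and dmac_size 0x600
 * (three sectors),
 *
 *	blk_off = 0x80031200 & XB_SEGOFFSET = 0x200
 *	fsect   = 0x200 >> 9 = 1
 *	lsect   = 1 + (0x600 >> 9) - 1 = 3
 *
 * so the backend is asked to transfer sectors 1 through 3 of the 4K
 * page granted via gs_grant().
 */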

static void
xdf_io_start(xdf_t *vdp)
{
	struct buf *bp;
	v_req_t *vreq;
	blkif_request_t *rreq;
	boolean_t rreqready = B_FALSE;

	mutex_enter(&vdp->xdf_dev_lk);

	/*
	 * Populate the ring request(s).  Loop until there is no buf to
	 * transfer or no free slot available in the I/O ring.
	 */
	for (;;) {
		/* don't start any new IO if we're suspending */
		if (vdp->xdf_suspending)
			break;
		if ((bp = xdf_bp_next(vdp)) == NULL)
			break;

		/* if the buf doesn't already have a vreq, allocate one */
		if (((vreq = BP_VREQ(bp)) == NULL) &&
		    ((vreq = vreq_get(vdp, bp)) == NULL))
			break;

		/* alloc DMA/GTE resources */
		if (vreq_setup(vdp, vreq) != DDI_SUCCESS)
			break;

		/* get next blkif_request in the ring */
		if ((rreq = xvdi_ring_get_request(vdp->xdf_xb_ring)) == NULL)
			break;
		bzero(rreq, sizeof (blkif_request_t));
		rreqready = B_TRUE;

		/* populate blkif_request with this buf */
		xdf_process_rreq(vdp, bp, rreq);

		/*
		 * This buffer/vreq pair has been allocated ring buffer
		 * resources, so if it isn't already on our runq, add it.
		 */
		if (!vreq->v_runq)
			xdf_kstat_waitq_to_runq(vdp, bp);
	}

	/* Send the request(s) to the backend */
	if (rreqready)
		xdf_ring_push(vdp);

	mutex_exit(&vdp->xdf_dev_lk);
}


/* check if a partition is open; partition -1 checks all partitions */
static boolean_t
xdf_isopen(xdf_t *vdp, int partition)
{
	int i;
	ulong_t parbit;
	boolean_t rval = B_FALSE;

	ASSERT((partition == -1) ||
	    ((partition >= 0) && (partition < XDF_PEXT)));

	if (partition == -1)
		parbit = (ulong_t)-1;
	else
		parbit = 1 << partition;

	for (i = 0; i < OTYPCNT; i++) {
		if (vdp->xdf_vd_open[i] & parbit)
			rval = B_TRUE;
	}

	return (rval);
}
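
/*
 * Example: for partition 2, parbit is 1 << 2 == 0x4, so the loop above
 * reports the partition open if bit 2 is set in any per-open-type mask
 * in xdf_vd_open[]; partition == -1 turns parbit into an all-ones mask
 * and therefore checks every partition at once.
 */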

/*
 * The connection should never be closed as long as someone is holding
 * us open, there is pending IO, or someone is waiting for a
 * connection.
 */
static boolean_t
xdf_busy(xdf_t *vdp)
{
	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if ((vdp->xdf_xb_ring != NULL) &&
	    xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring)) {
		ASSERT(vdp->xdf_state != XD_CLOSED);
		return (B_TRUE);
	}

	if (!list_is_empty(&vdp->xdf_vreq_act) || (vdp->xdf_f_act != NULL)) {
		ASSERT(vdp->xdf_state != XD_CLOSED);
		return (B_TRUE);
	}

	if (xdf_isopen(vdp, -1)) {
		ASSERT(vdp->xdf_state != XD_CLOSED);
		return (B_TRUE);
	}

	if (vdp->xdf_connect_req > 0) {
		ASSERT(vdp->xdf_state != XD_CLOSED);
		return (B_TRUE);
	}

	return (B_FALSE);
}

static void
xdf_set_state(xdf_t *vdp, xdf_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	DPRINTF(DDI_DBG, ("xdf@%s: state change %d -> %d\n",
	    vdp->xdf_addr, vdp->xdf_state, new_state));
	vdp->xdf_state = new_state;
	cv_broadcast(&vdp->xdf_dev_cv);
}

static void
xdf_disconnect(xdf_t *vdp, xdf_state_t new_state, boolean_t quiet)
{
	dev_info_t *dip = vdp->xdf_dip;
	boolean_t busy;

	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
	ASSERT((new_state == XD_UNKNOWN) || (new_state == XD_CLOSED));

	/* Check if we're already there. */
	if (vdp->xdf_state == new_state)
		return;

	mutex_enter(&vdp->xdf_dev_lk);
	busy = xdf_busy(vdp);

	/* If we're already closed then there's nothing to do. */
	if (vdp->xdf_state == XD_CLOSED) {
		ASSERT(!busy);
		xdf_set_state(vdp, new_state);
		mutex_exit(&vdp->xdf_dev_lk);
		return;
	}

#ifdef DEBUG
	/* Uh-oh.  Warn the user that something bad has happened. */
	if (!quiet && busy && (vdp->xdf_state == XD_READY) &&
	    (vdp->xdf_xdev_nblocks != 0)) {
		cmn_err(CE_WARN, "xdf@%s: disconnected while in use",
		    vdp->xdf_addr);
	}
#endif /* DEBUG */

	xdf_ring_destroy(vdp);

	/* If we're busy then we can only go into the unknown state */
	xdf_set_state(vdp, (busy) ? XD_UNKNOWN : new_state);
	mutex_exit(&vdp->xdf_dev_lk);

	/* if we're closed now, let the other end know */
	if (vdp->xdf_state == XD_CLOSED)
		(void) xvdi_switch_state(dip, XBT_NULL, XenbusStateClosed);
}


/*
 * Kick off the connect process.
 * The status should be XD_UNKNOWN or XD_CLOSED.
 * On success, the status will be changed to XD_INIT.
 * On error, it will be changed to XD_UNKNOWN.
 */
static int
xdf_setstate_init(xdf_t *vdp)
{
	dev_info_t *dip = vdp->xdf_dip;
	xenbus_transaction_t xbt;
	grant_ref_t gref;
	char *xsname, *str;
	int rv;

	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
	ASSERT((vdp->xdf_state == XD_UNKNOWN) ||
	    (vdp->xdf_state == XD_CLOSED));

	DPRINTF(DDI_DBG,
	    ("xdf@%s: starting connection process\n", vdp->xdf_addr));

	/*
	 * If an eject is pending then don't allow a new connection.
	 * (Only the backend can clear a pending eject media request.)
	 */
	if (xdf_eject_pending(vdp))
		return (DDI_FAILURE);

	if ((xsname = xvdi_get_xsname(dip)) == NULL)
		goto errout;

	if ((vdp->xdf_peer = xvdi_get_oeid(dip)) == INVALID_DOMID)
		goto errout;

	(void) xvdi_switch_state(dip, XBT_NULL, XenbusStateInitialising);

	/*
	 * Sanity check for the existence of the xenbus device-type
	 * property.  This property might not exist if our xenbus device
	 * node was forcibly destroyed while we were still connected to
	 * the backend.
	 */
	if (xenbus_read_str(xsname, XBP_DEV_TYPE, &str) != 0)
		goto errout;
	strfree(str);

	if (xvdi_alloc_evtchn(dip) != DDI_SUCCESS)
		goto errout;

	vdp->xdf_evtchn = xvdi_get_evtchn(dip);
#ifdef XPV_HVM_DRIVER
	ec_bind_evtchn_to_handler(vdp->xdf_evtchn, IPL_VBD, xdf_intr, vdp);
#else /* !XPV_HVM_DRIVER */
	if (ddi_add_intr(dip, 0, NULL, NULL, xdf_intr, (caddr_t)vdp) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "xdf@%s: xdf_setstate_init: "
		    "failed to add intr handler", vdp->xdf_addr);
		goto errout1;
	}
#endif /* !XPV_HVM_DRIVER */

	if (xvdi_alloc_ring(dip, BLKIF_RING_SIZE,
	    sizeof (union blkif_sring_entry), &gref, &vdp->xdf_xb_ring) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "xdf@%s: failed to alloc comm ring",
		    vdp->xdf_addr);
		goto errout2;
	}
	vdp->xdf_xb_ring_hdl = vdp->xdf_xb_ring->xr_acc_hdl; /* ugly!! */

	/*
	 * Write into the xenstore the info needed by the backend.
	 */
trans_retry:
	if (xenbus_transaction_start(&xbt)) {
		cmn_err(CE_WARN, "xdf@%s: failed to start transaction",
		    vdp->xdf_addr);
		xvdi_fatal_error(dip, EIO, "connect transaction init");
		goto fail_trans;
	}

	/*
	 * XBP_PROTOCOL is written by the domain builder in the case of PV
	 * domains.  However, it is not written for HVM domains, so let's
	 * write it here.
	 */
	if (((rv = xenbus_printf(xbt, xsname,
	    XBP_MEDIA_REQ, "%s", XBV_MEDIA_REQ_NONE)) != 0) ||
	    ((rv = xenbus_printf(xbt, xsname,
	    XBP_RING_REF, "%u", gref)) != 0) ||
	    ((rv = xenbus_printf(xbt, xsname,
	    XBP_EVENT_CHAN, "%u", vdp->xdf_evtchn)) != 0) ||
	    ((rv = xenbus_printf(xbt, xsname,
	    XBP_PROTOCOL, "%s", XEN_IO_PROTO_ABI_NATIVE)) != 0) ||
	    ((rv = xvdi_switch_state(dip, xbt, XenbusStateInitialised)) > 0)) {
		(void) xenbus_transaction_end(xbt, 1);
		xvdi_fatal_error(dip, rv, "connect transaction setup");
		goto fail_trans;
	}

	/* kick off the connect process */
	if (rv = xenbus_transaction_end(xbt, 0)) {
		if (rv == EAGAIN)
			goto trans_retry;
		xvdi_fatal_error(dip, rv, "connect transaction commit");
		goto fail_trans;
	}

	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	mutex_enter(&vdp->xdf_dev_lk);
	xdf_set_state(vdp, XD_INIT);
	mutex_exit(&vdp->xdf_dev_lk);

	return (DDI_SUCCESS);

fail_trans:
	xvdi_free_ring(vdp->xdf_xb_ring);
errout2:
#ifdef XPV_HVM_DRIVER
	ec_unbind_evtchn(vdp->xdf_evtchn);
#else /* !XPV_HVM_DRIVER */
	(void) ddi_remove_intr(vdp->xdf_dip, 0, NULL);
#endif /* !XPV_HVM_DRIVER */
errout1:
	xvdi_free_evtchn(dip);
	vdp->xdf_evtchn = INVALID_EVTCHN;
errout:
	xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
	cmn_err(CE_WARN, "xdf@%s: failed to start connection to backend",
	    vdp->xdf_addr);
	return (DDI_FAILURE);
}

int
xdf_get_flush_block(xdf_t *vdp)
{
	/*
	 * Get a sector-size aligned buffer.
	 */
	vdp->xdf_flush_mem = kmem_alloc(vdp->xdf_xdev_secsize * 2, KM_SLEEP);
	vdp->xdf_cache_flush_block =
	    (char *)P2ROUNDUP((uintptr_t)(vdp->xdf_flush_mem),
	    (int)vdp->xdf_xdev_secsize);

	if (xdf_lb_rdwr(vdp->xdf_dip, TG_READ, vdp->xdf_cache_flush_block,
	    xdf_flush_block, vdp->xdf_xdev_secsize, NULL) != 0)
		return (DDI_FAILURE);
	return (DDI_SUCCESS);
}
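
/*
 * Alignment arithmetic for the allocation above: a buffer of
 * secsize * 2 bytes always contains a secsize-aligned block of
 * secsize bytes, wherever the allocation lands.  E.g. with a 512-byte
 * sector size, if kmem_alloc() returned 0x1230, then
 * P2ROUNDUP(0x1230, 512) == 0x1400, and [0x1400, 0x1600) fits inside
 * the allocated range [0x1230, 0x1630).
 */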

static void
xdf_setstate_ready(void *arg)
{
	xdf_t *vdp = (xdf_t *)arg;

	vdp->xdf_ready_tq_thread = curthread;

	/*
	 * We created all the minor nodes via cmlb_attach() using default
	 * values in xdf_attach(), to make it possible to block in
	 * xdf_open() in case anyone (say, the booting thread) tries to
	 * open the device before we've connected to the backend.  Now
	 * that we're almost connected, refresh all those minor nodes
	 * with the latest info we've got.
	 */
	mutex_enter(&vdp->xdf_dev_lk);
	if (vdp->xdf_cmbl_reattach) {
		vdp->xdf_cmbl_reattach = B_FALSE;

		mutex_exit(&vdp->xdf_dev_lk);
		if (xdf_cmlb_attach(vdp) != 0) {
			xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
			return;
		}
		mutex_enter(&vdp->xdf_dev_lk);
	}

	/* If we're not still trying to get to the ready state, then bail. */
	if (vdp->xdf_state != XD_CONNECTED) {
		mutex_exit(&vdp->xdf_dev_lk);
		return;
	}
	mutex_exit(&vdp->xdf_dev_lk);

	/*
	 * If the backend has feature-barrier, see if it supports the
	 * disk cache flush op.
	 */
	vdp->xdf_flush_supported = B_FALSE;
	if (vdp->xdf_feature_barrier) {
		/*
		 * Pretend we already know flush is supported so the probe
		 * will attempt the correct op.
		 */
		vdp->xdf_flush_supported = B_TRUE;
		if (xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE, NULL, 0, 0, 0) != 0) {
			vdp->xdf_flush_supported = B_FALSE;
			/*
			 * If the other end does not support the cache flush
			 * op then we must use a barrier-write to force disk
			 * cache flushing.  Barrier writes require that a
			 * data block actually be written, so cache a block
			 * to barrier-write when we are asked to perform a
			 * flush.
			 * XXX - would it be better to just copy 1 block
			 * (512 bytes) from whatever write we did last
			 * and rewrite that block?
			 */
			if (xdf_get_flush_block(vdp) != DDI_SUCCESS) {
				xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
				return;
			}
		}
	}

	mutex_enter(&vdp->xdf_cb_lk);
	mutex_enter(&vdp->xdf_dev_lk);
	if (vdp->xdf_state == XD_CONNECTED)
		xdf_set_state(vdp, XD_READY);
	mutex_exit(&vdp->xdf_dev_lk);

	/* Restart any currently queued up io */
	xdf_io_start(vdp);

	mutex_exit(&vdp->xdf_cb_lk);
}

/*
 * Synthetic geometry.
 */
#define	XDF_NSECTS	256
#define	XDF_NHEADS	16
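
/*
 * Worked example of the synthetic geometry: a disk of 4194304 blocks
 * (2 GB at 512 bytes per sector) yields
 * ncyl = 4194304 / (16 * 256) = 1024 cylinders.  A device smaller than
 * one full cylinder is rounded up to g_ncyl = 1, and any partial
 * trailing cylinder is simply not represented.
 */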

static void
xdf_synthetic_pgeom(dev_info_t *dip, cmlb_geom_t *geomp)
{
	xdf_t *vdp;
	uint_t ncyl;

	vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));

	ncyl = vdp->xdf_xdev_nblocks / (XDF_NHEADS * XDF_NSECTS);

	bzero(geomp, sizeof (*geomp));
	geomp->g_ncyl = ncyl == 0 ? 1 : ncyl;
	geomp->g_acyl = 0;
	geomp->g_nhead = XDF_NHEADS;
	geomp->g_nsect = XDF_NSECTS;
	geomp->g_secsize = vdp->xdf_xdev_secsize;
	geomp->g_capacity = vdp->xdf_xdev_nblocks;
	geomp->g_intrlv = 0;
	geomp->g_rpm = 7200;
}

/*
 * Finish other initialization after we've connected to the backend.
 * The status should be XD_INIT before calling this routine.
 * On success, the status will be changed to XD_CONNECTED.
 * On error, the status will stay XD_INIT.
 */
static int
xdf_setstate_connected(xdf_t *vdp)
{
	dev_info_t *dip = vdp->xdf_dip;
	cmlb_geom_t pgeom;
	diskaddr_t nblocks = 0;
	uint_t secsize = 0;
	char *oename, *xsname, *str;
	uint_t dinfo;

	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
	ASSERT(vdp->xdf_state == XD_INIT);

	if (((xsname = xvdi_get_xsname(dip)) == NULL) ||
	    ((oename = xvdi_get_oename(dip)) == NULL))
		return (DDI_FAILURE);

	/* Make sure the other end is XenbusStateConnected */
	if (xenbus_read_driver_state(oename) != XenbusStateConnected)
		return (DDI_FAILURE);

	/* Determine if feature barrier is supported by the backend */
	if (!(vdp->xdf_feature_barrier = xenbus_exists(oename, XBP_FB)))
		cmn_err(CE_NOTE, "!xdf@%s: feature-barrier not supported",
		    vdp->xdf_addr);

	/*
	 * Probe the backend.  Read the device size into xdf_xdev_nblocks
	 * and set the VDISK_READONLY, VDISK_CDROM, and VDISK_REMOVABLE
	 * flags in xdf_dinfo.  If the emulated device type is "cdrom",
	 * we always set VDISK_CDROM, regardless of whether it's present
	 * in the xenbus info parameter.
	 */
	if (xenbus_gather(XBT_NULL, oename,
	    XBP_SECTORS, "%"SCNu64, &nblocks,
	    XBP_SECTOR_SIZE, "%u", &secsize,
	    XBP_INFO, "%u", &dinfo,
	    NULL) != 0) {
		cmn_err(CE_WARN, "xdf@%s: xdf_setstate_connected: "
		    "cannot read backend info", vdp->xdf_addr);
		return (DDI_FAILURE);
	}
	if (xenbus_read_str(xsname, XBP_DEV_TYPE, &str) != 0) {
		cmn_err(CE_WARN, "xdf@%s: cannot read device-type",
		    vdp->xdf_addr);
		return (DDI_FAILURE);
	}
	if (strcmp(str, XBV_DEV_TYPE_CD) == 0)
		dinfo |= VDISK_CDROM;
	strfree(str);

	if (secsize == 0 || !(ISP2(secsize / DEV_BSIZE)))
		secsize = DEV_BSIZE;
	vdp->xdf_xdev_nblocks = nblocks;
	vdp->xdf_xdev_secsize = secsize;
#ifdef _ILP32
	if (vdp->xdf_xdev_nblocks > DK_MAX_BLOCKS) {
		cmn_err(CE_WARN, "xdf@%s: xdf_setstate_connected: "
		    "backend disk device too large with %llu blocks for"
		    " 32-bit kernel", vdp->xdf_addr, vdp->xdf_xdev_nblocks);
		xvdi_fatal_error(dip, EFBIG, "reading backend info");
		return (DDI_FAILURE);
	}
#endif

	/*
	 * If the physical geometry for a fixed disk has been explicitly
	 * set, then make sure that the specified physical geometry isn't
	 * larger than the device we connected to.
	 */
	if (vdp->xdf_pgeom_fixed &&
	    (vdp->xdf_pgeom.g_capacity > vdp->xdf_xdev_nblocks)) {
		cmn_err(CE_WARN,
		    "xdf@%s: connect failed, fixed geometry too large",
		    vdp->xdf_addr);
		return (DDI_FAILURE);
	}

	vdp->xdf_media_req_supported = xenbus_exists(oename, XBP_MEDIA_REQ_SUP);

	/* mark the vbd as ready for I/O */
	mutex_enter(&vdp->xdf_dev_lk);
	xdf_set_state(vdp, XD_CONNECTED);

	/* check if the cmlb label should be updated */
	xdf_synthetic_pgeom(dip, &pgeom);
	if ((vdp->xdf_dinfo != dinfo) ||
	    (!vdp->xdf_pgeom_fixed &&
	    (memcmp(&vdp->xdf_pgeom, &pgeom, sizeof (pgeom)) != 0))) {
		vdp->xdf_cmbl_reattach = B_TRUE;

		vdp->xdf_dinfo = dinfo;
		if (!vdp->xdf_pgeom_fixed)
			vdp->xdf_pgeom = pgeom;
	}

	if (XD_IS_CD(vdp) || XD_IS_RM(vdp)) {
		if (vdp->xdf_xdev_nblocks == 0) {
			vdp->xdf_mstate = DKIO_EJECTED;
			cv_broadcast(&vdp->xdf_mstate_cv);
		} else {
			vdp->xdf_mstate = DKIO_INSERTED;
			cv_broadcast(&vdp->xdf_mstate_cv);
		}
	} else {
		if (vdp->xdf_mstate != DKIO_NONE) {
			vdp->xdf_mstate = DKIO_NONE;
			cv_broadcast(&vdp->xdf_mstate_cv);
		}
	}

	mutex_exit(&vdp->xdf_dev_lk);

	cmn_err(CE_CONT, "?xdf@%s: %"PRIu64" blocks", vdp->xdf_addr,
	    (uint64_t)vdp->xdf_xdev_nblocks);

	/* Restart any currently queued up io */
	xdf_io_start(vdp);

	/*
	 * To get to the ready state we have to do IO to the backend
	 * device, but we can't initiate IO from the other end change
	 * callback thread (which is the current context we're executing
	 * in).  This is because if the other end disconnects while we're
	 * doing IO from the callback thread, then we can't receive that
	 * disconnect event and we hang waiting for an IO that can never
	 * complete.
	 */
	(void) ddi_taskq_dispatch(vdp->xdf_ready_tq, xdf_setstate_ready, vdp,
	    DDI_SLEEP);

	(void) xvdi_switch_state(dip, XBT_NULL, XenbusStateConnected);
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
xdf_oe_change(dev_info_t *dip, ddi_eventcookie_t id, void *arg, void *impl_data)
{
	XenbusState new_state = *(XenbusState *)impl_data;
	xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);

	DPRINTF(DDI_DBG, ("xdf@%s: otherend state change to %d!\n",
	    vdp->xdf_addr, new_state));

	mutex_enter(&vdp->xdf_cb_lk);

	/* We assume that this callback is single threaded */
	ASSERT(vdp->xdf_oe_change_thread == NULL);
	DEBUG_EVAL(vdp->xdf_oe_change_thread = curthread);

	/* ignore any backend state changes if we're suspending/suspended */
	if (vdp->xdf_suspending || (vdp->xdf_state == XD_SUSPEND)) {
		DEBUG_EVAL(vdp->xdf_oe_change_thread = NULL);
		mutex_exit(&vdp->xdf_cb_lk);
		return;
	}

	switch (new_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
		if (vdp->xdf_state == XD_INIT)
			break;

		xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
		if (xdf_setstate_init(vdp) != DDI_SUCCESS)
			break;
		ASSERT(vdp->xdf_state == XD_INIT);
		break;

	case XenbusStateConnected:
		if ((vdp->xdf_state == XD_CONNECTED) ||
		    (vdp->xdf_state == XD_READY))
			break;

		if (vdp->xdf_state != XD_INIT) {
			xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
			if (xdf_setstate_init(vdp) != DDI_SUCCESS)
				break;
			ASSERT(vdp->xdf_state == XD_INIT);
		}

		if (xdf_setstate_connected(vdp) != DDI_SUCCESS) {
			xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
			break;
		}
		ASSERT(vdp->xdf_state == XD_CONNECTED);
		break;

	case XenbusStateClosing:
		if (xdf_isopen(vdp, -1)) {
			cmn_err(CE_NOTE,
			    "xdf@%s: hot-unplug failed, still in use",
			    vdp->xdf_addr);
			break;
		}
		/*FALLTHROUGH*/
	case XenbusStateClosed:
		xdf_disconnect(vdp, XD_CLOSED, B_FALSE);
		break;
	}

	/* notify anybody waiting for oe state change */
	cv_broadcast(&vdp->xdf_dev_cv);
	DEBUG_EVAL(vdp->xdf_oe_change_thread = NULL);
	mutex_exit(&vdp->xdf_cb_lk);
}
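
/*
 * Summary of the backend ("other end") state handling above:
 *
 *	Unknown/Initialising/InitWait/Initialised:
 *		unless we're already in XD_INIT, disconnect and restart
 *		the connect process via xdf_setstate_init().
 *	Connected:
 *		get to XD_INIT if necessary, then complete the handshake
 *		via xdf_setstate_connected().
 *	Closing:
 *		ignored while the device is still open (hot-unplug is
 *		refused); otherwise treated like Closed.
 *	Closed:
 *		xdf_disconnect(vdp, XD_CLOSED, B_FALSE).
 */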
19718863SEdward.Pilatowicz@Sun.COM
static int
xdf_connect_locked(xdf_t *vdp, boolean_t wait)
{
	int	rv, timeouts = 0, reset = 20;

	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	/* we can't connect once we're in the closed state */
	if (vdp->xdf_state == XD_CLOSED)
		return (XD_CLOSED);

	vdp->xdf_connect_req++;
	while (vdp->xdf_state != XD_READY) {
		mutex_exit(&vdp->xdf_dev_lk);

		/* only one thread at a time can be the connection thread */
		if (vdp->xdf_connect_thread == NULL)
			vdp->xdf_connect_thread = curthread;

		if (vdp->xdf_connect_thread == curthread) {
			if ((timeouts > 0) && ((timeouts % reset) == 0)) {
				/*
				 * If we haven't established a connection
				 * within the reset time, then disconnect
				 * so we can try again, and double the reset
				 * time.  The reset time starts at 2 sec.
				 */
				(void) xdf_disconnect(vdp, XD_UNKNOWN, B_TRUE);
				reset *= 2;
			}
			if (vdp->xdf_state == XD_UNKNOWN)
				(void) xdf_setstate_init(vdp);
			if (vdp->xdf_state == XD_INIT)
				(void) xdf_setstate_connected(vdp);
		}

		mutex_enter(&vdp->xdf_dev_lk);
		if (!wait || (vdp->xdf_state == XD_READY))
			goto out;

		mutex_exit(&vdp->xdf_cb_lk);
		if (vdp->xdf_connect_thread != curthread) {
			rv = cv_wait_sig(&vdp->xdf_dev_cv, &vdp->xdf_dev_lk);
		} else {
			/* delay for 0.1 sec */
			rv = cv_reltimedwait_sig(&vdp->xdf_dev_cv,
			    &vdp->xdf_dev_lk, drv_usectohz(100*1000),
			    TR_CLOCK_TICK);
			if (rv == -1)
				timeouts++;
		}
		mutex_exit(&vdp->xdf_dev_lk);
		mutex_enter(&vdp->xdf_cb_lk);
		mutex_enter(&vdp->xdf_dev_lk);
		if (rv == 0)
			goto out;
	}

out:
	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if (vdp->xdf_connect_thread == curthread) {
		/*
		 * wake up someone else so they can become the connection
		 * thread.
		 */
		cv_signal(&vdp->xdf_dev_cv);
		vdp->xdf_connect_thread = NULL;
	}

	/* Try to lock the media */
	mutex_exit(&vdp->xdf_dev_lk);
	(void) xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
	mutex_enter(&vdp->xdf_dev_lk);

	vdp->xdf_connect_req--;
	return (vdp->xdf_state);
}
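
/*
 * Illustrative calling pattern for xdf_connect_locked() (a sketch drawn
 * from the callers below, not new driver logic): both xdf_cb_lk and
 * xdf_dev_lk must be held on entry and are still held on return, e.g.:
 *
 *	mutex_enter(&vdp->xdf_cb_lk);
 *	mutex_enter(&vdp->xdf_dev_lk);
 *	rv = xdf_connect_locked(vdp, B_TRUE);
 *	mutex_exit(&vdp->xdf_dev_lk);
 *	mutex_exit(&vdp->xdf_cb_lk);
 *
 * With wait == B_TRUE the call returns only once the device reaches
 * XD_READY or XD_CLOSED, or the caller is interrupted by a signal.
 */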

static uint_t
xdf_iorestart(caddr_t arg)
{
	xdf_t	*vdp = (xdf_t *)arg;

	ASSERT(vdp != NULL);

	mutex_enter(&vdp->xdf_dev_lk);
	ASSERT(ISDMACBON(vdp));
	SETDMACBOFF(vdp);
	mutex_exit(&vdp->xdf_dev_lk);

	xdf_io_start(vdp);

	return (DDI_INTR_CLAIMED);
}

#if defined(XPV_HVM_DRIVER)

typedef struct xdf_hvm_entry {
	list_node_t	xdf_he_list;
	char		*xdf_he_path;
	dev_info_t	*xdf_he_dip;
} xdf_hvm_entry_t;

static list_t xdf_hvm_list;
static kmutex_t xdf_hvm_list_lock;

static xdf_hvm_entry_t *
i_xdf_hvm_find(const char *path, dev_info_t *dip)
{
	xdf_hvm_entry_t	*i;

	ASSERT((path != NULL) || (dip != NULL));
	ASSERT(MUTEX_HELD(&xdf_hvm_list_lock));

	i = list_head(&xdf_hvm_list);
	while (i != NULL) {
		if ((path != NULL) && strcmp(i->xdf_he_path, path) != 0) {
			i = list_next(&xdf_hvm_list, i);
			continue;
		}
		if ((dip != NULL) && (i->xdf_he_dip != dip)) {
			i = list_next(&xdf_hvm_list, i);
			continue;
		}
		break;
	}
	return (i);
}

dev_info_t *
xdf_hvm_hold(const char *path)
{
	xdf_hvm_entry_t	*i;
	dev_info_t	*dip;

	mutex_enter(&xdf_hvm_list_lock);
	i = i_xdf_hvm_find(path, NULL);
	if (i == NULL) {
		mutex_exit(&xdf_hvm_list_lock);
		return (NULL);
	}
	ndi_hold_devi(dip = i->xdf_he_dip);
	mutex_exit(&xdf_hvm_list_lock);
	return (dip);
}
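
/*
 * Illustrative (hypothetical) consumer of xdf_hvm_hold(): the path
 * argument below is made up, but any caller must balance the
 * ndi_hold_devi() taken above with ndi_rele_devi() when done:
 *
 *	dev_info_t *dip;
 *
 *	if ((dip = xdf_hvm_hold("/xpvd/xdf@51712")) != NULL) {
 *		... use dip ...
 *		ndi_rele_devi(dip);
 *	}
 */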

static void
xdf_hvm_add(dev_info_t *dip)
{
	xdf_hvm_entry_t	*i;
	char		*path;

	/* figure out the path for the dip */
	path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);

	i = kmem_alloc(sizeof (*i), KM_SLEEP);
	i->xdf_he_dip = dip;
	i->xdf_he_path = i_ddi_strdup(path, KM_SLEEP);

	mutex_enter(&xdf_hvm_list_lock);
	ASSERT(i_xdf_hvm_find(path, NULL) == NULL);
	ASSERT(i_xdf_hvm_find(NULL, dip) == NULL);
	list_insert_head(&xdf_hvm_list, i);
	mutex_exit(&xdf_hvm_list_lock);

	kmem_free(path, MAXPATHLEN);
}

static void
xdf_hvm_rm(dev_info_t *dip)
{
	xdf_hvm_entry_t	*i;

	mutex_enter(&xdf_hvm_list_lock);
	VERIFY((i = i_xdf_hvm_find(NULL, dip)) != NULL);
	list_remove(&xdf_hvm_list, i);
	mutex_exit(&xdf_hvm_list_lock);

	kmem_free(i->xdf_he_path, strlen(i->xdf_he_path) + 1);
	kmem_free(i, sizeof (*i));
}

static void
xdf_hvm_init(void)
{
	list_create(&xdf_hvm_list, sizeof (xdf_hvm_entry_t),
	    offsetof(xdf_hvm_entry_t, xdf_he_list));
	mutex_init(&xdf_hvm_list_lock, NULL, MUTEX_DEFAULT, NULL);
}

static void
xdf_hvm_fini(void)
{
	ASSERT(list_head(&xdf_hvm_list) == NULL);
	list_destroy(&xdf_hvm_list);
	mutex_destroy(&xdf_hvm_list_lock);
}
21736318Sedp
21748863SEdward.Pilatowicz@Sun.COM boolean_t
xdf_hvm_connect(dev_info_t * dip)21756318Sedp xdf_hvm_connect(dev_info_t *dip)
21766318Sedp {
21776318Sedp xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
21788863SEdward.Pilatowicz@Sun.COM char *oename, *str;
21796318Sedp int rv;
21806318Sedp
21818863SEdward.Pilatowicz@Sun.COM mutex_enter(&vdp->xdf_cb_lk);
21828863SEdward.Pilatowicz@Sun.COM
21838863SEdward.Pilatowicz@Sun.COM /*
21848863SEdward.Pilatowicz@Sun.COM * Before try to establish a connection we need to wait for the
21858863SEdward.Pilatowicz@Sun.COM * backend hotplug scripts to have run. Once they are run the
21868863SEdward.Pilatowicz@Sun.COM * "<oename>/hotplug-status" property will be set to "connected".
21878863SEdward.Pilatowicz@Sun.COM */
21888863SEdward.Pilatowicz@Sun.COM for (;;) {
21898863SEdward.Pilatowicz@Sun.COM ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
21908863SEdward.Pilatowicz@Sun.COM
21918863SEdward.Pilatowicz@Sun.COM /*
21928863SEdward.Pilatowicz@Sun.COM * Get the xenbus path to the backend device. Note that
21938863SEdward.Pilatowicz@Sun.COM * we can't cache this path (and we look it up on each pass
21948863SEdward.Pilatowicz@Sun.COM * through this loop) because it could change during
21958863SEdward.Pilatowicz@Sun.COM * suspend, resume, and migration operations.
21968863SEdward.Pilatowicz@Sun.COM */
21978863SEdward.Pilatowicz@Sun.COM if ((oename = xvdi_get_oename(dip)) == NULL) {
21988863SEdward.Pilatowicz@Sun.COM mutex_exit(&vdp->xdf_cb_lk);
21998863SEdward.Pilatowicz@Sun.COM return (B_FALSE);
22008863SEdward.Pilatowicz@Sun.COM }
22018863SEdward.Pilatowicz@Sun.COM
22028863SEdward.Pilatowicz@Sun.COM str = NULL;
22038863SEdward.Pilatowicz@Sun.COM if ((xenbus_read_str(oename, XBP_HP_STATUS, &str) == 0) &&
22048863SEdward.Pilatowicz@Sun.COM (strcmp(str, XBV_HP_STATUS_CONN) == 0))
22058863SEdward.Pilatowicz@Sun.COM break;
22068863SEdward.Pilatowicz@Sun.COM
22078863SEdward.Pilatowicz@Sun.COM if (str != NULL)
22088863SEdward.Pilatowicz@Sun.COM strfree(str);
22098863SEdward.Pilatowicz@Sun.COM
22108863SEdward.Pilatowicz@Sun.COM /* wait for an update to "<oename>/hotplug-status" */
22118863SEdward.Pilatowicz@Sun.COM if (cv_wait_sig(&vdp->xdf_hp_status_cv, &vdp->xdf_cb_lk) == 0) {
22128863SEdward.Pilatowicz@Sun.COM /* we got interrupted by a signal */
22138863SEdward.Pilatowicz@Sun.COM mutex_exit(&vdp->xdf_cb_lk);
22148863SEdward.Pilatowicz@Sun.COM return (B_FALSE);
22158863SEdward.Pilatowicz@Sun.COM }
22168863SEdward.Pilatowicz@Sun.COM }
22178863SEdward.Pilatowicz@Sun.COM
22188863SEdward.Pilatowicz@Sun.COM /* Good news. The backend hotplug scripts have been run. */
22198863SEdward.Pilatowicz@Sun.COM ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
22208863SEdward.Pilatowicz@Sun.COM ASSERT(strcmp(str, XBV_HP_STATUS_CONN) == 0);
22218863SEdward.Pilatowicz@Sun.COM strfree(str);
22228863SEdward.Pilatowicz@Sun.COM
22238863SEdward.Pilatowicz@Sun.COM /*
22248863SEdward.Pilatowicz@Sun.COM * If we're emulating a cd device and if the backend doesn't support
22258863SEdward.Pilatowicz@Sun.COM * media request opreations, then we're not going to bother trying
22268863SEdward.Pilatowicz@Sun.COM * to establish a connection for a couple reasons. First off, media
22278863SEdward.Pilatowicz@Sun.COM * requests support is required to support operations like eject and
22288863SEdward.Pilatowicz@Sun.COM * media locking. Second, other backend platforms like Linux don't
22298863SEdward.Pilatowicz@Sun.COM * support hvm pv cdrom access. They don't even have a backend pv
22308863SEdward.Pilatowicz@Sun.COM * driver for cdrom device nodes, so we don't want to block forever
22318863SEdward.Pilatowicz@Sun.COM * waiting for a connection to a backend driver that doesn't exist.
22328863SEdward.Pilatowicz@Sun.COM */
22338863SEdward.Pilatowicz@Sun.COM if (XD_IS_CD(vdp) && !xenbus_exists(oename, XBP_MEDIA_REQ_SUP)) {
22348863SEdward.Pilatowicz@Sun.COM mutex_exit(&vdp->xdf_cb_lk);
22358863SEdward.Pilatowicz@Sun.COM return (B_FALSE);
22368863SEdward.Pilatowicz@Sun.COM }
22378863SEdward.Pilatowicz@Sun.COM
22389471SEdward.Pilatowicz@Sun.COM mutex_enter(&vdp->xdf_dev_lk);
22398863SEdward.Pilatowicz@Sun.COM rv = xdf_connect_locked(vdp, B_TRUE);
22406318Sedp mutex_exit(&vdp->xdf_dev_lk);
22418863SEdward.Pilatowicz@Sun.COM mutex_exit(&vdp->xdf_cb_lk);
22428863SEdward.Pilatowicz@Sun.COM
22438863SEdward.Pilatowicz@Sun.COM return ((rv == XD_READY) ? B_TRUE : B_FALSE);
22446318Sedp }
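
/*
 * Example of the xenstore handshake polled in xdf_hvm_connect() (node
 * layout assumed from the standard Xen vbd protocol): for a backend at
 * "backend/vbd/<domid>/<vdev>", the dom0 hotplug scripts eventually write
 *
 *	backend/vbd/<domid>/<vdev>/hotplug-status = "connected"
 *
 * and that xenbus update fires xdf_watch_hp_status_cb(), whose
 * cv_broadcast() of xdf_hp_status_cv wakes the wait loop above.
 */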

int
xdf_hvm_setpgeom(dev_info_t *dip, cmlb_geom_t *geomp)
{
	xdf_t	*vdp = (xdf_t *)ddi_get_driver_private(dip);

	/* sanity check the requested physical geometry */
	mutex_enter(&vdp->xdf_dev_lk);
	if ((geomp->g_secsize != XB_BSIZE) ||
	    (geomp->g_capacity == 0)) {
		mutex_exit(&vdp->xdf_dev_lk);
		return (EINVAL);
	}

	/*
	 * If we've already connected to the backend device then make sure
	 * we're not defining a physical geometry larger than our backend
	 * device.
	 */
	if ((vdp->xdf_xdev_nblocks != 0) &&
	    (geomp->g_capacity > vdp->xdf_xdev_nblocks)) {
		mutex_exit(&vdp->xdf_dev_lk);
		return (EINVAL);
	}

	bzero(&vdp->xdf_pgeom, sizeof (vdp->xdf_pgeom));
	vdp->xdf_pgeom.g_ncyl = geomp->g_ncyl;
	vdp->xdf_pgeom.g_acyl = geomp->g_acyl;
	vdp->xdf_pgeom.g_nhead = geomp->g_nhead;
	vdp->xdf_pgeom.g_nsect = geomp->g_nsect;
	vdp->xdf_pgeom.g_secsize = geomp->g_secsize;
	vdp->xdf_pgeom.g_capacity = geomp->g_capacity;
	vdp->xdf_pgeom.g_intrlv = geomp->g_intrlv;
	vdp->xdf_pgeom.g_rpm = geomp->g_rpm;

	vdp->xdf_pgeom_fixed = B_TRUE;
	mutex_exit(&vdp->xdf_dev_lk);

	/* force a re-validation */
	cmlb_invalidate(vdp->xdf_vd_lbl, NULL);

	return (0);
}
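
/*
 * Illustrative call to xdf_hvm_setpgeom() (hypothetical values): forcing
 * a 1 GB disk with 512-byte sectors onto the frontend, as a PV cmdk-style
 * shell might do:
 *
 *	cmlb_geom_t pgeom;
 *
 *	bzero(&pgeom, sizeof (pgeom));
 *	pgeom.g_secsize = XB_BSIZE;
 *	pgeom.g_capacity = 2097152;	(1 GB in XB_BSIZE blocks)
 *	(void) xdf_hvm_setpgeom(dip, &pgeom);
 *
 * Anything with g_secsize != XB_BSIZE or a zero capacity fails the
 * sanity check above with EINVAL.
 */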

boolean_t
xdf_is_cd(dev_info_t *dip)
{
	xdf_t		*vdp = (xdf_t *)ddi_get_driver_private(dip);
	boolean_t	rv;

	mutex_enter(&vdp->xdf_cb_lk);
	rv = XD_IS_CD(vdp);
	mutex_exit(&vdp->xdf_cb_lk);
	return (rv);
}

boolean_t
xdf_is_rm(dev_info_t *dip)
{
	xdf_t		*vdp = (xdf_t *)ddi_get_driver_private(dip);
	boolean_t	rv;

	mutex_enter(&vdp->xdf_cb_lk);
	rv = XD_IS_RM(vdp);
	mutex_exit(&vdp->xdf_cb_lk);
	return (rv);
}

boolean_t
xdf_media_req_supported(dev_info_t *dip)
{
	xdf_t		*vdp = (xdf_t *)ddi_get_driver_private(dip);
	boolean_t	rv;

	mutex_enter(&vdp->xdf_cb_lk);
	rv = vdp->xdf_media_req_supported;
	mutex_exit(&vdp->xdf_cb_lk);
	return (rv);
}

#endif /* XPV_HVM_DRIVER */

static int
xdf_lb_getcap(dev_info_t *dip, diskaddr_t *capp)
{
	xdf_t	*vdp;

	vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));

	if (vdp == NULL)
		return (ENXIO);

	mutex_enter(&vdp->xdf_dev_lk);
	*capp = vdp->xdf_pgeom.g_capacity;
	DPRINTF(LBL_DBG, ("xdf@%s:capacity %llu\n", vdp->xdf_addr, *capp));
	mutex_exit(&vdp->xdf_dev_lk);
	return (0);
}

static int
xdf_lb_getpgeom(dev_info_t *dip, cmlb_geom_t *geomp)
{
	xdf_t	*vdp;

	if ((vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))) == NULL)
		return (ENXIO);
	*geomp = vdp->xdf_pgeom;
	return (0);
}

/*
 * We don't have a real HBA, so there is no virtual geometry to report.
 */
/*ARGSUSED*/
static int
xdf_lb_getvgeom(dev_info_t *dip, cmlb_geom_t *geomp)
{
	return (EINVAL);
}

static int
xdf_lb_getattribute(dev_info_t *dip, tg_attribute_t *tgattributep)
{
	xdf_t	*vdp;

	if ((vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))) == NULL)
		return (ENXIO);

	if (XD_IS_RO(vdp))
		tgattributep->media_is_writable = 0;
	else
		tgattributep->media_is_writable = 1;
	return (0);
}

/* ARGSUSED3 */
int
xdf_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
	int	instance;
	xdf_t	*vdp;

	instance = ddi_get_instance(dip);

	if ((vdp = ddi_get_soft_state(xdf_ssp, instance)) == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
		return (xdf_lb_getpgeom(dip, (cmlb_geom_t *)arg));
	case TG_GETVIRTGEOM:
		return (xdf_lb_getvgeom(dip, (cmlb_geom_t *)arg));
	case TG_GETCAPACITY:
		return (xdf_lb_getcap(dip, (diskaddr_t *)arg));
	case TG_GETBLOCKSIZE:
		mutex_enter(&vdp->xdf_cb_lk);
		*(uint32_t *)arg = vdp->xdf_xdev_secsize;
		mutex_exit(&vdp->xdf_cb_lk);
		return (0);
	case TG_GETATTR:
		return (xdf_lb_getattribute(dip, (tg_attribute_t *)arg));
	default:
		return (ENOTTY);
	}
}
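
/*
 * Illustrative use of the entry point above (assumed cmlb-style caller):
 * sizing the device before building a label:
 *
 *	diskaddr_t	cap;
 *	uint32_t	bsize;
 *
 *	(void) xdf_lb_getinfo(dip, TG_GETCAPACITY, &cap, NULL);
 *	(void) xdf_lb_getinfo(dip, TG_GETBLOCKSIZE, &bsize, NULL);
 *
 * Note that TG_GETCAPACITY reports a block count in xdf_xdev_secsize
 * units, not bytes.
 */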

/* ARGSUSED5 */
int
xdf_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufp,
    diskaddr_t start, size_t reqlen, void *tg_cookie)
{
	xdf_t	*vdp;
	struct buf *bp;
	int	err = 0;

	vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));

	/* We don't allow IO from the oe_change callback thread */
	ASSERT(curthread != vdp->xdf_oe_change_thread);

	if ((start + ((reqlen / (vdp->xdf_xdev_secsize / DEV_BSIZE))
	    >> DEV_BSHIFT)) > vdp->xdf_pgeom.g_capacity)
		return (EINVAL);

	bp = getrbuf(KM_SLEEP);
	if (cmd == TG_READ)
		bp->b_flags = B_BUSY | B_READ;
	else
		bp->b_flags = B_BUSY | B_WRITE;

	bp->b_un.b_addr = bufp;
	bp->b_bcount = reqlen;
	bp->b_blkno = start * (vdp->xdf_xdev_secsize / DEV_BSIZE);
	bp->b_edev = DDI_DEV_T_NONE;	/* don't have dev_t */

	mutex_enter(&vdp->xdf_dev_lk);
	xdf_bp_push(vdp, bp);
	mutex_exit(&vdp->xdf_dev_lk);
	xdf_io_start(vdp);
	if (curthread == vdp->xdf_ready_tq_thread)
		(void) xdf_ring_drain(vdp);
	err = biowait(bp);
	ASSERT(bp->b_flags & B_DONE);
	freerbuf(bp);
	return (err);
}
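
/*
 * Illustrative synchronous read via xdf_lb_rdwr() (a sketch; the buffer
 * and offsets are hypothetical): reading the first sector of the device,
 * much as cmlb does when it validates a label:
 *
 *	char	buf[DEV_BSIZE];
 *	int	err;
 *
 *	err = xdf_lb_rdwr(dip, TG_READ, buf, 0, sizeof (buf), NULL);
 *
 * The call blocks in biowait() until the backend completes the request,
 * so it must not be made with xdf_dev_lk held.
 */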

/*
 * Lock the current media.  Set the media state to "lock".
 * (Media locks are only respected by the backend driver.)
 */
static int
xdf_ioctl_mlock(xdf_t *vdp)
{
	int rv;

	mutex_enter(&vdp->xdf_cb_lk);
	rv = xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
	mutex_exit(&vdp->xdf_cb_lk);
	return (rv);
}

/*
 * Release a media lock.  Set the media state to "none".
 */
static int
xdf_ioctl_munlock(xdf_t *vdp)
{
	int rv;

	mutex_enter(&vdp->xdf_cb_lk);
	rv = xdf_media_req(vdp, XBV_MEDIA_REQ_NONE, B_TRUE);
	mutex_exit(&vdp->xdf_cb_lk);
	return (rv);
}

/*
 * Eject the current media.  This ignores any media locks.  (Media locks
 * are only for the benefit of the backend.)
 */
static int
xdf_ioctl_eject(xdf_t *vdp)
{
	int rv;

	mutex_enter(&vdp->xdf_cb_lk);
	if ((rv = xdf_media_req(vdp, XBV_MEDIA_REQ_EJECT, B_FALSE)) != 0) {
		mutex_exit(&vdp->xdf_cb_lk);
		return (rv);
	}

	/*
	 * We've set the media request xenbus parameter to eject, so now
	 * disconnect from the backend, wait for the backend to clear
	 * the media request xenbus parameter, and then reconnect to
	 * the backend.
	 */
	(void) xdf_disconnect(vdp, XD_UNKNOWN, B_TRUE);
	mutex_enter(&vdp->xdf_dev_lk);
	if (xdf_connect_locked(vdp, B_TRUE) != XD_READY) {
		mutex_exit(&vdp->xdf_dev_lk);
		mutex_exit(&vdp->xdf_cb_lk);
		return (EIO);
	}
	mutex_exit(&vdp->xdf_dev_lk);
	mutex_exit(&vdp->xdf_cb_lk);
	return (0);
}

/*
 * Watch for media state changes.  This can be an insertion of a device
 * (triggered by a 'xm block-configure' request in another domain) or
 * the ejection of a device (triggered by a local "eject" operation).
 * For a full description of the DKIOCSTATE ioctl behavior see dkio(7I).
 */
static int
xdf_dkstate(xdf_t *vdp, enum dkio_state mstate)
{
	enum dkio_state	prev_state;

	mutex_enter(&vdp->xdf_cb_lk);
	prev_state = vdp->xdf_mstate;

	if (vdp->xdf_mstate == mstate) {
		while (vdp->xdf_mstate == prev_state) {
			if (cv_wait_sig(&vdp->xdf_mstate_cv,
			    &vdp->xdf_cb_lk) == 0) {
				mutex_exit(&vdp->xdf_cb_lk);
				return (EINTR);
			}
		}
	}

	if ((prev_state != DKIO_INSERTED) &&
	    (vdp->xdf_mstate == DKIO_INSERTED)) {
		(void) xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
		mutex_exit(&vdp->xdf_cb_lk);
		return (0);
	}

	mutex_exit(&vdp->xdf_cb_lk);
	return (0);
}
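
/*
 * Illustrative userland use of the DKIOCSTATE support above (not part of
 * this driver): a removable-media daemon blocks for media changes with:
 *
 *	enum dkio_state state = DKIO_NONE;
 *
 *	while (ioctl(fd, DKIOCSTATE, &state) == 0) {
 *		... react to DKIO_INSERTED or DKIO_EJECTED ...
 *	}
 *
 * Each ioctl returns only once xdf_mstate differs from the state that
 * was passed in, per dkio(7I).
 */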

/*ARGSUSED*/
static int
xdf_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	minor_t		minor = getminor(dev);
	int		part = XDF_PART(minor);
	xdf_t		*vdp;
	int		rv;

	if (((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL) ||
	    (!xdf_isopen(vdp, part)))
		return (ENXIO);

	DPRINTF(IOCTL_DBG, ("xdf@%s:ioctl: cmd %d (0x%x)\n",
	    vdp->xdf_addr, cmd, cmd));

	switch (cmd) {
	default:
		return (ENOTTY);
	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
	case DKIOCGGEOM:
	case DKIOCSGEOM:
	case DKIOCGAPART:
	case DKIOCSAPART:
	case DKIOCGVTOC:
	case DKIOCSVTOC:
	case DKIOCPARTINFO:
	case DKIOCGEXTVTOC:
	case DKIOCSEXTVTOC:
	case DKIOCEXTPARTINFO:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCGETEFI:
	case DKIOCSETEFI:
	case DKIOCSETEXTPART:
	case DKIOCPARTITION:
		return (cmlb_ioctl(vdp->xdf_vd_lbl, dev, cmd, arg, mode, credp,
		    rvalp, NULL));
	case FDEJECT:
	case DKIOCEJECT:
	case CDROMEJECT:
		return (xdf_ioctl_eject(vdp));
	case DKIOCLOCK:
		return (xdf_ioctl_mlock(vdp));
	case DKIOCUNLOCK:
		return (xdf_ioctl_munlock(vdp));
	case CDROMREADOFFSET: {
		int offset = 0;

		if (!XD_IS_CD(vdp))
			return (ENOTTY);
		if (ddi_copyout(&offset, (void *)arg, sizeof (int), mode))
			return (EFAULT);
		return (0);
	}
	case DKIOCGMEDIAINFO: {
		struct dk_minfo media_info;

		media_info.dki_lbsize = vdp->xdf_xdev_secsize;
		media_info.dki_capacity = vdp->xdf_pgeom.g_capacity;
		if (XD_IS_CD(vdp))
			media_info.dki_media_type = DK_CDROM;
		else
			media_info.dki_media_type = DK_FIXED_DISK;

		if (ddi_copyout(&media_info, (void *)arg,
		    sizeof (struct dk_minfo), mode))
			return (EFAULT);
		return (0);
	}
	case DKIOCINFO: {
		struct dk_cinfo info;

		/* controller information */
		if (XD_IS_CD(vdp))
			info.dki_ctype = DKC_CDROM;
		else
			info.dki_ctype = DKC_VBD;

		info.dki_cnum = 0;
		(void) strncpy((char *)(&info.dki_cname), "xdf", 8);

		/* unit information */
		info.dki_unit = ddi_get_instance(vdp->xdf_dip);
		(void) strncpy((char *)(&info.dki_dname), "xdf", 8);
		info.dki_flags = DKI_FMTVOL;
		info.dki_partition = part;
		info.dki_maxtransfer = maxphys / DEV_BSIZE;
		info.dki_addr = 0;
		info.dki_space = 0;
		info.dki_prio = 0;
		info.dki_vec = 0;

		if (ddi_copyout(&info, (void *)arg, sizeof (info), mode))
			return (EFAULT);
		return (0);
	}
	case DKIOCSTATE: {
		enum dkio_state mstate;

		if (ddi_copyin((void *)arg, &mstate,
		    sizeof (mstate), mode) != 0)
			return (EFAULT);
		if ((rv = xdf_dkstate(vdp, mstate)) != 0)
			return (rv);
		mstate = vdp->xdf_mstate;
		if (ddi_copyout(&mstate, (void *)arg,
		    sizeof (mstate), mode) != 0)
			return (EFAULT);
		return (0);
	}
	case DKIOCREMOVABLE: {
		int i = BOOLEAN2VOID(XD_IS_RM(vdp));

		if (ddi_copyout(&i, (caddr_t)arg, sizeof (i), mode))
			return (EFAULT);
		return (0);
	}
	case DKIOCGETWCE: {
		/* report the cached write-cache-enable state, not XD_IS_RM */
		int i = BOOLEAN2VOID(vdp->xdf_wce);

		if (ddi_copyout(&i, (void *)arg, sizeof (i), mode))
			return (EFAULT);
		return (0);
	}
	case DKIOCSETWCE: {
		int i;

		if (ddi_copyin((void *)arg, &i, sizeof (i), mode))
			return (EFAULT);
		vdp->xdf_wce = VOID2BOOLEAN(i);
		return (0);
	}
	case DKIOCFLUSHWRITECACHE: {
		struct dk_callback *dkc = (struct dk_callback *)arg;

		if (vdp->xdf_flush_supported) {
			rv = xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE,
			    NULL, 0, 0, (void *)dev);
		} else if (vdp->xdf_feature_barrier &&
		    !xdf_barrier_flush_disable) {
			rv = xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE,
			    vdp->xdf_cache_flush_block, xdf_flush_block,
			    vdp->xdf_xdev_secsize, (void *)dev);
		} else {
			return (ENOTTY);
		}
		if ((mode & FKIOCTL) && (dkc != NULL) &&
		    (dkc->dkc_callback != NULL)) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, rv);
			/* need to return 0 after calling callback */
			rv = 0;
		}
		return (rv);
	}
	}
	/*NOTREACHED*/
}
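
/*
 * Illustrative in-kernel use of DKIOCFLUSHWRITECACHE (assumed layered
 * consumer; the handle and callback names are hypothetical): an
 * asynchronous flush request via LDI looks roughly like:
 *
 *	struct dk_callback dkc;
 *
 *	dkc.dkc_callback = my_flush_done;
 *	dkc.dkc_cookie = my_cookie;
 *	(void) ldi_ioctl(lh, DKIOCFLUSHWRITECACHE, (intptr_t)&dkc,
 *	    FKIOCTL, kcred, NULL);
 *
 * With FKIOCTL set and a callback supplied, xdf_ioctl() returns 0 and
 * delivers the real flush status through dkc_callback instead.
 */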

static int
xdf_strategy(struct buf *bp)
{
	xdf_t	*vdp;
	minor_t minor;
	diskaddr_t p_blkct, p_blkst;
	daddr_t blkno;
	ulong_t nblks;
	int part;

	minor = getminor(bp->b_edev);
	part = XDF_PART(minor);
	vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor));

	mutex_enter(&vdp->xdf_dev_lk);
	if (!xdf_isopen(vdp, part)) {
		mutex_exit(&vdp->xdf_dev_lk);
		xdf_io_err(bp, ENXIO, 0);
		return (0);
	}

	/* We don't allow IO from the oe_change callback thread */
	ASSERT(curthread != vdp->xdf_oe_change_thread);

	/* Check for writes to a read only device */
	if (!IS_READ(bp) && XD_IS_RO(vdp)) {
		mutex_exit(&vdp->xdf_dev_lk);
		xdf_io_err(bp, EROFS, 0);
		return (0);
	}

	/* Check if this I/O is accessing a partition or the entire disk */
	if ((long)bp->b_private == XB_SLICE_NONE) {
		/* This I/O is using an absolute offset */
		p_blkct = vdp->xdf_xdev_nblocks;
		p_blkst = 0;
	} else {
		/* This I/O is using a partition relative offset */
		mutex_exit(&vdp->xdf_dev_lk);
		if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkct,
		    &p_blkst, NULL, NULL, NULL)) {
			xdf_io_err(bp, ENXIO, 0);
			return (0);
		}
		mutex_enter(&vdp->xdf_dev_lk);
	}

	/*
	 * Adjust the real blkno and bcount according to the underlying
	 * physical sector size.
	 */
	blkno = bp->b_blkno / (vdp->xdf_xdev_secsize / XB_BSIZE);

	/* check for a starting block beyond the disk or partition limit */
	if (blkno > p_blkct) {
		DPRINTF(IO_DBG, ("xdf@%s: block %lld exceeds VBD size %"PRIu64,
		    vdp->xdf_addr, (longlong_t)blkno, (uint64_t)p_blkct));
		mutex_exit(&vdp->xdf_dev_lk);
		xdf_io_err(bp, EINVAL, 0);
		return (0);
	}

	/* Legacy: don't set the error flag in this case */
	if (blkno == p_blkct) {
		mutex_exit(&vdp->xdf_dev_lk);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	/* sanitize the input buf */
	bioerror(bp, 0);
	bp->b_resid = 0;
	bp->av_back = bp->av_forw = NULL;

	/* Adjust for a partial transfer; this will result in an error later */
	if (vdp->xdf_xdev_secsize != 0 &&
	    vdp->xdf_xdev_secsize != XB_BSIZE) {
		nblks = bp->b_bcount / vdp->xdf_xdev_secsize;
	} else {
		nblks = bp->b_bcount >> XB_BSHIFT;
	}

	if ((blkno + nblks) > p_blkct) {
		if (vdp->xdf_xdev_secsize != 0 &&
		    vdp->xdf_xdev_secsize != XB_BSIZE) {
			bp->b_resid =
			    ((blkno + nblks) - p_blkct) *
			    vdp->xdf_xdev_secsize;
		} else {
			bp->b_resid =
			    ((blkno + nblks) - p_blkct) <<
			    XB_BSHIFT;
		}
		bp->b_bcount -= bp->b_resid;
	}

	DPRINTF(IO_DBG, ("xdf@%s: strategy blk %lld len %lu\n",
	    vdp->xdf_addr, (longlong_t)blkno, (ulong_t)bp->b_bcount));

	/* Fix up the buf struct */
	bp->b_flags |= B_BUSY;
	bp->b_private = (void *)(uintptr_t)p_blkst;

	xdf_bp_push(vdp, bp);
	mutex_exit(&vdp->xdf_dev_lk);
	xdf_io_start(vdp);
	if (do_polled_io)
		(void) xdf_ring_drain(vdp);
	return (0);
}
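
/*
 * Worked example of the sector-size conversion in xdf_strategy() above
 * (hypothetical 4K backend): with xdf_xdev_secsize == 4096, a request at
 * bp->b_blkno == 24 (in 512-byte XB_BSIZE units) maps to backend block
 * 24 / (4096 / 512) == 3, and a 16 KB b_bcount spans 16384 / 4096 == 4
 * backend blocks.
 */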

/*ARGSUSED*/
static int
xdf_read(dev_t dev, struct uio *uiop, cred_t *credp)
{
	xdf_t	*vdp;
	minor_t minor;
	diskaddr_t p_blkcnt;
	int part;

	minor = getminor(dev);
	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
		return (ENXIO);

	DPRINTF(IO_DBG, ("xdf@%s: read offset 0x%"PRIx64"\n",
	    vdp->xdf_addr, (int64_t)uiop->uio_offset));

	part = XDF_PART(minor);
	if (!xdf_isopen(vdp, part))
		return (ENXIO);

	if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
	    NULL, NULL, NULL, NULL))
		return (ENXIO);

	if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
		return (ENOSPC);

	if (U_INVAL(uiop))
		return (EINVAL);

	return (physio(xdf_strategy, NULL, dev, B_READ, xdfmin, uiop));
}

/*ARGSUSED*/
static int
xdf_write(dev_t dev, struct uio *uiop, cred_t *credp)
{
	xdf_t	*vdp;
	minor_t minor;
	diskaddr_t p_blkcnt;
	int part;

	minor = getminor(dev);
	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
		return (ENXIO);

	DPRINTF(IO_DBG, ("xdf@%s: write offset 0x%"PRIx64"\n",
	    vdp->xdf_addr, (int64_t)uiop->uio_offset));

	part = XDF_PART(minor);
	if (!xdf_isopen(vdp, part))
		return (ENXIO);

	if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
	    NULL, NULL, NULL, NULL))
		return (ENXIO);

	if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
		return (ENOSPC);

	if (U_INVAL(uiop))
		return (EINVAL);

	return (physio(xdf_strategy, NULL, dev, B_WRITE, xdfmin, uiop));
}

/*ARGSUSED*/
static int
xdf_aread(dev_t dev, struct aio_req *aiop, cred_t *credp)
{
	xdf_t	*vdp;
	minor_t minor;
	struct uio *uiop = aiop->aio_uio;
	diskaddr_t p_blkcnt;
	int part;

	minor = getminor(dev);
	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
		return (ENXIO);

	part = XDF_PART(minor);
	if (!xdf_isopen(vdp, part))
		return (ENXIO);

	if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
	    NULL, NULL, NULL, NULL))
		return (ENXIO);

	if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
		return (ENOSPC);

	if (U_INVAL(uiop))
		return (EINVAL);

	return (aphysio(xdf_strategy, anocancel, dev, B_READ, xdfmin, aiop));
}

/*ARGSUSED*/
static int
xdf_awrite(dev_t dev, struct aio_req *aiop, cred_t *credp)
{
	xdf_t	*vdp;
	minor_t minor;
	struct uio *uiop = aiop->aio_uio;
	diskaddr_t p_blkcnt;
	int part;

	minor = getminor(dev);
	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
		return (ENXIO);

	part = XDF_PART(minor);
	if (!xdf_isopen(vdp, part))
		return (ENXIO);

	if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
	    NULL, NULL, NULL, NULL))
		return (ENXIO);

	if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
		return (ENOSPC);

	if (U_INVAL(uiop))
		return (EINVAL);

	return (aphysio(xdf_strategy, anocancel, dev, B_WRITE, xdfmin, aiop));
}

static int
xdf_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	struct buf dumpbuf, *dbp = &dumpbuf;
	xdf_t	*vdp;
	minor_t minor;
	int err = 0;
	int part;
	diskaddr_t p_blkcnt, p_blkst;

	minor = getminor(dev);
	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
		return (ENXIO);

	DPRINTF(IO_DBG, ("xdf@%s: dump addr (0x%p) blk (%ld) nblks (%d)\n",
	    vdp->xdf_addr, (void *)addr, blkno, nblk));

	/* We don't allow IO from the oe_change callback thread */
	ASSERT(curthread != vdp->xdf_oe_change_thread);

	part = XDF_PART(minor);
	if (!xdf_isopen(vdp, part))
		return (ENXIO);

	if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt, &p_blkst,
	    NULL, NULL, NULL))
		return (ENXIO);

	if ((blkno + nblk) >
	    (p_blkcnt * (vdp->xdf_xdev_secsize / XB_BSIZE))) {
		cmn_err(CE_WARN, "xdf@%s: block %ld exceeds VBD size %"PRIu64,
		    vdp->xdf_addr, (daddr_t)((blkno + nblk) /
		    (vdp->xdf_xdev_secsize / XB_BSIZE)), (uint64_t)p_blkcnt);
		return (EINVAL);
	}

	bioinit(dbp);
	dbp->b_flags = B_BUSY;
	dbp->b_un.b_addr = addr;
	dbp->b_bcount = nblk << DEV_BSHIFT;
	dbp->b_blkno = blkno;
	dbp->b_edev = dev;
	dbp->b_private = (void *)(uintptr_t)p_blkst;

	mutex_enter(&vdp->xdf_dev_lk);
	xdf_bp_push(vdp, dbp);
	mutex_exit(&vdp->xdf_dev_lk);
	xdf_io_start(vdp);
	err = xdf_ring_drain(vdp);
	biofini(dbp);
	return (err);
}
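
/*
 * Note on the dump path above: xdf_dump() can be called from panic
 * context, so instead of sleeping in biowait() it polls the ring via
 * xdf_ring_drain() until the request completes.  The blkno and nblk
 * arguments arrive in DEV_BSIZE units, which is why the bounds check
 * scales p_blkcnt by (xdf_xdev_secsize / XB_BSIZE).
 */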

/*ARGSUSED*/
static int
xdf_close(dev_t dev, int flag, int otyp, struct cred *credp)
{
	minor_t	minor;
	xdf_t	*vdp;
	int part;
	ulong_t parbit;

	minor = getminor(dev);
	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
		return (ENXIO);

	mutex_enter(&vdp->xdf_dev_lk);
	part = XDF_PART(minor);
	if (!xdf_isopen(vdp, part)) {
		mutex_exit(&vdp->xdf_dev_lk);
		return (ENXIO);
	}
	parbit = 1 << part;

	ASSERT((vdp->xdf_vd_open[otyp] & parbit) != 0);
	if (otyp == OTYP_LYR) {
		ASSERT(vdp->xdf_vd_lyropen[part] > 0);
		if (--vdp->xdf_vd_lyropen[part] == 0)
			vdp->xdf_vd_open[otyp] &= ~parbit;
	} else {
		vdp->xdf_vd_open[otyp] &= ~parbit;
	}
	vdp->xdf_vd_exclopen &= ~parbit;

	mutex_exit(&vdp->xdf_dev_lk);
	return (0);
}

static int
xdf_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	minor_t	minor;
	xdf_t	*vdp;
	int part;
	ulong_t parbit;
	diskaddr_t p_blkct = 0;
	boolean_t firstopen;
	boolean_t nodelay;

	minor = getminor(*devp);
	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
		return (ENXIO);

	nodelay = (flag & (FNDELAY | FNONBLOCK));

	DPRINTF(DDI_DBG, ("xdf@%s: opening\n", vdp->xdf_addr));

	/* do cv_wait until connected or failed */
	mutex_enter(&vdp->xdf_cb_lk);
	mutex_enter(&vdp->xdf_dev_lk);
	if (!nodelay && (xdf_connect_locked(vdp, B_TRUE) != XD_READY)) {
		mutex_exit(&vdp->xdf_dev_lk);
		mutex_exit(&vdp->xdf_cb_lk);
		return (ENXIO);
	}
	mutex_exit(&vdp->xdf_cb_lk);

	if ((flag & FWRITE) && XD_IS_RO(vdp)) {
		mutex_exit(&vdp->xdf_dev_lk);
		return (EROFS);
	}

	part = XDF_PART(minor);
	parbit = 1 << part;
	if ((vdp->xdf_vd_exclopen & parbit) ||
	    ((flag & FEXCL) && xdf_isopen(vdp, part))) {
		mutex_exit(&vdp->xdf_dev_lk);
		return (EBUSY);
	}

	/* are we the first one to open this node? */
	firstopen = !xdf_isopen(vdp, -1);

	if (otyp == OTYP_LYR)
		vdp->xdf_vd_lyropen[part]++;

	vdp->xdf_vd_open[otyp] |= parbit;

	if (flag & FEXCL)
		vdp->xdf_vd_exclopen |= parbit;

	mutex_exit(&vdp->xdf_dev_lk);

	/* force a re-validation */
	if (firstopen)
		cmlb_invalidate(vdp->xdf_vd_lbl, NULL);

	/* If this is a non-blocking open then we're done */
	if (nodelay)
		return (0);

	/*
	 * This is a blocking open, so we require:
	 * - that the disk have a valid label on it
	 * - that the size of the partition that we're opening is non-zero
	 */
	if ((cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkct,
	    NULL, NULL, NULL, NULL) != 0) || (p_blkct == 0)) {
		(void) xdf_close(*devp, flag, otyp, credp);
		return (ENXIO);
	}

	return (0);
}
31078863SEdward.Pilatowicz@Sun.COM
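/*
 * xenbus watch callback, invoked whenever the backend's hotplug status
 * node changes.  We simply wake up any threads blocked on
 * xdf_hp_status_cv waiting for a hotplug status transition.
 */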
/*ARGSUSED*/
static void
xdf_watch_hp_status_cb(dev_info_t *dip, const char *path, void *arg)
{
	xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
	cv_broadcast(&vdp->xdf_hp_status_cv);
}

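/*
 * prop_op(9E) entry point.  Dynamic disk properties (partition size,
 * block count, etc.) are handled by cmlb on our behalf; if we have no
 * soft state yet, we fall back to the default ddi_prop_op() lookup.
 */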
static int
xdf_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	xdf_t	*vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));

	/*
	 * Sanity check that any dev_t or dip we were passed actually
	 * corresponds to this device driver.  On debug kernels we'll
	 * panic and on non-debug kernels we'll return failure.
	 */
	ASSERT(ddi_driver_major(dip) == xdf_major);
	ASSERT((dev == DDI_DEV_T_ANY) || (getmajor(dev) == xdf_major));
	if ((ddi_driver_major(dip) != xdf_major) ||
	    ((dev != DDI_DEV_T_ANY) && (getmajor(dev) != xdf_major)))
		return (DDI_PROP_NOT_FOUND);

	if (vdp == NULL)
		return (ddi_prop_op(dev, dip, prop_op, flags,
		    name, valuep, lengthp));

	return (cmlb_prop_op(vdp->xdf_vd_lbl,
	    dev, dip, prop_op, flags, name, valuep, lengthp,
	    XDF_PART(getminor(dev)), NULL));
}

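/*
 * getinfo(9E) entry point: translate a dev_t into the corresponding
 * dev_info pointer or instance number for the DDI framework.
 */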
/*ARGSUSED*/
static int
xdf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp)
{
	int	instance = XDF_INST(getminor((dev_t)arg));
	xdf_t	*vbdp;

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((vbdp = ddi_get_soft_state(xdf_ssp, instance)) == NULL) {
			*rp = NULL;
			return (DDI_FAILURE);
		}
		*rp = vbdp->xdf_dip;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2INSTANCE:
		*rp = (void *)(uintptr_t)instance;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

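/*
 * DDI_RESUME handler.  After a suspend/resume cycle the ring and event
 * channel are gone, so we resume the xvdi state, re-register our
 * xenbus watch on the backend's hotplug status, reset our state to
 * XD_UNKNOWN, and restart the connection handshake with the backend.
 */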
/*ARGSUSED*/
static int
xdf_resume(dev_info_t *dip)
{
	xdf_t	*vdp;
	char	*oename;

	if ((vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))) == NULL)
		goto err;

	if (xdf_debug & SUSRES_DBG)
		xen_printf("xdf@%s: xdf_resume\n", vdp->xdf_addr);

	mutex_enter(&vdp->xdf_cb_lk);

	if (xvdi_resume(dip) != DDI_SUCCESS) {
		mutex_exit(&vdp->xdf_cb_lk);
		goto err;
	}

	if (((oename = xvdi_get_oename(dip)) == NULL) ||
	    (xvdi_add_xb_watch_handler(dip, oename, XBP_HP_STATUS,
	    xdf_watch_hp_status_cb, NULL) != DDI_SUCCESS)) {
		mutex_exit(&vdp->xdf_cb_lk);
		goto err;
	}

	mutex_enter(&vdp->xdf_dev_lk);
	ASSERT(vdp->xdf_state != XD_READY);
	xdf_set_state(vdp, XD_UNKNOWN);
	mutex_exit(&vdp->xdf_dev_lk);

	if (xdf_setstate_init(vdp) != DDI_SUCCESS) {
		mutex_exit(&vdp->xdf_cb_lk);
		goto err;
	}

	mutex_exit(&vdp->xdf_cb_lk);

	if (xdf_debug & SUSRES_DBG)
		xen_printf("xdf@%s: xdf_resume: done\n", vdp->xdf_addr);
	return (DDI_SUCCESS);
err:
	if (xdf_debug & SUSRES_DBG)
		xen_printf("xdf@%s: xdf_resume: fail\n", vdp->xdf_addr);
	return (DDI_FAILURE);
}

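/*
 * attach(9E) entry point.  For DDI_ATTACH we allocate and initialize
 * the per-instance soft state (locks, cvs, taskq, soft interrupt, cmlb
 * handle and minor nodes), register our xenbus watches, and then kick
 * off the connection handshake with the backend via xdf_setstate_init().
 * The handshake completes asynchronously, so the device is not
 * necessarily ready for IO when attach returns.
 */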
static int
xdf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int	n, instance = ddi_get_instance(dip);
	ddi_iblock_cookie_t ibc, softibc;
	boolean_t dev_iscd = B_FALSE;
	xdf_t	*vdp;
	char	*oename, *xsname, *str;

	if ((n = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_NOTPROM,
	    "xdf_debug", 0)) != 0)
		xdf_debug = n;

	switch (cmd) {
	case DDI_RESUME:
		return (xdf_resume(dip));
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}
	/* DDI_ATTACH */

	if (((xsname = xvdi_get_xsname(dip)) == NULL) ||
	    ((oename = xvdi_get_oename(dip)) == NULL))
		return (DDI_FAILURE);

	/*
	 * Disable auto-detach.  This is necessary so that we don't get
	 * detached while we're disconnected from the back end.
	 */
	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    DDI_NO_AUTODETACH, 1) != DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	/* driver handles kernel-issued IOCTLs */
	if (ddi_prop_create(DDI_DEV_T_NONE, dip,
	    DDI_PROP_CANSLEEP, DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (ddi_get_soft_iblock_cookie(dip,
	    DDI_SOFTINT_LOW, &softibc) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (xenbus_read_str(xsname, XBP_DEV_TYPE, &str) != 0) {
		cmn_err(CE_WARN, "xdf@%s: cannot read device-type",
		    ddi_get_name_addr(dip));
		return (DDI_FAILURE);
	}
	if (strcmp(str, XBV_DEV_TYPE_CD) == 0)
		dev_iscd = B_TRUE;
	strfree(str);

	if (ddi_soft_state_zalloc(xdf_ssp, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);

	DPRINTF(DDI_DBG, ("xdf@%s: attaching\n", ddi_get_name_addr(dip)));
	vdp = ddi_get_soft_state(xdf_ssp, instance);
	ddi_set_driver_private(dip, vdp);
	vdp->xdf_dip = dip;
	vdp->xdf_addr = ddi_get_name_addr(dip);
	vdp->xdf_suspending = B_FALSE;
	vdp->xdf_media_req_supported = B_FALSE;
	vdp->xdf_peer = INVALID_DOMID;
	vdp->xdf_evtchn = INVALID_EVTCHN;
	list_create(&vdp->xdf_vreq_act, sizeof (v_req_t),
	    offsetof(v_req_t, v_link));
	cv_init(&vdp->xdf_dev_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vdp->xdf_hp_status_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vdp->xdf_mstate_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&vdp->xdf_dev_lk, NULL, MUTEX_DRIVER, (void *)ibc);
	mutex_init(&vdp->xdf_cb_lk, NULL, MUTEX_DRIVER, (void *)ibc);
	mutex_init(&vdp->xdf_iostat_lk, NULL, MUTEX_DRIVER, (void *)ibc);
	vdp->xdf_cmbl_reattach = B_TRUE;
	if (dev_iscd) {
		vdp->xdf_dinfo |= VDISK_CDROM;
		vdp->xdf_mstate = DKIO_EJECTED;
	} else {
		vdp->xdf_mstate = DKIO_NONE;
	}

	if ((vdp->xdf_ready_tq = ddi_taskq_create(dip, "xdf_ready_tq",
	    1, TASKQ_DEFAULTPRI, 0)) == NULL)
		goto errout0;

	if (xvdi_add_xb_watch_handler(dip, oename, XBP_HP_STATUS,
	    xdf_watch_hp_status_cb, NULL) != DDI_SUCCESS)
		goto errout0;

	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &vdp->xdf_softintr_id,
	    &softibc, NULL, xdf_iorestart, (caddr_t)vdp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "xdf@%s: failed to add softintr",
		    ddi_get_name_addr(dip));
		goto errout0;
	}

	/*
	 * Initialize the physical geometry structure.  Note that currently
	 * we don't know the size of the backend device so the number
	 * of blocks on the device will be initialized to zero.  Once
	 * we connect to the backend device we'll update the physical
	 * geometry to reflect the real size of the device.
	 */
	xdf_synthetic_pgeom(dip, &vdp->xdf_pgeom);
	vdp->xdf_pgeom_fixed = B_FALSE;

	/*
	 * Create the default device minor nodes (those of a non-removable
	 * disk).  We'll adjust the minor nodes once we've connected to the
	 * backend.
	 */
	cmlb_alloc_handle(&vdp->xdf_vd_lbl);
	if (xdf_cmlb_attach(vdp) != 0) {
		cmn_err(CE_WARN,
		    "xdf@%s: attach failed, cmlb attach failed",
		    ddi_get_name_addr(dip));
		goto errout0;
	}

	/*
	 * We ship with cache-enabled disks
	 */
	vdp->xdf_wce = B_TRUE;

	mutex_enter(&vdp->xdf_cb_lk);
	/* Watch backend XenbusState change */
	if (xvdi_add_event_handler(dip,
	    XS_OE_STATE, xdf_oe_change, NULL) != DDI_SUCCESS) {
		mutex_exit(&vdp->xdf_cb_lk);
		goto errout0;
	}

	if (xdf_setstate_init(vdp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "xdf@%s: start connection failed",
		    ddi_get_name_addr(dip));
		mutex_exit(&vdp->xdf_cb_lk);
		goto errout1;
	}
	mutex_exit(&vdp->xdf_cb_lk);

#if defined(XPV_HVM_DRIVER)

	xdf_hvm_add(dip);

	/* Report our version to dom0. */
	if (xenbus_printf(XBT_NULL, "guest/xdf", "version", "%d",
	    HVMPV_XDF_VERS))
		cmn_err(CE_WARN, "xdf: couldn't write version\n");

#else /* !XPV_HVM_DRIVER */

	/* create kstat for iostat(1M) */
	if (xdf_kstat_create(dip, "xdf", instance) != 0) {
		cmn_err(CE_WARN, "xdf@%s: failed to create kstat",
		    ddi_get_name_addr(dip));
		goto errout1;
	}

#endif /* !XPV_HVM_DRIVER */

	ddi_report_dev(dip);
	DPRINTF(DDI_DBG, ("xdf@%s: attached\n", vdp->xdf_addr));
	return (DDI_SUCCESS);

errout1:
	(void) xvdi_switch_state(vdp->xdf_dip, XBT_NULL, XenbusStateClosed);
	xvdi_remove_event_handler(dip, XS_OE_STATE);
errout0:
	if (vdp->xdf_vd_lbl != NULL) {
		cmlb_detach(vdp->xdf_vd_lbl, NULL);
		cmlb_free_handle(&vdp->xdf_vd_lbl);
		vdp->xdf_vd_lbl = NULL;
	}
	if (vdp->xdf_softintr_id != NULL)
		ddi_remove_softintr(vdp->xdf_softintr_id);
	xvdi_remove_xb_watch_handlers(dip);
	if (vdp->xdf_ready_tq != NULL)
		ddi_taskq_destroy(vdp->xdf_ready_tq);
	mutex_destroy(&vdp->xdf_cb_lk);
	mutex_destroy(&vdp->xdf_dev_lk);
	cv_destroy(&vdp->xdf_dev_cv);
	cv_destroy(&vdp->xdf_hp_status_cv);
	ddi_soft_state_free(xdf_ssp, instance);
	ddi_set_driver_private(dip, NULL);
	ddi_prop_remove_all(dip);
	cmn_err(CE_WARN, "xdf@%s: attach failed", ddi_get_name_addr(dip));
	return (DDI_FAILURE);
}

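/*
 * DDI_SUSPEND handler.  We quiesce the xvdi connection, tear down the
 * IO ring, and move to the XD_SUSPEND state so that no new IO is
 * initiated until xdf_resume() reconnects us to the backend.
 */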
static int
xdf_suspend(dev_info_t *dip)
{
	int	instance = ddi_get_instance(dip);
	xdf_t	*vdp;

	if ((vdp = ddi_get_soft_state(xdf_ssp, instance)) == NULL)
		return (DDI_FAILURE);

	if (xdf_debug & SUSRES_DBG)
		xen_printf("xdf@%s: xdf_suspend\n", vdp->xdf_addr);

	xvdi_suspend(dip);

	mutex_enter(&vdp->xdf_cb_lk);
	mutex_enter(&vdp->xdf_dev_lk);

	vdp->xdf_suspending = B_TRUE;
	xdf_ring_destroy(vdp);
	xdf_set_state(vdp, XD_SUSPEND);
	vdp->xdf_suspending = B_FALSE;

	mutex_exit(&vdp->xdf_dev_lk);
	mutex_exit(&vdp->xdf_cb_lk);

	if (xdf_debug & SUSRES_DBG)
		xen_printf("xdf@%s: xdf_suspend: done\n", vdp->xdf_addr);

	return (DDI_SUCCESS);
}

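/*
 * detach(9E) entry point.  Detach only succeeds once we've fully
 * disconnected from the backend (state XD_CLOSED); otherwise we fail
 * the request and stay attached.  On success we unwind everything that
 * xdf_attach() set up.
 */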
static int
xdf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	xdf_t	*vdp;
	int	instance;

	switch (cmd) {

	case DDI_PM_SUSPEND:
		break;

	case DDI_SUSPEND:
		return (xdf_suspend(dip));

	case DDI_DETACH:
		break;

	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	DPRINTF(DDI_DBG, ("xdf@%s: detaching\n", ddi_get_name_addr(dip)));
	vdp = ddi_get_soft_state(xdf_ssp, instance);

	if (vdp == NULL)
		return (DDI_FAILURE);

	mutex_enter(&vdp->xdf_cb_lk);
	xdf_disconnect(vdp, XD_CLOSED, B_FALSE);
	if (vdp->xdf_state != XD_CLOSED) {
		mutex_exit(&vdp->xdf_cb_lk);
		return (DDI_FAILURE);
	}
	mutex_exit(&vdp->xdf_cb_lk);

	ASSERT(!ISDMACBON(vdp));

#if defined(XPV_HVM_DRIVER)
	xdf_hvm_rm(dip);
#endif /* XPV_HVM_DRIVER */

	if (vdp->xdf_timeout_id != 0)
		(void) untimeout(vdp->xdf_timeout_id);

	xvdi_remove_event_handler(dip, XS_OE_STATE);
	ddi_taskq_destroy(vdp->xdf_ready_tq);

	cmlb_detach(vdp->xdf_vd_lbl, NULL);
	cmlb_free_handle(&vdp->xdf_vd_lbl);

	/* we'll support backend running in domU later */
#ifdef DOMU_BACKEND
	(void) xvdi_post_event(dip, XEN_HP_REMOVE);
#endif

	list_destroy(&vdp->xdf_vreq_act);
	ddi_prop_remove_all(dip);
	xdf_kstat_delete(dip);
	ddi_remove_softintr(vdp->xdf_softintr_id);
	xvdi_remove_xb_watch_handlers(dip);
	ddi_set_driver_private(dip, NULL);
	cv_destroy(&vdp->xdf_dev_cv);
	mutex_destroy(&vdp->xdf_cb_lk);
	mutex_destroy(&vdp->xdf_dev_lk);
	if (vdp->xdf_cache_flush_block != NULL)
		kmem_free(vdp->xdf_flush_mem, 2 * vdp->xdf_xdev_secsize);
	ddi_soft_state_free(xdf_ssp, instance);
	return (DDI_SUCCESS);
}

/*
 * Driver linkage structures.
 */
static struct cb_ops xdf_cbops = {
	xdf_open,		/* cb_open */
	xdf_close,		/* cb_close */
	xdf_strategy,		/* cb_strategy */
	nodev,			/* cb_print */
	xdf_dump,		/* cb_dump */
	xdf_read,		/* cb_read */
	xdf_write,		/* cb_write */
	xdf_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	xdf_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str */
	D_MP | D_NEW | D_64BIT,	/* cb_flag */
	CB_REV,			/* cb_rev */
	xdf_aread,		/* cb_aread */
	xdf_awrite		/* cb_awrite */
};

struct dev_ops xdf_devops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	xdf_getinfo,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	xdf_attach,		/* devo_attach */
	xdf_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&xdf_cbops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	NULL,			/* devo_power */
	ddi_quiesce_not_supported, /* devo_quiesce */
};

/*
 * Module linkage structures.
 */
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	"virtual block driver",	/* short description */
	&xdf_devops		/* driver specific ops */
};

static struct modlinkage xdf_modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/*
 * Standard module entry points.
 */
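/*
 * _init() is called at module load time.  Ordering matters here: the
 * soft state and kmem caches must exist before mod_install() makes the
 * driver visible, since attach can be invoked as soon as the module is
 * installed.  On a failed install everything is torn back down.
 */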
int
_init(void)
{
	int rc;

	xdf_major = ddi_name_to_major("xdf");
	if (xdf_major == (major_t)-1)
		return (EINVAL);

	if ((rc = ddi_soft_state_init(&xdf_ssp, sizeof (xdf_t), 0)) != 0)
		return (rc);

	xdf_vreq_cache = kmem_cache_create("xdf_vreq_cache",
	    sizeof (v_req_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	xdf_gs_cache = kmem_cache_create("xdf_gs_cache",
	    sizeof (ge_slot_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

#if defined(XPV_HVM_DRIVER)
	xdf_hvm_init();
#endif /* XPV_HVM_DRIVER */

	if ((rc = mod_install(&xdf_modlinkage)) != 0) {
#if defined(XPV_HVM_DRIVER)
		xdf_hvm_fini();
#endif /* XPV_HVM_DRIVER */
		kmem_cache_destroy(xdf_vreq_cache);
		kmem_cache_destroy(xdf_gs_cache);
		ddi_soft_state_fini(&xdf_ssp);
		return (rc);
	}

	return (rc);
}

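/*
 * _fini() is called at module unload time.  mod_remove() fails while
 * any instance is still attached, in which case we return its error
 * and leave all of our global state (caches, soft state) intact.
 */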
int
_fini(void)
{
	int err;

	if ((err = mod_remove(&xdf_modlinkage)) != 0)
		return (err);

#if defined(XPV_HVM_DRIVER)
	xdf_hvm_fini();
#endif /* XPV_HVM_DRIVER */

	kmem_cache_destroy(xdf_vreq_cache);
	kmem_cache_destroy(xdf_gs_cache);
	ddi_soft_state_fini(&xdf_ssp);

	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&xdf_modlinkage, modinfop));
}