/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Intel PC root nexus driver
 *	based on sun4c root nexus driver 1.30
 */

#include <sys/sysmacros.h>
#include <sys/conf.h>
#include <sys/autoconf.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/psw.h>
#include <sys/ddidmareq.h>
#include <sys/promif.h>
#include <sys/devops.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_dev.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <sys/avintr.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/psm.h>
#include <sys/ontrap.h>

#define	ptob64(x)	(((uint64_t)(x)) << PAGESHIFT)

extern void i86_pp_map(page_t *, caddr_t);
extern void i86_va_map(caddr_t, struct as *, caddr_t);
extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
extern int isa_resource_setup(void);

/* Semi-temporary patchables to phase in bug fixes */
int rootnex_bind_fail = 1;
int rootnex_bind_warn = 1;
uint8_t *rootnex_warn_list;

/* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
#define	ROOTNEX_BIND_WARNING	(0x1 << 0)

/*
 * DMA related static data
 */
static uintptr_t dvma_call_list_id = 0;

/*
 * The device arena is used for device control register mappings.
 * Various kernel memory walkers (debugger, dtrace) need to know about
 * this address range so they can avoid it and prevent undesired
 * device activity.
 */
extern void *device_arena_alloc(size_t size, int vm_flag);
extern void device_arena_free(void * vaddr, size_t size);


/*
 * Hack to handle poke faults on Calvin-class machines
 */
extern int pokefault;
static kmutex_t pokefault_mutex;


/*
 * Internal functions
 */
static int
rootnex_ctl_children(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, dev_info_t *child);

static int
rootnex_ctlops_poke(peekpoke_ctlops_t *in_args);

static int
rootnex_ctlops_peek(peekpoke_ctlops_t *in_args, void *result);

/*
 * config information
 */

static int
rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp);

static int
rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
    struct hat *hat, struct seg *seg, caddr_t addr,
    struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);

static int
rootnex_dma_allochdl(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *);

static int
rootnex_dma_freehdl(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

static int
rootnex_dma_bindhdl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

static int
rootnex_dma_unbindhdl(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

static int
rootnex_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
    off_t, size_t, uint_t);

static int
rootnex_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);

static int
rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);

static int
rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);

static int
rootnex_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *, void *);

static struct intrspec *
rootnex_get_ispec(dev_info_t *rdip, int inum);

static int
rootnex_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
    ddi_intr_handle_impl_t *, void *);

static struct bus_ops rootnex_bus_ops = {
	BUSO_REV,
	rootnex_map,
	NULL,
	NULL,
	NULL,
	rootnex_map_fault,
	rootnex_dma_map,
	rootnex_dma_allochdl,
	rootnex_dma_freehdl,
	rootnex_dma_bindhdl,
	rootnex_dma_unbindhdl,
	rootnex_dma_flush,
	rootnex_dma_win,
	rootnex_dma_mctl,
	rootnex_ctlops,
	ddi_bus_prop_op,
	i_ddi_rootnex_get_eventcookie,
	i_ddi_rootnex_add_eventcall,
	i_ddi_rootnex_remove_eventcall,
	i_ddi_rootnex_post_event,
	0,			/* bus_intr_ctl */
	0,			/* bus_config */
	0,			/* bus_unconfig */
	NULL,			/* bus_fm_init */
	NULL,			/* bus_fm_fini */
	NULL,			/* bus_fm_access_enter */
	NULL,			/* bus_fm_access_exit */
	NULL,			/* bus_power */
	rootnex_intr_ops	/* bus_intr_op */
};

struct priv_handle {
	caddr_t	ph_vaddr;
	union {
		page_t *pp;
		struct as *asp;
	} ph_u;
	uint_t	ph_mapinfo;
	uint64_t ph_padr;
};
static uint64_t rootnex_get_phyaddr();
static int rootnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int rootnex_io_rdsync(ddi_dma_impl_t *hp);
static int rootnex_io_wtsync(ddi_dma_impl_t *hp, int);
static int rootnex_io_brkup_attr(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, ddi_dma_handle_t handle,
    struct priv_handle *php);
static int rootnex_io_brkup_lim(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep,
    ddi_dma_lim_t *dma_lim, struct priv_handle *php);

static struct dev_ops rootnex_ops = {
	DEVO_REV,
	0,		/* refcnt */
	ddi_no_info,	/* info */
	nulldev,
	nulldev,	/* probe */
	rootnex_attach,
	nulldev,	/* detach */
	nulldev,	/* reset */
	0,		/* cb_ops */
	&rootnex_bus_ops
};

/*
 * Module linkage information for the kernel.
 */

static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module.  This one is a nexus driver */
	"i86pc root nexus %I%",
	&rootnex_ops,	/* Driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};


int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * rootnex_attach:
 *
 *	attach the root nexus.
 */

static void add_root_props(dev_info_t *);

/*ARGSUSED*/
static int
rootnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	mutex_init(&pokefault_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(15));

	add_root_props(devi);

	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(devi));

	i_ddi_rootnex_init_events(devi);

	/*
	 * allocate array to track which major numbers we have printed
	 * warnings for.
	 */
	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
	    KM_SLEEP);

	return (DDI_SUCCESS);
}


/*
 * Add statically defined root properties to this list...
 */

static const int pagesize = PAGESIZE;
static const int mmu_pagesize = MMU_PAGESIZE;
static const int mmu_pageoffset = MMU_PAGEOFFSET;

struct prop_def {
	char *prop_name;
	caddr_t prop_value;
};

static struct prop_def root_props[] = {
	{ "PAGESIZE",		(caddr_t)&pagesize },
	{ "MMU_PAGESIZE",	(caddr_t)&mmu_pagesize},
	{ "MMU_PAGEOFFSET",	(caddr_t)&mmu_pageoffset},
};

#define	NROOT_PROPS	(sizeof (root_props) / sizeof (struct prop_def))

static void
add_root_props(dev_info_t *devi)
{
	int i;
	struct prop_def *rpp;

	/*
	 * Note this for loop works because all of the root_prop
	 * properties are integers - if this changes, the for
	 * loop will have to change.
	 */
	for (i = 0, rpp = root_props; i < NROOT_PROPS; ++i, ++rpp) {
		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, devi,
		    rpp->prop_name, *((int *)rpp->prop_value));
	}

	/*
	 * Create the root node "boolean" property
	 * corresponding to addressing type supported in the root node:
	 *
	 * Choices are:
	 *	"relative-addressing" (OBP PROMS)
	 *	"generic-addressing"  (Sun4 -- pseudo OBP/DDI)
	 */

	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, devi,
	    DDI_RELATIVE_ADDRESSING, 1);

}

/*
 * #define DDI_MAP_DEBUG (c.f. ddi_impl.c)
 */
#ifdef	DDI_MAP_DEBUG
extern int ddi_map_debug_flag;
#define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
#endif	/* DDI_MAP_DEBUG */


/*
 * we don't support mapping of I/O cards above 4Gb
 */
static int
rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
{
	ulong_t base;
	void *cvaddr;
	uint_t npages, pgoffset;
	struct regspec *rp;
	ddi_acc_hdl_t *hp;
	ddi_acc_impl_t *ap;
	uint_t	hat_acc_flags;

	rp = mp->map_obj.rp;
	hp = mp->map_handlep;

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug(
	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
	    rp->regspec_bustype, rp->regspec_addr,
	    rp->regspec_size, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * I/O or memory mapping
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */

	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
		cmn_err(CE_WARN, "rootnex: invalid register spec"
		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
		    rp->regspec_addr, rp->regspec_size);
		return (DDI_FAILURE);
	}

	if (rp->regspec_bustype != 0) {
		/*
		 * I/O space - needs a handle.
		 */
		if (hp == NULL) {
			return (DDI_FAILURE);
		}
		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
		impl_acc_hdl_init(hp);

		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
#ifdef	DDI_MAP_DEBUG
			ddi_map_debug("rootnex_map_regspec: mmap() \
to I/O space is not supported.\n");
#endif	/* DDI_MAP_DEBUG */
			return (DDI_ME_INVAL);
		} else {
			/*
			 * 1275-compliant vs. compatibility i/o mapping
			 */
			*vaddrp =
			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
			    ((caddr_t)(uintptr_t)rp->regspec_bustype) :
			    ((caddr_t)(uintptr_t)rp->regspec_addr);
		}

#ifdef	DDI_MAP_DEBUG
		ddi_map_debug(
	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
		    rp->regspec_size, *vaddrp);
#endif	/* DDI_MAP_DEBUG */
		return (DDI_SUCCESS);
	}

	/*
	 * Memory space
	 */

	if (hp != NULL) {
		/*
		 * hat layer ignores
		 * hp->ah_acc.devacc_attr_endian_flags.
		 */
		switch (hp->ah_acc.devacc_attr_dataorder) {
		case DDI_STRICTORDER_ACC:
			hat_acc_flags = HAT_STRICTORDER;
			break;
		case DDI_UNORDERED_OK_ACC:
			hat_acc_flags = HAT_UNORDERED_OK;
			break;
		case DDI_MERGING_OK_ACC:
			hat_acc_flags = HAT_MERGING_OK;
			break;
		case DDI_LOADCACHING_OK_ACC:
			hat_acc_flags = HAT_LOADCACHING_OK;
			break;
		case DDI_STORECACHING_OK_ACC:
			hat_acc_flags = HAT_STORECACHING_OK;
			break;
		}
		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
		impl_acc_hdl_init(hp);
		hp->ah_hat_flags = hat_acc_flags;
	} else {
		hat_acc_flags = HAT_STRICTORDER;
	}

	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */

	if (rp->regspec_size == 0) {
#ifdef	DDI_MAP_DEBUG
		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_INVAL);
	}

	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
		*vaddrp = (caddr_t)mmu_btop(base);
	} else {
		npages = mmu_btopr(rp->regspec_size + pgoffset);

#ifdef	DDI_MAP_DEBUG
		ddi_map_debug("rootnex_map_regspec: Mapping %d pages \
physical %x ",
		    npages, base);
#endif	/* DDI_MAP_DEBUG */

		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
		if (cvaddr == NULL)
			return (DDI_ME_NORESOURCES);

		/*
		 * Now map in the pages we've allocated...
		 */
		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages),
		    mmu_btop(base), mp->map_prot | hat_acc_flags,
		    HAT_LOAD_LOCK);
		*vaddrp = (caddr_t)cvaddr + pgoffset;
	}

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
#endif	/* DDI_MAP_DEBUG */
	return (DDI_SUCCESS);
}

static int
rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
{
	caddr_t addr = (caddr_t)*vaddrp;
	uint_t npages, pgoffset;
	struct regspec *rp;

	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
		return (0);

	rp = mp->map_obj.rp;

	if (rp->regspec_size == 0) {
#ifdef	DDI_MAP_DEBUG
		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_INVAL);
	}

	/*
	 * I/O or memory mapping:
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */
	if (rp->regspec_bustype != 0) {
		/*
		 * This is I/O space, which requires no particular
		 * processing on unmap since it isn't mapped in the
		 * first place.
		 */
		return (DDI_SUCCESS);
	}

	/*
	 * Memory space
	 */
	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
	npages = mmu_btopr(rp->regspec_size + pgoffset);
	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages),
	    HAT_UNLOAD_UNLOCK);
	device_arena_free(addr - pgoffset, ptob(npages));

	/*
	 * Destroy the pointer - the mapping has logically gone
	 */
	*vaddrp = NULL;

	return (DDI_SUCCESS);
}

static int
rootnex_map_handle(ddi_map_req_t *mp)
{
	ddi_acc_hdl_t *hp;
	ulong_t base;
	uint_t pgoffset;
	struct regspec *rp;

	rp = mp->map_obj.rp;

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug(
	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
	    rp->regspec_bustype, rp->regspec_addr,
	    rp->regspec_size, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * I/O or memory mapping:
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */
	if (rp->regspec_bustype != 0) {
		/*
		 * This refers to I/O space, and we don't support "mapping"
		 * I/O space to a user.
		 */
		return (DDI_FAILURE);
	}

	/*
	 * Set up the hat_flags for the mapping.
	 */
	hp = mp->map_handlep;

	switch (hp->ah_acc.devacc_attr_endian_flags) {
	case DDI_NEVERSWAP_ACC:
		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
		break;
	case DDI_STRUCTURE_LE_ACC:
		hp->ah_hat_flags = HAT_STRUCTURE_LE;
		break;
	case DDI_STRUCTURE_BE_ACC:
		return (DDI_FAILURE);
	default:
		return (DDI_REGS_ACC_CONFLICT);
	}

	switch (hp->ah_acc.devacc_attr_dataorder) {
	case DDI_STRICTORDER_ACC:
		break;
	case DDI_UNORDERED_OK_ACC:
		hp->ah_hat_flags |= HAT_UNORDERED_OK;
		break;
	case DDI_MERGING_OK_ACC:
		hp->ah_hat_flags |= HAT_MERGING_OK;
		break;
	case DDI_LOADCACHING_OK_ACC:
		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
		break;
	case DDI_STORECACHING_OK_ACC:
		hp->ah_hat_flags |= HAT_STORECACHING_OK;
		break;
	default:
		return (DDI_FAILURE);
	}

	base = (ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET); /* base addr */
	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; /* offset */

	if (rp->regspec_size == 0)
		return (DDI_ME_INVAL);

	hp->ah_pfn = mmu_btop(base);
	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);

	return (DDI_SUCCESS);
}

static int
rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	struct regspec *rp, tmp_reg;
	ddi_map_req_t mr = *mp;		/* Get private copy of request */
	int error;

	mp = &mr;

	switch (mp->map_op) {
	case DDI_MO_MAP_LOCKED:
	case DDI_MO_UNMAP:
	case DDI_MO_MAP_HANDLE:
		break;
	default:
#ifdef	DDI_MAP_DEBUG
		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
		    mp->map_op);
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_UNIMPLEMENTED);
	}

	if (mp->map_flags & DDI_MF_USER_MAPPING) {
#ifdef	DDI_MAP_DEBUG
		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_UNIMPLEMENTED);
	}

664*0Sstevel@tonic-gate /* 665*0Sstevel@tonic-gate * First, if given an rnumber, convert it to a regspec... 666*0Sstevel@tonic-gate * (Presumably, this is on behalf of a child of the root node?) 667*0Sstevel@tonic-gate */ 668*0Sstevel@tonic-gate 669*0Sstevel@tonic-gate if (mp->map_type == DDI_MT_RNUMBER) { 670*0Sstevel@tonic-gate 671*0Sstevel@tonic-gate int rnumber = mp->map_obj.rnumber; 672*0Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 673*0Sstevel@tonic-gate static char *out_of_range = 674*0Sstevel@tonic-gate "rootnex_map: Out of range rnumber <%d>, device <%s>"; 675*0Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 676*0Sstevel@tonic-gate 677*0Sstevel@tonic-gate rp = i_ddi_rnumber_to_regspec(rdip, rnumber); 678*0Sstevel@tonic-gate if (rp == NULL) { 679*0Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 680*0Sstevel@tonic-gate cmn_err(CE_WARN, out_of_range, rnumber, 681*0Sstevel@tonic-gate ddi_get_name(rdip)); 682*0Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 683*0Sstevel@tonic-gate return (DDI_ME_RNUMBER_RANGE); 684*0Sstevel@tonic-gate } 685*0Sstevel@tonic-gate 686*0Sstevel@tonic-gate /* 687*0Sstevel@tonic-gate * Convert the given ddi_map_req_t from rnumber to regspec... 688*0Sstevel@tonic-gate */ 689*0Sstevel@tonic-gate 690*0Sstevel@tonic-gate mp->map_type = DDI_MT_REGSPEC; 691*0Sstevel@tonic-gate mp->map_obj.rp = rp; 692*0Sstevel@tonic-gate } 693*0Sstevel@tonic-gate 694*0Sstevel@tonic-gate /* 695*0Sstevel@tonic-gate * Adjust offset and length correspnding to called values... 696*0Sstevel@tonic-gate * XXX: A non-zero length means override the one in the regspec 697*0Sstevel@tonic-gate * XXX: (regardless of what's in the parent's range?) 698*0Sstevel@tonic-gate */ 699*0Sstevel@tonic-gate 700*0Sstevel@tonic-gate tmp_reg = *(mp->map_obj.rp); /* Preserve underlying data */ 701*0Sstevel@tonic-gate rp = mp->map_obj.rp = &tmp_reg; /* Use tmp_reg in request */ 702*0Sstevel@tonic-gate 703*0Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 704*0Sstevel@tonic-gate cmn_err(CE_CONT, 705*0Sstevel@tonic-gate "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d>" 706*0Sstevel@tonic-gate " offset %d len %d handle 0x%x\n", 707*0Sstevel@tonic-gate ddi_get_name(dip), ddi_get_name(rdip), 708*0Sstevel@tonic-gate rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, 709*0Sstevel@tonic-gate offset, len, mp->map_handlep); 710*0Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 711*0Sstevel@tonic-gate 712*0Sstevel@tonic-gate /* 713*0Sstevel@tonic-gate * I/O or memory mapping: 714*0Sstevel@tonic-gate * 715*0Sstevel@tonic-gate * <bustype=0, addr=x, len=x>: memory 716*0Sstevel@tonic-gate * <bustype=1, addr=x, len=x>: i/o 717*0Sstevel@tonic-gate * <bustype>1, addr=0, len=x>: x86-compatibility i/o 718*0Sstevel@tonic-gate */ 719*0Sstevel@tonic-gate 720*0Sstevel@tonic-gate if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) { 721*0Sstevel@tonic-gate cmn_err(CE_WARN, "<%s,%s> invalid register spec" 722*0Sstevel@tonic-gate " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip), 723*0Sstevel@tonic-gate ddi_get_name(rdip), rp->regspec_bustype, 724*0Sstevel@tonic-gate rp->regspec_addr, rp->regspec_size); 725*0Sstevel@tonic-gate return (DDI_ME_INVAL); 726*0Sstevel@tonic-gate } 727*0Sstevel@tonic-gate 728*0Sstevel@tonic-gate if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) { 729*0Sstevel@tonic-gate /* 730*0Sstevel@tonic-gate * compatibility i/o mapping 731*0Sstevel@tonic-gate */ 732*0Sstevel@tonic-gate rp->regspec_bustype += (uint_t)offset; 733*0Sstevel@tonic-gate } else { 734*0Sstevel@tonic-gate /* 735*0Sstevel@tonic-gate * Normal memory or i/o mapping 736*0Sstevel@tonic-gate 
*/ 737*0Sstevel@tonic-gate rp->regspec_addr += (uint_t)offset; 738*0Sstevel@tonic-gate } 739*0Sstevel@tonic-gate 740*0Sstevel@tonic-gate if (len != 0) 741*0Sstevel@tonic-gate rp->regspec_size = (uint_t)len; 742*0Sstevel@tonic-gate 743*0Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 744*0Sstevel@tonic-gate cmn_err(CE_CONT, 745*0Sstevel@tonic-gate " <%s,%s> <0x%x, 0x%x, 0x%d>" 746*0Sstevel@tonic-gate " offset %d len %d handle 0x%x\n", 747*0Sstevel@tonic-gate ddi_get_name(dip), ddi_get_name(rdip), 748*0Sstevel@tonic-gate rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, 749*0Sstevel@tonic-gate offset, len, mp->map_handlep); 750*0Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 751*0Sstevel@tonic-gate 752*0Sstevel@tonic-gate /* 753*0Sstevel@tonic-gate * Apply any parent ranges at this level, if applicable. 754*0Sstevel@tonic-gate * (This is where nexus specific regspec translation takes place. 755*0Sstevel@tonic-gate * Use of this function is implicit agreement that translation is 756*0Sstevel@tonic-gate * provided via ddi_apply_range.) 757*0Sstevel@tonic-gate */ 758*0Sstevel@tonic-gate 759*0Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 760*0Sstevel@tonic-gate ddi_map_debug("applying range of parent <%s> to child <%s>...\n", 761*0Sstevel@tonic-gate ddi_get_name(dip), ddi_get_name(rdip)); 762*0Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 763*0Sstevel@tonic-gate 764*0Sstevel@tonic-gate if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0) 765*0Sstevel@tonic-gate return (error); 766*0Sstevel@tonic-gate 767*0Sstevel@tonic-gate switch (mp->map_op) { 768*0Sstevel@tonic-gate case DDI_MO_MAP_LOCKED: 769*0Sstevel@tonic-gate 770*0Sstevel@tonic-gate /* 771*0Sstevel@tonic-gate * Set up the locked down kernel mapping to the regspec... 772*0Sstevel@tonic-gate */ 773*0Sstevel@tonic-gate 774*0Sstevel@tonic-gate return (rootnex_map_regspec(mp, vaddrp)); 775*0Sstevel@tonic-gate 776*0Sstevel@tonic-gate case DDI_MO_UNMAP: 777*0Sstevel@tonic-gate 778*0Sstevel@tonic-gate /* 779*0Sstevel@tonic-gate * Release mapping... 780*0Sstevel@tonic-gate */ 781*0Sstevel@tonic-gate 782*0Sstevel@tonic-gate return (rootnex_unmap_regspec(mp, vaddrp)); 783*0Sstevel@tonic-gate 784*0Sstevel@tonic-gate case DDI_MO_MAP_HANDLE: 785*0Sstevel@tonic-gate 786*0Sstevel@tonic-gate return (rootnex_map_handle(mp)); 787*0Sstevel@tonic-gate 788*0Sstevel@tonic-gate default: 789*0Sstevel@tonic-gate return (DDI_ME_UNIMPLEMENTED); 790*0Sstevel@tonic-gate } 791*0Sstevel@tonic-gate } 792*0Sstevel@tonic-gate 793*0Sstevel@tonic-gate 794*0Sstevel@tonic-gate /* 795*0Sstevel@tonic-gate * rootnex_map_fault: 796*0Sstevel@tonic-gate * 797*0Sstevel@tonic-gate * fault in mappings for requestors 798*0Sstevel@tonic-gate */ 799*0Sstevel@tonic-gate /*ARGSUSED*/ 800*0Sstevel@tonic-gate static int 801*0Sstevel@tonic-gate rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, 802*0Sstevel@tonic-gate struct hat *hat, struct seg *seg, caddr_t addr, 803*0Sstevel@tonic-gate struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock) 804*0Sstevel@tonic-gate { 805*0Sstevel@tonic-gate extern struct seg_ops segdev_ops; 806*0Sstevel@tonic-gate 807*0Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 808*0Sstevel@tonic-gate ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn); 809*0Sstevel@tonic-gate ddi_map_debug(" Seg <%s>\n", 810*0Sstevel@tonic-gate seg->s_ops == &segdev_ops ? "segdev" : 811*0Sstevel@tonic-gate seg == &kvseg ? 
"segkmem" : "NONE!"); 812*0Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 813*0Sstevel@tonic-gate 814*0Sstevel@tonic-gate /* 815*0Sstevel@tonic-gate * This is all terribly broken, but it is a start 816*0Sstevel@tonic-gate * 817*0Sstevel@tonic-gate * XXX Note that this test means that segdev_ops 818*0Sstevel@tonic-gate * must be exported from seg_dev.c. 819*0Sstevel@tonic-gate * XXX What about devices with their own segment drivers? 820*0Sstevel@tonic-gate */ 821*0Sstevel@tonic-gate if (seg->s_ops == &segdev_ops) { 822*0Sstevel@tonic-gate struct segdev_data *sdp = 823*0Sstevel@tonic-gate (struct segdev_data *)seg->s_data; 824*0Sstevel@tonic-gate 825*0Sstevel@tonic-gate if (hat == NULL) { 826*0Sstevel@tonic-gate /* 827*0Sstevel@tonic-gate * This is one plausible interpretation of 828*0Sstevel@tonic-gate * a null hat i.e. use the first hat on the 829*0Sstevel@tonic-gate * address space hat list which by convention is 830*0Sstevel@tonic-gate * the hat of the system MMU. At alternative 831*0Sstevel@tonic-gate * would be to panic .. this might well be better .. 832*0Sstevel@tonic-gate */ 833*0Sstevel@tonic-gate ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)); 834*0Sstevel@tonic-gate hat = seg->s_as->a_hat; 835*0Sstevel@tonic-gate cmn_err(CE_NOTE, "rootnex_map_fault: nil hat"); 836*0Sstevel@tonic-gate } 837*0Sstevel@tonic-gate hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr, 838*0Sstevel@tonic-gate (lock ? HAT_LOAD_LOCK : HAT_LOAD)); 839*0Sstevel@tonic-gate } else if (seg == &kvseg && dp == NULL) { 840*0Sstevel@tonic-gate hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot, 841*0Sstevel@tonic-gate HAT_LOAD_LOCK); 842*0Sstevel@tonic-gate } else 843*0Sstevel@tonic-gate return (DDI_FAILURE); 844*0Sstevel@tonic-gate return (DDI_SUCCESS); 845*0Sstevel@tonic-gate } 846*0Sstevel@tonic-gate 847*0Sstevel@tonic-gate 848*0Sstevel@tonic-gate /* 849*0Sstevel@tonic-gate * DMA routines- for all 80x86 machines. 
850*0Sstevel@tonic-gate */ 851*0Sstevel@tonic-gate 852*0Sstevel@tonic-gate /* 853*0Sstevel@tonic-gate * Shorthand defines 854*0Sstevel@tonic-gate */ 855*0Sstevel@tonic-gate 856*0Sstevel@tonic-gate #define MAP 0 857*0Sstevel@tonic-gate #define BIND 1 858*0Sstevel@tonic-gate #define MAX_INT_BUF (16*MMU_PAGESIZE) 859*0Sstevel@tonic-gate #define AHI_LIM dma_lim->dlim_addr_hi 860*0Sstevel@tonic-gate #define AHI_ATTR dma_attr->dma_attr_addr_hi 861*0Sstevel@tonic-gate #define OBJSIZE dmareq->dmar_object.dmao_size 862*0Sstevel@tonic-gate #define OBJTYPE dmareq->dmar_object.dmao_type 863*0Sstevel@tonic-gate #define FOURG 0x100000000ULL 864*0Sstevel@tonic-gate #define SIXTEEN_MB 0x1000000 865*0Sstevel@tonic-gate 866*0Sstevel@tonic-gate /* #define DMADEBUG */ 867*0Sstevel@tonic-gate #if defined(DEBUG) || defined(lint) 868*0Sstevel@tonic-gate #define DMADEBUG 869*0Sstevel@tonic-gate static int dmadebug = 0; 870*0Sstevel@tonic-gate #define DMAPRINT(a) if (dmadebug) prom_printf a 871*0Sstevel@tonic-gate #else 872*0Sstevel@tonic-gate #define DMAPRINT(a) { } 873*0Sstevel@tonic-gate #endif /* DEBUG */ 874*0Sstevel@tonic-gate 875*0Sstevel@tonic-gate 876*0Sstevel@tonic-gate 877*0Sstevel@tonic-gate /* 878*0Sstevel@tonic-gate * allocate DMA handle 879*0Sstevel@tonic-gate */ 880*0Sstevel@tonic-gate static int 881*0Sstevel@tonic-gate rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr, 882*0Sstevel@tonic-gate int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 883*0Sstevel@tonic-gate { 884*0Sstevel@tonic-gate ddi_dma_impl_t *hp; 885*0Sstevel@tonic-gate uint64_t maxsegmentsize_ll; 886*0Sstevel@tonic-gate uint_t maxsegmentsize; 887*0Sstevel@tonic-gate 888*0Sstevel@tonic-gate #ifdef lint 889*0Sstevel@tonic-gate dip = dip; 890*0Sstevel@tonic-gate #endif 891*0Sstevel@tonic-gate 892*0Sstevel@tonic-gate /* 893*0Sstevel@tonic-gate * Validate the dma request. 894*0Sstevel@tonic-gate */ 895*0Sstevel@tonic-gate #ifdef DMADEBUG 896*0Sstevel@tonic-gate if (attr->dma_attr_seg < MMU_PAGEOFFSET || 897*0Sstevel@tonic-gate attr->dma_attr_count_max < MMU_PAGEOFFSET || 898*0Sstevel@tonic-gate attr->dma_attr_granular > MMU_PAGESIZE || 899*0Sstevel@tonic-gate attr->dma_attr_maxxfer < MMU_PAGESIZE) { 900*0Sstevel@tonic-gate DMAPRINT((" bad_limits\n")); 901*0Sstevel@tonic-gate return (DDI_DMA_BADLIMITS); 902*0Sstevel@tonic-gate } 903*0Sstevel@tonic-gate #endif 904*0Sstevel@tonic-gate /* 905*0Sstevel@tonic-gate * validate the attribute structure. For now we do not support 906*0Sstevel@tonic-gate * negative sgllen. 907*0Sstevel@tonic-gate */ 908*0Sstevel@tonic-gate if ((attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) || 909*0Sstevel@tonic-gate (attr->dma_attr_sgllen <= 0)) { 910*0Sstevel@tonic-gate return (DDI_DMA_BADATTR); 911*0Sstevel@tonic-gate } 912*0Sstevel@tonic-gate if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET || 913*0Sstevel@tonic-gate MMU_PAGESIZE & (attr->dma_attr_granular - 1) || 914*0Sstevel@tonic-gate attr->dma_attr_sgllen < 0) { 915*0Sstevel@tonic-gate return (DDI_DMA_BADATTR); 916*0Sstevel@tonic-gate } 917*0Sstevel@tonic-gate 918*0Sstevel@tonic-gate 919*0Sstevel@tonic-gate maxsegmentsize_ll = MIN(attr->dma_attr_seg, 920*0Sstevel@tonic-gate MIN((attr->dma_attr_count_max + 1) * 921*0Sstevel@tonic-gate attr->dma_attr_minxfer, 922*0Sstevel@tonic-gate attr->dma_attr_maxxfer) - 1) + 1; 923*0Sstevel@tonic-gate /* 924*0Sstevel@tonic-gate * We will calculate a 64 bit segment size, if the segment size 925*0Sstevel@tonic-gate * is greater that 4G, we will limit it to (4G - 1). 
926*0Sstevel@tonic-gate * The size of dma object (ddi_dma_obj_t.dmao_size) 927*0Sstevel@tonic-gate * is 32 bits. 928*0Sstevel@tonic-gate */ 929*0Sstevel@tonic-gate if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > FOURG)) 930*0Sstevel@tonic-gate maxsegmentsize = FOURG - 1; 931*0Sstevel@tonic-gate else 932*0Sstevel@tonic-gate maxsegmentsize = maxsegmentsize_ll; 933*0Sstevel@tonic-gate 934*0Sstevel@tonic-gate /* 935*0Sstevel@tonic-gate * We should be able to DMA into every byte offset in a page. 936*0Sstevel@tonic-gate */ 937*0Sstevel@tonic-gate if (maxsegmentsize < MMU_PAGESIZE) { 938*0Sstevel@tonic-gate DMAPRINT((" bad_limits, maxsegmentsize\n")); 939*0Sstevel@tonic-gate return (DDI_DMA_BADLIMITS); 940*0Sstevel@tonic-gate } 941*0Sstevel@tonic-gate 942*0Sstevel@tonic-gate 943*0Sstevel@tonic-gate hp = kmem_zalloc(sizeof (*hp), 944*0Sstevel@tonic-gate (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP); 945*0Sstevel@tonic-gate if (hp == NULL) { 946*0Sstevel@tonic-gate if (waitfp != DDI_DMA_DONTWAIT) { 947*0Sstevel@tonic-gate ddi_set_callback(waitfp, arg, &dvma_call_list_id); 948*0Sstevel@tonic-gate } 949*0Sstevel@tonic-gate return (DDI_DMA_NORESOURCES); 950*0Sstevel@tonic-gate } 951*0Sstevel@tonic-gate /* 952*0Sstevel@tonic-gate * Preallocate space for cookie structures. We will use this when 953*0Sstevel@tonic-gate * the request does not span more than (DMAI_SOMEMORE_COOKIES - 1) 954*0Sstevel@tonic-gate * pages. 955*0Sstevel@tonic-gate */ 956*0Sstevel@tonic-gate hp->dmai_additionalcookiep = 957*0Sstevel@tonic-gate kmem_zalloc(sizeof (ddi_dma_cookie_t) * DMAI_SOMEMORE_COOKIES, 958*0Sstevel@tonic-gate (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP); 959*0Sstevel@tonic-gate 960*0Sstevel@tonic-gate /* 961*0Sstevel@tonic-gate * Save requestor's information 962*0Sstevel@tonic-gate */ 963*0Sstevel@tonic-gate hp->dmai_wins = NULL; 964*0Sstevel@tonic-gate hp->dmai_kaddr = 965*0Sstevel@tonic-gate hp->dmai_ibufp = NULL; 966*0Sstevel@tonic-gate hp->dmai_inuse = 0; 967*0Sstevel@tonic-gate hp->dmai_minxfer = attr->dma_attr_minxfer; 968*0Sstevel@tonic-gate hp->dmai_burstsizes = attr->dma_attr_burstsizes; 969*0Sstevel@tonic-gate hp->dmai_minfo = NULL; 970*0Sstevel@tonic-gate hp->dmai_rdip = rdip; 971*0Sstevel@tonic-gate hp->dmai_attr = *attr; 972*0Sstevel@tonic-gate hp->dmai_mctl = rootnex_dma_mctl; 973*0Sstevel@tonic-gate hp->dmai_segmentsize = maxsegmentsize; 974*0Sstevel@tonic-gate *handlep = (ddi_dma_handle_t)hp; 975*0Sstevel@tonic-gate 976*0Sstevel@tonic-gate return (DDI_SUCCESS); 977*0Sstevel@tonic-gate } 978*0Sstevel@tonic-gate 979*0Sstevel@tonic-gate /*ARGSUSED*/ 980*0Sstevel@tonic-gate static int 981*0Sstevel@tonic-gate rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, 982*0Sstevel@tonic-gate ddi_dma_handle_t handle) 983*0Sstevel@tonic-gate { 984*0Sstevel@tonic-gate ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 985*0Sstevel@tonic-gate 986*0Sstevel@tonic-gate /* 987*0Sstevel@tonic-gate * free the additional cookie space. 
988*0Sstevel@tonic-gate */ 989*0Sstevel@tonic-gate if (hp->dmai_additionalcookiep) 990*0Sstevel@tonic-gate kmem_free(hp->dmai_additionalcookiep, 991*0Sstevel@tonic-gate sizeof (ddi_dma_cookie_t) * DMAI_SOMEMORE_COOKIES); 992*0Sstevel@tonic-gate 993*0Sstevel@tonic-gate kmem_free(hp, sizeof (*hp)); 994*0Sstevel@tonic-gate if (dvma_call_list_id) 995*0Sstevel@tonic-gate ddi_run_callback(&dvma_call_list_id); 996*0Sstevel@tonic-gate return (DDI_SUCCESS); 997*0Sstevel@tonic-gate } 998*0Sstevel@tonic-gate 999*0Sstevel@tonic-gate static int 1000*0Sstevel@tonic-gate rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 1001*0Sstevel@tonic-gate ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 1002*0Sstevel@tonic-gate ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1003*0Sstevel@tonic-gate { 1004*0Sstevel@tonic-gate ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 1005*0Sstevel@tonic-gate ddi_dma_attr_t *dma_attr = &hp->dmai_attr; 1006*0Sstevel@tonic-gate ddi_dma_cookie_t *cp; 1007*0Sstevel@tonic-gate impl_dma_segment_t *segp; 1008*0Sstevel@tonic-gate uint_t segcount = 1; 1009*0Sstevel@tonic-gate int rval; 1010*0Sstevel@tonic-gate struct priv_handle php; 1011*0Sstevel@tonic-gate uint_t size, offset; 1012*0Sstevel@tonic-gate uint64_t padr; 1013*0Sstevel@tonic-gate major_t mnum; 1014*0Sstevel@tonic-gate 1015*0Sstevel@tonic-gate /* 1016*0Sstevel@tonic-gate * no mutex for speed 1017*0Sstevel@tonic-gate */ 1018*0Sstevel@tonic-gate if (hp->dmai_inuse) { 1019*0Sstevel@tonic-gate return (DDI_DMA_INUSE); 1020*0Sstevel@tonic-gate } 1021*0Sstevel@tonic-gate hp->dmai_inuse = 1; 1022*0Sstevel@tonic-gate 1023*0Sstevel@tonic-gate size = OBJSIZE; 1024*0Sstevel@tonic-gate /* 1025*0Sstevel@tonic-gate * get the physical address of the first page of an object 1026*0Sstevel@tonic-gate * defined through 'dmareq' structure. 1027*0Sstevel@tonic-gate */ 1028*0Sstevel@tonic-gate padr = rootnex_get_phyaddr(dmareq, 0, &php); 1029*0Sstevel@tonic-gate offset = padr & MMU_PAGEOFFSET; 1030*0Sstevel@tonic-gate if (offset & (dma_attr->dma_attr_minxfer - 1)) { 1031*0Sstevel@tonic-gate DMAPRINT((" bad_limits/mapping\n")); 1032*0Sstevel@tonic-gate return (DDI_DMA_NOMAPPING); 1033*0Sstevel@tonic-gate } else if ((dma_attr->dma_attr_sgllen > 1) && 1034*0Sstevel@tonic-gate (size <= MMU_PAGESIZE) && (padr < AHI_ATTR)) { 1035*0Sstevel@tonic-gate /* 1036*0Sstevel@tonic-gate * The object is not more than a PAGESIZE and we could DMA into 1037*0Sstevel@tonic-gate * the physical page. 1038*0Sstevel@tonic-gate * The cache is completely coherent, set the NOSYNC flag. 1039*0Sstevel@tonic-gate */ 1040*0Sstevel@tonic-gate hp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) | 1041*0Sstevel@tonic-gate DMP_NOSYNC; 1042*0Sstevel@tonic-gate /* 1043*0Sstevel@tonic-gate * Fill in the physical address in the cookie pointer. 1044*0Sstevel@tonic-gate */ 1045*0Sstevel@tonic-gate cookiep->dmac_type = php.ph_mapinfo; 1046*0Sstevel@tonic-gate cookiep->dmac_laddress = padr; 1047*0Sstevel@tonic-gate if ((offset + size) <= MMU_PAGESIZE) { 1048*0Sstevel@tonic-gate cookiep->dmac_size = size; 1049*0Sstevel@tonic-gate hp->dmai_cookie = NULL; 1050*0Sstevel@tonic-gate *ccountp = 1; 1051*0Sstevel@tonic-gate } else if (hp->dmai_additionalcookiep) { 1052*0Sstevel@tonic-gate /* 1053*0Sstevel@tonic-gate * The object spans a page boundary. We will use the space 1054*0Sstevel@tonic-gate * that we preallocated to store the additional cookie. 
1055*0Sstevel@tonic-gate */ 1056*0Sstevel@tonic-gate cookiep->dmac_size = MMU_PAGESIZE - offset; 1057*0Sstevel@tonic-gate hp->dmai_cookie = hp->dmai_additionalcookiep; 1058*0Sstevel@tonic-gate padr = rootnex_get_phyaddr(dmareq, 1059*0Sstevel@tonic-gate (uint_t)cookiep->dmac_size, &php); 1060*0Sstevel@tonic-gate if (padr > AHI_ATTR) { 1061*0Sstevel@tonic-gate /* 1062*0Sstevel@tonic-gate * We can not DMA into this physical page. We will 1063*0Sstevel@tonic-gate * need intermediate buffers. Reset the state in 1064*0Sstevel@tonic-gate * the php structure. 1065*0Sstevel@tonic-gate */ 1066*0Sstevel@tonic-gate padr = rootnex_get_phyaddr(dmareq, 0, &php); 1067*0Sstevel@tonic-gate goto io_brkup_attr; 1068*0Sstevel@tonic-gate } 1069*0Sstevel@tonic-gate hp->dmai_additionalcookiep->dmac_type = php.ph_mapinfo; 1070*0Sstevel@tonic-gate hp->dmai_additionalcookiep->dmac_laddress = padr; 1071*0Sstevel@tonic-gate hp->dmai_additionalcookiep->dmac_size = 1072*0Sstevel@tonic-gate size - cookiep->dmac_size; 1073*0Sstevel@tonic-gate *ccountp = 2; 1074*0Sstevel@tonic-gate } else { 1075*0Sstevel@tonic-gate goto io_brkup_attr; 1076*0Sstevel@tonic-gate } 1077*0Sstevel@tonic-gate hp->dmai_kaddr = NULL; 1078*0Sstevel@tonic-gate hp->dmai_segp = NULL; 1079*0Sstevel@tonic-gate hp->dmai_ibufp = NULL; 1080*0Sstevel@tonic-gate return (DDI_DMA_MAPPED); 1081*0Sstevel@tonic-gate } 1082*0Sstevel@tonic-gate io_brkup_attr: 1083*0Sstevel@tonic-gate /* 1084*0Sstevel@tonic-gate * The function rootnex_get_phyaddr() does not save the physical 1085*0Sstevel@tonic-gate * address in the php structure. Save it here for 1086*0Sstevel@tonic-gate * rootnext_io_brkup_attr(). 1087*0Sstevel@tonic-gate */ 1088*0Sstevel@tonic-gate php.ph_padr = padr; 1089*0Sstevel@tonic-gate rval = rootnex_io_brkup_attr(dip, rdip, dmareq, handle, &php); 1090*0Sstevel@tonic-gate if (rval && (rval != DDI_DMA_PARTIAL_MAP)) { 1091*0Sstevel@tonic-gate hp->dmai_inuse = 0; 1092*0Sstevel@tonic-gate return (rval); 1093*0Sstevel@tonic-gate } 1094*0Sstevel@tonic-gate hp->dmai_wins = segp = hp->dmai_hds; 1095*0Sstevel@tonic-gate if (hp->dmai_ibufp) { 1096*0Sstevel@tonic-gate (void) rootnex_io_wtsync(hp, BIND); 1097*0Sstevel@tonic-gate } 1098*0Sstevel@tonic-gate 1099*0Sstevel@tonic-gate while ((segp->dmais_flags & DMAIS_WINEND) == 0) { 1100*0Sstevel@tonic-gate segp = segp->dmais_link; 1101*0Sstevel@tonic-gate segcount++; 1102*0Sstevel@tonic-gate } 1103*0Sstevel@tonic-gate *ccountp = segcount; 1104*0Sstevel@tonic-gate cp = hp->dmai_cookie; 1105*0Sstevel@tonic-gate ASSERT(cp); 1106*0Sstevel@tonic-gate cookiep->dmac_type = cp->dmac_type; 1107*0Sstevel@tonic-gate cookiep->dmac_laddress = cp->dmac_laddress; 1108*0Sstevel@tonic-gate cookiep->dmac_size = cp->dmac_size; 1109*0Sstevel@tonic-gate hp->dmai_cookie++; 1110*0Sstevel@tonic-gate 1111*0Sstevel@tonic-gate /* 1112*0Sstevel@tonic-gate * If we ended up with more cookies that the caller specified as 1113*0Sstevel@tonic-gate * the maximum that it can handle (sgllen), and they didn't specify 1114*0Sstevel@tonic-gate * DDI_DMA_PARTIAL, cleanup and return failure. 1115*0Sstevel@tonic-gate * 1116*0Sstevel@tonic-gate * Not the cleanest fix, but lowest risk. The DMA code in 1117*0Sstevel@tonic-gate * this file should get a good cleaning for some performance 1118*0Sstevel@tonic-gate * improvement. This should be cleaned up also during that work. 
1119*0Sstevel@tonic-gate */ 1120*0Sstevel@tonic-gate if ((dma_attr->dma_attr_sgllen < *ccountp) && 1121*0Sstevel@tonic-gate ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)) { 1122*0Sstevel@tonic-gate 1123*0Sstevel@tonic-gate mnum = ddi_driver_major(rdip); 1124*0Sstevel@tonic-gate 1125*0Sstevel@tonic-gate /* 1126*0Sstevel@tonic-gate * patchable which allows us to print one warning per major 1127*0Sstevel@tonic-gate * number. 1128*0Sstevel@tonic-gate */ 1129*0Sstevel@tonic-gate if ((rootnex_bind_warn) && 1130*0Sstevel@tonic-gate ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) { 1131*0Sstevel@tonic-gate rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING; 1132*0Sstevel@tonic-gate cmn_err(CE_WARN, "!%s: coding error detected, the " 1133*0Sstevel@tonic-gate "driver is using ddi_dma_attr(9S) incorrectly. " 1134*0Sstevel@tonic-gate "There is a small risk of data corruption in " 1135*0Sstevel@tonic-gate "particular with large I/Os. The driver should be " 1136*0Sstevel@tonic-gate "replaced with a corrected version for proper " 1137*0Sstevel@tonic-gate "system operation. To disable this warning, add " 1138*0Sstevel@tonic-gate "'set rootnex:rootnex_bind_warn=0' to " 1139*0Sstevel@tonic-gate "/etc/system(4).", ddi_driver_name(rdip)); 1140*0Sstevel@tonic-gate } 1141*0Sstevel@tonic-gate 1142*0Sstevel@tonic-gate /* 1143*0Sstevel@tonic-gate * Patchable which allows us to fail or pass the bind. The 1144*0Sstevel@tonic-gate * correct behavior should be to fail the bind. To be safe for 1145*0Sstevel@tonic-gate * now, the patchable allows the previous behavior to be set 1146*0Sstevel@tonic-gate * via /etc/system 1147*0Sstevel@tonic-gate */ 1148*0Sstevel@tonic-gate if (rootnex_bind_fail) { 1149*0Sstevel@tonic-gate if (hp->dmai_ibufp) 1150*0Sstevel@tonic-gate ddi_mem_free(hp->dmai_ibufp); 1151*0Sstevel@tonic-gate if (hp->dmai_kaddr) 1152*0Sstevel@tonic-gate vmem_free(heap_arena, hp->dmai_kaddr, PAGESIZE); 1153*0Sstevel@tonic-gate if (hp->dmai_segp) 1154*0Sstevel@tonic-gate kmem_free(hp->dmai_segp, hp->dmai_kmsize); 1155*0Sstevel@tonic-gate hp->dmai_inuse = 0; 1156*0Sstevel@tonic-gate *ccountp = 0; 1157*0Sstevel@tonic-gate 1158*0Sstevel@tonic-gate return (DDI_DMA_TOOBIG); 1159*0Sstevel@tonic-gate } 1160*0Sstevel@tonic-gate } 1161*0Sstevel@tonic-gate 1162*0Sstevel@tonic-gate return (rval); 1163*0Sstevel@tonic-gate } 1164*0Sstevel@tonic-gate 1165*0Sstevel@tonic-gate /*ARGSUSED*/ 1166*0Sstevel@tonic-gate static int 1167*0Sstevel@tonic-gate rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 1168*0Sstevel@tonic-gate ddi_dma_handle_t handle) 1169*0Sstevel@tonic-gate { 1170*0Sstevel@tonic-gate ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 1171*0Sstevel@tonic-gate int rval = DDI_SUCCESS; 1172*0Sstevel@tonic-gate 1173*0Sstevel@tonic-gate if (hp->dmai_ibufp) { 1174*0Sstevel@tonic-gate rval = rootnex_io_rdsync(hp); 1175*0Sstevel@tonic-gate ddi_mem_free(hp->dmai_ibufp); 1176*0Sstevel@tonic-gate } 1177*0Sstevel@tonic-gate if (hp->dmai_kaddr) 1178*0Sstevel@tonic-gate vmem_free(heap_arena, hp->dmai_kaddr, PAGESIZE); 1179*0Sstevel@tonic-gate if (hp->dmai_segp) 1180*0Sstevel@tonic-gate kmem_free(hp->dmai_segp, hp->dmai_kmsize); 1181*0Sstevel@tonic-gate if (dvma_call_list_id) 1182*0Sstevel@tonic-gate ddi_run_callback(&dvma_call_list_id); 1183*0Sstevel@tonic-gate hp->dmai_inuse = 0; 1184*0Sstevel@tonic-gate return (rval); 1185*0Sstevel@tonic-gate } 1186*0Sstevel@tonic-gate 1187*0Sstevel@tonic-gate /*ARGSUSED*/ 1188*0Sstevel@tonic-gate static int 1189*0Sstevel@tonic-gate rootnex_dma_flush(dev_info_t *dip, dev_info_t 
*rdip, 1190*0Sstevel@tonic-gate ddi_dma_handle_t handle, off_t off, size_t len, 1191*0Sstevel@tonic-gate uint_t cache_flags) 1192*0Sstevel@tonic-gate { 1193*0Sstevel@tonic-gate ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 1194*0Sstevel@tonic-gate int rval = DDI_SUCCESS; 1195*0Sstevel@tonic-gate 1196*0Sstevel@tonic-gate if (hp->dmai_ibufp) { 1197*0Sstevel@tonic-gate if (cache_flags == DDI_DMA_SYNC_FORDEV) { 1198*0Sstevel@tonic-gate rval = rootnex_io_wtsync(hp, MAP); 1199*0Sstevel@tonic-gate } else { 1200*0Sstevel@tonic-gate rval = rootnex_io_rdsync(hp); 1201*0Sstevel@tonic-gate } 1202*0Sstevel@tonic-gate } 1203*0Sstevel@tonic-gate return (rval); 1204*0Sstevel@tonic-gate } 1205*0Sstevel@tonic-gate 1206*0Sstevel@tonic-gate /*ARGSUSED*/ 1207*0Sstevel@tonic-gate static int 1208*0Sstevel@tonic-gate rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, 1209*0Sstevel@tonic-gate ddi_dma_handle_t handle, uint_t win, off_t *offp, 1210*0Sstevel@tonic-gate size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1211*0Sstevel@tonic-gate { 1212*0Sstevel@tonic-gate ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 1213*0Sstevel@tonic-gate impl_dma_segment_t *segp, *winp = hp->dmai_hds; 1214*0Sstevel@tonic-gate uint_t len, segcount = 1; 1215*0Sstevel@tonic-gate ddi_dma_cookie_t *cp; 1216*0Sstevel@tonic-gate int i; 1217*0Sstevel@tonic-gate 1218*0Sstevel@tonic-gate /* 1219*0Sstevel@tonic-gate * win is in the range [0 .. dmai_nwin-1] 1220*0Sstevel@tonic-gate */ 1221*0Sstevel@tonic-gate if (win >= hp->dmai_nwin) { 1222*0Sstevel@tonic-gate return (DDI_FAILURE); 1223*0Sstevel@tonic-gate } 1224*0Sstevel@tonic-gate if (hp->dmai_wins && hp->dmai_ibufp) { 1225*0Sstevel@tonic-gate (void) rootnex_io_rdsync(hp); 1226*0Sstevel@tonic-gate } 1227*0Sstevel@tonic-gate ASSERT(winp->dmais_flags & DMAIS_WINSTRT); 1228*0Sstevel@tonic-gate for (i = 0; i < win; i++) { 1229*0Sstevel@tonic-gate winp = winp->_win._dmais_nex; 1230*0Sstevel@tonic-gate ASSERT(winp); 1231*0Sstevel@tonic-gate ASSERT(winp->dmais_flags & DMAIS_WINSTRT); 1232*0Sstevel@tonic-gate } 1233*0Sstevel@tonic-gate 1234*0Sstevel@tonic-gate hp->dmai_wins = (impl_dma_segment_t *)winp; 1235*0Sstevel@tonic-gate if (hp->dmai_ibufp) 1236*0Sstevel@tonic-gate (void) rootnex_io_wtsync(hp, BIND); 1237*0Sstevel@tonic-gate segp = winp; 1238*0Sstevel@tonic-gate len = segp->dmais_size; 1239*0Sstevel@tonic-gate *offp = segp->dmais_ofst; 1240*0Sstevel@tonic-gate while ((segp->dmais_flags & DMAIS_WINEND) == 0) { 1241*0Sstevel@tonic-gate segp = segp->dmais_link; 1242*0Sstevel@tonic-gate len += segp->dmais_size; 1243*0Sstevel@tonic-gate segcount++; 1244*0Sstevel@tonic-gate } 1245*0Sstevel@tonic-gate 1246*0Sstevel@tonic-gate *lenp = len; 1247*0Sstevel@tonic-gate *ccountp = segcount; 1248*0Sstevel@tonic-gate cp = hp->dmai_cookie = winp->dmais_cookie; 1249*0Sstevel@tonic-gate ASSERT(cp); 1250*0Sstevel@tonic-gate cookiep->dmac_type = cp->dmac_type; 1251*0Sstevel@tonic-gate cookiep->dmac_laddress = cp->dmac_laddress; 1252*0Sstevel@tonic-gate cookiep->dmac_size = cp->dmac_size; 1253*0Sstevel@tonic-gate hp->dmai_cookie++; 1254*0Sstevel@tonic-gate DMAPRINT(("getwin win %p mapping %llx size %lx\n", 1255*0Sstevel@tonic-gate (void *)winp, (unsigned long long)cp->dmac_laddress, 1256*0Sstevel@tonic-gate cp->dmac_size)); 1257*0Sstevel@tonic-gate 1258*0Sstevel@tonic-gate return (DDI_SUCCESS); 1259*0Sstevel@tonic-gate } 1260*0Sstevel@tonic-gate 1261*0Sstevel@tonic-gate static int 1262*0Sstevel@tonic-gate rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, 1263*0Sstevel@tonic-gate struct ddi_dma_req 
*dmareq, ddi_dma_handle_t *handlep) 1264*0Sstevel@tonic-gate { 1265*0Sstevel@tonic-gate ddi_dma_lim_t *dma_lim = dmareq->dmar_limits; 1266*0Sstevel@tonic-gate impl_dma_segment_t *segmentp; 1267*0Sstevel@tonic-gate ddi_dma_impl_t *hp; 1268*0Sstevel@tonic-gate struct priv_handle php; 1269*0Sstevel@tonic-gate uint64_t padr; 1270*0Sstevel@tonic-gate uint_t offset, size; 1271*0Sstevel@tonic-gate int sizehandle; 1272*0Sstevel@tonic-gate int mapinfo; 1273*0Sstevel@tonic-gate 1274*0Sstevel@tonic-gate #ifdef lint 1275*0Sstevel@tonic-gate dip = dip; 1276*0Sstevel@tonic-gate #endif 1277*0Sstevel@tonic-gate 1278*0Sstevel@tonic-gate DMAPRINT(("dma_map: %s (%s) reqp %p ", (handlep)? "alloc" : "advisory", 1279*0Sstevel@tonic-gate ddi_get_name(rdip), (void *)dmareq)); 1280*0Sstevel@tonic-gate 1281*0Sstevel@tonic-gate #ifdef DMADEBUG 1282*0Sstevel@tonic-gate /* 1283*0Sstevel@tonic-gate * Validate range checks on DMA limits 1284*0Sstevel@tonic-gate */ 1285*0Sstevel@tonic-gate if ((dma_lim->dlim_adreg_max & MMU_PAGEOFFSET) != MMU_PAGEOFFSET || 1286*0Sstevel@tonic-gate dma_lim->dlim_granular > MMU_PAGESIZE || 1287*0Sstevel@tonic-gate dma_lim->dlim_sgllen <= 0) { 1288*0Sstevel@tonic-gate DMAPRINT((" bad_limits\n")); 1289*0Sstevel@tonic-gate return (DDI_DMA_BADLIMITS); 1290*0Sstevel@tonic-gate } 1291*0Sstevel@tonic-gate #endif 1292*0Sstevel@tonic-gate size = OBJSIZE; 1293*0Sstevel@tonic-gate /* 1294*0Sstevel@tonic-gate * get the physical address of the first page of an object 1295*0Sstevel@tonic-gate * defined through 'dmareq' structure. 1296*0Sstevel@tonic-gate */ 1297*0Sstevel@tonic-gate padr = rootnex_get_phyaddr(dmareq, 0, &php); 1298*0Sstevel@tonic-gate mapinfo = php.ph_mapinfo; 1299*0Sstevel@tonic-gate offset = padr & MMU_PAGEOFFSET; 1300*0Sstevel@tonic-gate if (offset & (dma_lim->dlim_minxfer - 1)) { 1301*0Sstevel@tonic-gate DMAPRINT((" bad_limits/mapping\n")); 1302*0Sstevel@tonic-gate return (DDI_DMA_NOMAPPING); 1303*0Sstevel@tonic-gate } else if (((offset + size) < MMU_PAGESIZE) && (padr < AHI_LIM)) { 1304*0Sstevel@tonic-gate /* 1305*0Sstevel@tonic-gate * The object is less than a PAGESIZE and we could DMA into 1306*0Sstevel@tonic-gate * the physical page. 1307*0Sstevel@tonic-gate */ 1308*0Sstevel@tonic-gate if (!handlep) 1309*0Sstevel@tonic-gate return (DDI_DMA_MAPOK); 1310*0Sstevel@tonic-gate sizehandle = sizeof (ddi_dma_impl_t) + 1311*0Sstevel@tonic-gate sizeof (impl_dma_segment_t); 1312*0Sstevel@tonic-gate 1313*0Sstevel@tonic-gate hp = kmem_alloc(sizehandle, (dmareq->dmar_fp == DDI_DMA_SLEEP) ? 
1314*0Sstevel@tonic-gate KM_SLEEP : KM_NOSLEEP); 1315*0Sstevel@tonic-gate if (!hp) { 1316*0Sstevel@tonic-gate /* let other routine do callback */ 1317*0Sstevel@tonic-gate goto breakup_req; 1318*0Sstevel@tonic-gate } 1319*0Sstevel@tonic-gate hp->dmai_kmsize = sizehandle; 1320*0Sstevel@tonic-gate 1321*0Sstevel@tonic-gate /* 1322*0Sstevel@tonic-gate * locate segments after dma_impl handle structure 1323*0Sstevel@tonic-gate */ 1324*0Sstevel@tonic-gate segmentp = (impl_dma_segment_t *)(hp + 1); 1325*0Sstevel@tonic-gate 1326*0Sstevel@tonic-gate /* FMA related initialization */ 1327*0Sstevel@tonic-gate hp->dmai_fault = 0; 1328*0Sstevel@tonic-gate hp->dmai_fault_check = NULL; 1329*0Sstevel@tonic-gate hp->dmai_fault_notify = NULL; 1330*0Sstevel@tonic-gate hp->dmai_error.err_ena = 0; 1331*0Sstevel@tonic-gate hp->dmai_error.err_status = DDI_FM_OK; 1332*0Sstevel@tonic-gate hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED; 1333*0Sstevel@tonic-gate hp->dmai_error.err_ontrap = NULL; 1334*0Sstevel@tonic-gate hp->dmai_error.err_fep = NULL; 1335*0Sstevel@tonic-gate 1336*0Sstevel@tonic-gate /* 1337*0Sstevel@tonic-gate * Save requestor's information 1338*0Sstevel@tonic-gate */ 1339*0Sstevel@tonic-gate hp->dmai_minxfer = dma_lim->dlim_minxfer; 1340*0Sstevel@tonic-gate hp->dmai_burstsizes = dma_lim->dlim_burstsizes; 1341*0Sstevel@tonic-gate hp->dmai_rdip = rdip; 1342*0Sstevel@tonic-gate hp->dmai_mctl = rootnex_dma_mctl; 1343*0Sstevel@tonic-gate hp->dmai_wins = NULL; 1344*0Sstevel@tonic-gate hp->dmai_kaddr = hp->dmai_ibufp = NULL; 1345*0Sstevel@tonic-gate hp->dmai_hds = segmentp; 1346*0Sstevel@tonic-gate hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS; 1347*0Sstevel@tonic-gate hp->dmai_minfo = (void *)(uintptr_t)mapinfo; 1348*0Sstevel@tonic-gate hp->dmai_object = dmareq->dmar_object; 1349*0Sstevel@tonic-gate if (mapinfo == DMAMI_PAGES) { 1350*0Sstevel@tonic-gate segmentp->_vdmu._dmais_pp = php.ph_u.pp; 1351*0Sstevel@tonic-gate segmentp->dmais_ofst = (uint_t)offset; 1352*0Sstevel@tonic-gate } else { 1353*0Sstevel@tonic-gate segmentp->_vdmu._dmais_va = php.ph_vaddr; 1354*0Sstevel@tonic-gate segmentp->dmais_ofst = 0; 1355*0Sstevel@tonic-gate } 1356*0Sstevel@tonic-gate segmentp->_win._dmais_nex = NULL; 1357*0Sstevel@tonic-gate segmentp->dmais_link = NULL; 1358*0Sstevel@tonic-gate segmentp->_pdmu._dmais_lpd = padr; 1359*0Sstevel@tonic-gate segmentp->dmais_size = size; 1360*0Sstevel@tonic-gate segmentp->dmais_flags = DMAIS_WINSTRT | DMAIS_WINEND; 1361*0Sstevel@tonic-gate segmentp->dmais_hndl = hp; 1362*0Sstevel@tonic-gate *handlep = (ddi_dma_handle_t)hp; 1363*0Sstevel@tonic-gate DMAPRINT((" QUICKIE handle %p\n", (void *)hp)); 1364*0Sstevel@tonic-gate return (DDI_DMA_MAPPED); 1365*0Sstevel@tonic-gate } else if (!handlep) { 1366*0Sstevel@tonic-gate return (DDI_DMA_NOMAPPING); 1367*0Sstevel@tonic-gate } 1368*0Sstevel@tonic-gate breakup_req: 1369*0Sstevel@tonic-gate /* 1370*0Sstevel@tonic-gate * The function rootnex_get_phyaddr() does not save the physical 1371*0Sstevel@tonic-gate * address in the php structure. Save it here for 1372*0Sstevel@tonic-gate * rootnex_io_brkup_attr().
1373*0Sstevel@tonic-gate */ 1374*0Sstevel@tonic-gate php.ph_padr = padr; 1375*0Sstevel@tonic-gate return (rootnex_io_brkup_lim(dip, rdip, dmareq, handlep, 1376*0Sstevel@tonic-gate dma_lim, &php)); 1377*0Sstevel@tonic-gate } 1378*0Sstevel@tonic-gate 1379*0Sstevel@tonic-gate /* CSTYLED */ 1380*0Sstevel@tonic-gate #define CAN_COMBINE(psegp, paddr, segsize, sgsize, mxsegsize, attr, flg) \ 1381*0Sstevel@tonic-gate ((psegp) && \ 1382*0Sstevel@tonic-gate ((psegp)->_pdmu._dmais_lpd + (psegp)->dmais_size) == (paddr) && \ 1383*0Sstevel@tonic-gate (((psegp)->dmais_flags & (DMAIS_NEEDINTBUF | DMAIS_COMPLEMENT)) == 0) && \ 1384*0Sstevel@tonic-gate (((flg) & DMAIS_NEEDINTBUF) == 0) && \ 1385*0Sstevel@tonic-gate (((psegp)->dmais_size + (segsize)) <= (mxsegsize)) && \ 1386*0Sstevel@tonic-gate ((paddr) & (attr)->dma_attr_seg)) 1387*0Sstevel@tonic-gate 1388*0Sstevel@tonic-gate /* CSTYLED */ 1389*0Sstevel@tonic-gate #define MARK_WIN_END(segp, prvwinp, cwinp) \ 1390*0Sstevel@tonic-gate (segp)->dmais_flags |= DMAIS_WINEND; \ 1391*0Sstevel@tonic-gate (prvwinp) = (cwinp); \ 1392*0Sstevel@tonic-gate (cwinp)->dmais_flags |= DMAIS_WINUIB; \ 1393*0Sstevel@tonic-gate (cwinp) = NULL; 1394*0Sstevel@tonic-gate 1395*0Sstevel@tonic-gate /* 1396*0Sstevel@tonic-gate * This function works with the ddi_dma_attr structure. 1397*0Sstevel@tonic-gate * Bugs fixed 1398*0Sstevel@tonic-gate * 1. The old code would ignore the size of the first segment when 1399*0Sstevel@tonic-gate * computing the total size of the request (sglistsize) for sgllen == 1 1400*0Sstevel@tonic-gate */ 1401*0Sstevel@tonic-gate 1402*0Sstevel@tonic-gate /*ARGSUSED*/ 1403*0Sstevel@tonic-gate int 1404*0Sstevel@tonic-gate rootnex_io_brkup_attr(dev_info_t *dip, dev_info_t *rdip, 1405*0Sstevel@tonic-gate struct ddi_dma_req *dmareq, ddi_dma_handle_t handle, 1406*0Sstevel@tonic-gate struct priv_handle *php) 1407*0Sstevel@tonic-gate { 1408*0Sstevel@tonic-gate impl_dma_segment_t *segmentp; 1409*0Sstevel@tonic-gate impl_dma_segment_t *curwinp; 1410*0Sstevel@tonic-gate impl_dma_segment_t *previousp; 1411*0Sstevel@tonic-gate impl_dma_segment_t *prewinp; 1412*0Sstevel@tonic-gate ddi_dma_cookie_t *cookiep; 1413*0Sstevel@tonic-gate ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 1414*0Sstevel@tonic-gate caddr_t basevadr; 1415*0Sstevel@tonic-gate caddr_t segmentvadr; 1416*0Sstevel@tonic-gate uint64_t segmentpadr; 1417*0Sstevel@tonic-gate uint_t maxsegmentsize, sizesegment, residual_size; 1418*0Sstevel@tonic-gate uint_t offset, needintbuf, sglistsize, trim; 1419*0Sstevel@tonic-gate int nsegments; 1420*0Sstevel@tonic-gate int mapinfo; 1421*0Sstevel@tonic-gate int reqneedintbuf; 1422*0Sstevel@tonic-gate int rval; 1423*0Sstevel@tonic-gate int segment_flags, win_flags; 1424*0Sstevel@tonic-gate int sgcount; 1425*0Sstevel@tonic-gate int wcount; 1426*0Sstevel@tonic-gate ddi_dma_attr_t *dma_attr = &hp->dmai_attr; 1427*0Sstevel@tonic-gate int sizehandle; 1428*0Sstevel@tonic-gate 1429*0Sstevel@tonic-gate #ifdef lint 1430*0Sstevel@tonic-gate dip = dip; 1431*0Sstevel@tonic-gate #endif 1432*0Sstevel@tonic-gate 1433*0Sstevel@tonic-gate /* 1434*0Sstevel@tonic-gate * Initialize our local variables from the php structure. 1435*0Sstevel@tonic-gate * rootnex_get_phyaddr() has populated php structure on its 1436*0Sstevel@tonic-gate * previous invocation in rootnex_dma_bindhdl().
1437*0Sstevel@tonic-gate */ 1438*0Sstevel@tonic-gate residual_size = OBJSIZE; 1439*0Sstevel@tonic-gate mapinfo = php->ph_mapinfo; 1440*0Sstevel@tonic-gate segmentpadr = php->ph_padr; 1441*0Sstevel@tonic-gate segmentvadr = php->ph_vaddr; 1442*0Sstevel@tonic-gate basevadr = (mapinfo == DMAMI_PAGES) ? 0 : segmentvadr; 1443*0Sstevel@tonic-gate offset = segmentpadr & MMU_PAGEOFFSET; 1444*0Sstevel@tonic-gate /* 1445*0Sstevel@tonic-gate * maxsegmentsize was computed and saved in rootnex_dma_allochdl(). 1446*0Sstevel@tonic-gate */ 1447*0Sstevel@tonic-gate maxsegmentsize = hp->dmai_segmentsize; 1448*0Sstevel@tonic-gate 1449*0Sstevel@tonic-gate /* 1450*0Sstevel@tonic-gate * The number of segments is the number of 4k pages that the 1451*0Sstevel@tonic-gate * object spans. 1452*0Sstevel@tonic-gate * Each 4k segment may need another segment to satisfy 1453*0Sstevel@tonic-gate * device granularity requirements. 1454*0Sstevel@tonic-gate * We will never need more than two segments per page. 1455*0Sstevel@tonic-gate * This may be an overestimate in some cases but it avoids 1456*0Sstevel@tonic-gate * 64 bit divide operations. 1457*0Sstevel@tonic-gate */ 1458*0Sstevel@tonic-gate nsegments = (offset + residual_size + MMU_PAGEOFFSET) >> 1459*0Sstevel@tonic-gate (MMU_PAGESHIFT - 1); 1460*0Sstevel@tonic-gate 1461*0Sstevel@tonic-gate 1462*0Sstevel@tonic-gate 1463*0Sstevel@tonic-gate sizehandle = nsegments * (sizeof (impl_dma_segment_t) + 1464*0Sstevel@tonic-gate sizeof (ddi_dma_cookie_t)); 1465*0Sstevel@tonic-gate 1466*0Sstevel@tonic-gate hp->dmai_segp = kmem_zalloc(sizehandle, 1467*0Sstevel@tonic-gate (dmareq->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP); 1468*0Sstevel@tonic-gate if (!hp->dmai_segp) { 1469*0Sstevel@tonic-gate rval = DDI_DMA_NORESOURCES; 1470*0Sstevel@tonic-gate goto bad; 1471*0Sstevel@tonic-gate } 1472*0Sstevel@tonic-gate hp->dmai_kmsize = sizehandle; 1473*0Sstevel@tonic-gate segmentp = (impl_dma_segment_t *)hp->dmai_segp; 1474*0Sstevel@tonic-gate cookiep = (ddi_dma_cookie_t *)(segmentp + nsegments); 1475*0Sstevel@tonic-gate hp->dmai_cookie = cookiep; 1476*0Sstevel@tonic-gate hp->dmai_wins = NULL; 1477*0Sstevel@tonic-gate hp->dmai_kaddr = hp->dmai_ibufp = NULL; 1478*0Sstevel@tonic-gate hp->dmai_hds = prewinp = segmentp; 1479*0Sstevel@tonic-gate hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS; 1480*0Sstevel@tonic-gate hp->dmai_minfo = (void *)(uintptr_t)mapinfo; 1481*0Sstevel@tonic-gate hp->dmai_object = dmareq->dmar_object; 1482*0Sstevel@tonic-gate 1483*0Sstevel@tonic-gate /* 1484*0Sstevel@tonic-gate * Breakup the memory object 1485*0Sstevel@tonic-gate * and build an i/o segment at each boundary condition 1486*0Sstevel@tonic-gate */ 1487*0Sstevel@tonic-gate curwinp = 0; 1488*0Sstevel@tonic-gate needintbuf = 0; 1489*0Sstevel@tonic-gate previousp = 0; 1490*0Sstevel@tonic-gate reqneedintbuf = 0; 1491*0Sstevel@tonic-gate sglistsize = 0; 1492*0Sstevel@tonic-gate wcount = 0; 1493*0Sstevel@tonic-gate sgcount = 1; 1494*0Sstevel@tonic-gate do { 1495*0Sstevel@tonic-gate sizesegment = MIN((MMU_PAGESIZE - offset), residual_size); 1496*0Sstevel@tonic-gate segment_flags = (segmentpadr > AHI_ATTR) ?
DMAIS_NEEDINTBUF : 0; 1497*0Sstevel@tonic-gate sglistsize += sizesegment; 1498*0Sstevel@tonic-gate if (sglistsize >= dma_attr->dma_attr_maxxfer) { 1499*0Sstevel@tonic-gate /* 1500*0Sstevel@tonic-gate * limit the number of bytes to dma_attr_maxxfer 1501*0Sstevel@tonic-gate */ 1502*0Sstevel@tonic-gate sizesegment -= 1503*0Sstevel@tonic-gate (sglistsize - dma_attr->dma_attr_maxxfer); 1504*0Sstevel@tonic-gate sglistsize = dma_attr->dma_attr_maxxfer; 1505*0Sstevel@tonic-gate sgcount = dma_attr->dma_attr_sgllen + 1; 1506*0Sstevel@tonic-gate } 1507*0Sstevel@tonic-gate if ((dma_attr->dma_attr_sgllen == 1) && 1508*0Sstevel@tonic-gate (segmentpadr & (dma_attr->dma_attr_granular - 1)) && 1509*0Sstevel@tonic-gate (residual_size != sizesegment)) { 1510*0Sstevel@tonic-gate /* 1511*0Sstevel@tonic-gate * _no_ scatter/gather capability, 1512*0Sstevel@tonic-gate * so ensure that size of each segment is a 1513*0Sstevel@tonic-gate * multiple of dma_attr_granular (== sector size) 1514*0Sstevel@tonic-gate */ 1515*0Sstevel@tonic-gate sizesegment = MIN((uint_t)MMU_PAGESIZE, residual_size); 1516*0Sstevel@tonic-gate segment_flags |= DMAIS_NEEDINTBUF; 1517*0Sstevel@tonic-gate sglistsize = sizesegment; 1518*0Sstevel@tonic-gate } 1519*0Sstevel@tonic-gate if (CAN_COMBINE(previousp, segmentpadr, sizesegment, 1520*0Sstevel@tonic-gate sglistsize, maxsegmentsize, dma_attr, segment_flags)) { 1521*0Sstevel@tonic-gate previousp->dmais_flags |= segment_flags; 1522*0Sstevel@tonic-gate previousp->dmais_size += sizesegment; 1523*0Sstevel@tonic-gate previousp->dmais_cookie->dmac_size += sizesegment; 1524*0Sstevel@tonic-gate } else { 1525*0Sstevel@tonic-gate if (dma_attr->dma_attr_sgllen == 1) 1526*0Sstevel@tonic-gate /* 1527*0Sstevel@tonic-gate * If we can not combine this segment with the 1528*0Sstevel@tonic-gate * previous segment or if there are no previous 1529*0Sstevel@tonic-gate * segments, sglistsize should be set to 1530*0Sstevel@tonic-gate * segmentsize. 
1531*0Sstevel@tonic-gate */ 1532*0Sstevel@tonic-gate sglistsize = sizesegment; 1533*0Sstevel@tonic-gate 1534*0Sstevel@tonic-gate if (previousp) { 1535*0Sstevel@tonic-gate previousp->dmais_link = segmentp; 1536*0Sstevel@tonic-gate } 1537*0Sstevel@tonic-gate segmentp->dmais_cookie = cookiep; 1538*0Sstevel@tonic-gate segmentp->dmais_hndl = hp; 1539*0Sstevel@tonic-gate if (curwinp == 0) { 1540*0Sstevel@tonic-gate prewinp->_win._dmais_nex = curwinp = segmentp; 1541*0Sstevel@tonic-gate segment_flags |= DMAIS_WINSTRT; 1542*0Sstevel@tonic-gate win_flags = segment_flags; 1543*0Sstevel@tonic-gate wcount++; 1544*0Sstevel@tonic-gate } else { 1545*0Sstevel@tonic-gate segmentp->_win._dmais_cur = curwinp; 1546*0Sstevel@tonic-gate win_flags |= segment_flags; 1547*0Sstevel@tonic-gate } 1548*0Sstevel@tonic-gate segmentp->dmais_ofst = segmentvadr - basevadr; 1549*0Sstevel@tonic-gate if (mapinfo == DMAMI_PAGES) 1550*0Sstevel@tonic-gate segmentp->_vdmu._dmais_pp = php->ph_u.pp; 1551*0Sstevel@tonic-gate else 1552*0Sstevel@tonic-gate segmentp->_vdmu._dmais_va = (caddr_t)segmentvadr; 1553*0Sstevel@tonic-gate segmentp->_pdmu._dmais_lpd = segmentpadr; 1554*0Sstevel@tonic-gate segmentp->dmais_flags = (ushort_t)segment_flags; 1555*0Sstevel@tonic-gate segmentp->dmais_size = sizesegment; 1556*0Sstevel@tonic-gate cookiep->dmac_laddress = segmentpadr; 1557*0Sstevel@tonic-gate cookiep->dmac_type = (ulong_t)segmentp; 1558*0Sstevel@tonic-gate cookiep->dmac_size = sizesegment; 1559*0Sstevel@tonic-gate cookiep++; 1560*0Sstevel@tonic-gate --nsegments; 1561*0Sstevel@tonic-gate if (dma_attr->dma_attr_sgllen > 1) 1562*0Sstevel@tonic-gate sgcount++; 1563*0Sstevel@tonic-gate if (segment_flags & DMAIS_NEEDINTBUF) { 1564*0Sstevel@tonic-gate if ((dma_attr->dma_attr_sgllen > 1) && 1565*0Sstevel@tonic-gate (needintbuf += ptob(btopr(sizesegment))) 1566*0Sstevel@tonic-gate == MAX_INT_BUF) { 1567*0Sstevel@tonic-gate /* 1568*0Sstevel@tonic-gate * Intermediate buffers need not be contiguous. 1569*0Sstevel@tonic-gate * we allocate a page of intermediate buffer 1570*0Sstevel@tonic-gate * for every segment. 1571*0Sstevel@tonic-gate */ 1572*0Sstevel@tonic-gate reqneedintbuf = needintbuf; 1573*0Sstevel@tonic-gate needintbuf = 0; 1574*0Sstevel@tonic-gate sgcount = dma_attr->dma_attr_sgllen + 1; 1575*0Sstevel@tonic-gate MARK_WIN_END(segmentp, prewinp, curwinp); 1576*0Sstevel@tonic-gate } else if (dma_attr->dma_attr_sgllen == 1) { 1577*0Sstevel@tonic-gate needintbuf = MMU_PAGESIZE; 1578*0Sstevel@tonic-gate MARK_WIN_END(segmentp, prewinp, curwinp); 1579*0Sstevel@tonic-gate } 1580*0Sstevel@tonic-gate } 1581*0Sstevel@tonic-gate previousp = segmentp++; 1582*0Sstevel@tonic-gate } 1583*0Sstevel@tonic-gate 1584*0Sstevel@tonic-gate if (sgcount > dma_attr->dma_attr_sgllen) { 1585*0Sstevel@tonic-gate previousp->dmais_flags |= DMAIS_COMPLEMENT; 1586*0Sstevel@tonic-gate sgcount = 1; 1587*0Sstevel@tonic-gate trim = sglistsize & (dma_attr->dma_attr_granular - 1); 1588*0Sstevel@tonic-gate 1589*0Sstevel@tonic-gate if ((sizesegment != residual_size) && 1590*0Sstevel@tonic-gate (trim == sizesegment)) { 1591*0Sstevel@tonic-gate 1592*0Sstevel@tonic-gate /* 1593*0Sstevel@tonic-gate * Normally we would trim the buffer to make it a 1594*0Sstevel@tonic-gate * multiple of the granularity. But in this case, 1595*0Sstevel@tonic-gate * the size is < the granularity so we'll roll back 1596*0Sstevel@tonic-gate * this segment and pick this up the next time around. 1597*0Sstevel@tonic-gate * 1598*0Sstevel@tonic-gate * This case occurs when sgcount naturally (i.e. 
not 1599*0Sstevel@tonic-gate * forced) is greater than dma_attr_sgllen. In this 1600*0Sstevel@tonic-gate * case, if the very next segment fills up the 1601*0Sstevel@tonic-gate * intermediate buffer, and the amount required to fill 1602*0Sstevel@tonic-gate * the intermediate buffer < granularity, we would end 1603*0Sstevel@tonic-gate * up with a zero sized cookie if we didn't roll back 1604*0Sstevel@tonic-gate * the segment. 1605*0Sstevel@tonic-gate */ 1606*0Sstevel@tonic-gate 1607*0Sstevel@tonic-gate /* 1608*0Sstevel@tonic-gate * Make sure we really understand the code path here, 1609*0Sstevel@tonic-gate * we should only get here if we are at an end of a 1610*0Sstevel@tonic-gate * window which is a single page long < granularity 1611*0Sstevel@tonic-gate */ 1612*0Sstevel@tonic-gate ASSERT(previousp->dmais_flags & DMAIS_WINEND); 1613*0Sstevel@tonic-gate ASSERT(sizesegment == sglistsize); 1614*0Sstevel@tonic-gate 1615*0Sstevel@tonic-gate /* Zero out this segment and add it back to the count */ 1616*0Sstevel@tonic-gate sizesegment = 0; 1617*0Sstevel@tonic-gate sglistsize = 0; 1618*0Sstevel@tonic-gate nsegments++; 1619*0Sstevel@tonic-gate 1620*0Sstevel@tonic-gate /* fix the segment and cookie pointers */ 1621*0Sstevel@tonic-gate segmentp = previousp; 1622*0Sstevel@tonic-gate bzero(previousp, sizeof (impl_dma_segment_t)); 1623*0Sstevel@tonic-gate previousp--; 1624*0Sstevel@tonic-gate bzero(cookiep, sizeof (ddi_dma_cookie_t)); 1625*0Sstevel@tonic-gate cookiep--; 1626*0Sstevel@tonic-gate 1627*0Sstevel@tonic-gate /* 1628*0Sstevel@tonic-gate * clean up the new previous pointer. Make sure we 1629*0Sstevel@tonic-gate * carry over the WINEND marker. 1630*0Sstevel@tonic-gate */ 1631*0Sstevel@tonic-gate previousp->dmais_link = NULL; 1632*0Sstevel@tonic-gate previousp->dmais_flags |= DMAIS_WINEND; 1633*0Sstevel@tonic-gate 1634*0Sstevel@tonic-gate } else if ((sizesegment != residual_size) && trim) { 1635*0Sstevel@tonic-gate /* 1636*0Sstevel@tonic-gate * end of a scatter/gather list! 1637*0Sstevel@tonic-gate * ensure that total length of list is a 1638*0Sstevel@tonic-gate * multiple of granular (sector size) 1639*0Sstevel@tonic-gate */ 1640*0Sstevel@tonic-gate previousp->dmais_size -= trim; 1641*0Sstevel@tonic-gate previousp->dmais_cookie->dmac_size -= trim; 1642*0Sstevel@tonic-gate sizesegment -= trim; 1643*0Sstevel@tonic-gate } 1644*0Sstevel@tonic-gate sglistsize = 0; 1645*0Sstevel@tonic-gate } 1646*0Sstevel@tonic-gate if (sizesegment && (residual_size -= sizesegment)) { 1647*0Sstevel@tonic-gate /* 1648*0Sstevel@tonic-gate * Get the physical address of the next page in the 1649*0Sstevel@tonic-gate * dma object.
1650*0Sstevel@tonic-gate */ 1651*0Sstevel@tonic-gate segmentpadr = 1652*0Sstevel@tonic-gate rootnex_get_phyaddr(dmareq, sizesegment, php); 1653*0Sstevel@tonic-gate offset = segmentpadr & MMU_PAGEOFFSET; 1654*0Sstevel@tonic-gate segmentvadr += sizesegment; 1655*0Sstevel@tonic-gate } 1656*0Sstevel@tonic-gate } while (residual_size && nsegments); 1657*0Sstevel@tonic-gate ASSERT(residual_size == 0); 1658*0Sstevel@tonic-gate 1659*0Sstevel@tonic-gate previousp->dmais_link = NULL; 1660*0Sstevel@tonic-gate previousp->dmais_flags |= DMAIS_WINEND; 1661*0Sstevel@tonic-gate if (curwinp) { 1662*0Sstevel@tonic-gate if (win_flags & DMAIS_NEEDINTBUF) 1663*0Sstevel@tonic-gate curwinp->dmais_flags |= DMAIS_WINUIB; 1664*0Sstevel@tonic-gate curwinp->_win._dmais_nex = NULL; 1665*0Sstevel@tonic-gate } else 1666*0Sstevel@tonic-gate prewinp->_win._dmais_nex = NULL; 1667*0Sstevel@tonic-gate 1668*0Sstevel@tonic-gate if ((needintbuf = MAX(needintbuf, reqneedintbuf)) != 0) { 1669*0Sstevel@tonic-gate uint64_t saved_align; 1670*0Sstevel@tonic-gate 1671*0Sstevel@tonic-gate saved_align = dma_attr->dma_attr_align; 1672*0Sstevel@tonic-gate /* 1673*0Sstevel@tonic-gate * Allocate intermediate buffer. To start with we request 1674*0Sstevel@tonic-gate * for a page aligned area. This request is satisfied from 1675*0Sstevel@tonic-gate * the system page free list pool. 1676*0Sstevel@tonic-gate */ 1677*0Sstevel@tonic-gate dma_attr->dma_attr_align = MMU_PAGESIZE; 1678*0Sstevel@tonic-gate if (i_ddi_mem_alloc(dip, dma_attr, needintbuf, 1679*0Sstevel@tonic-gate (dmareq->dmar_fp == DDI_DMA_SLEEP) ? 0x1 : 0, 1, 0, 1680*0Sstevel@tonic-gate &hp->dmai_ibufp, (ulong_t *)&hp->dmai_ibfsz, 1681*0Sstevel@tonic-gate NULL) != DDI_SUCCESS) { 1682*0Sstevel@tonic-gate dma_attr->dma_attr_align = saved_align; 1683*0Sstevel@tonic-gate rval = DDI_DMA_NORESOURCES; 1684*0Sstevel@tonic-gate goto bad; 1685*0Sstevel@tonic-gate } 1686*0Sstevel@tonic-gate if (mapinfo != DMAMI_KVADR) { 1687*0Sstevel@tonic-gate hp->dmai_kaddr = vmem_alloc(heap_arena, PAGESIZE, 1688*0Sstevel@tonic-gate VM_SLEEP); 1689*0Sstevel@tonic-gate } 1690*0Sstevel@tonic-gate dma_attr->dma_attr_align = saved_align; 1691*0Sstevel@tonic-gate } 1692*0Sstevel@tonic-gate 1693*0Sstevel@tonic-gate /* 1694*0Sstevel@tonic-gate * return success 1695*0Sstevel@tonic-gate */ 1696*0Sstevel@tonic-gate ASSERT(wcount > 0); 1697*0Sstevel@tonic-gate if (wcount == 1) { 1698*0Sstevel@tonic-gate hp->dmai_rflags &= ~DDI_DMA_PARTIAL; 1699*0Sstevel@tonic-gate rval = DDI_DMA_MAPPED; 1700*0Sstevel@tonic-gate } else if (hp->dmai_rflags & DDI_DMA_PARTIAL) { 1701*0Sstevel@tonic-gate rval = DDI_DMA_PARTIAL_MAP; 1702*0Sstevel@tonic-gate } else { 1703*0Sstevel@tonic-gate if (hp->dmai_segp) 1704*0Sstevel@tonic-gate kmem_free(hp->dmai_segp, hp->dmai_kmsize); 1705*0Sstevel@tonic-gate return (DDI_DMA_TOOBIG); 1706*0Sstevel@tonic-gate } 1707*0Sstevel@tonic-gate hp->dmai_nwin = wcount; 1708*0Sstevel@tonic-gate return (rval); 1709*0Sstevel@tonic-gate bad: 1710*0Sstevel@tonic-gate hp->dmai_cookie = NULL; 1711*0Sstevel@tonic-gate if (hp->dmai_segp) 1712*0Sstevel@tonic-gate kmem_free(hp->dmai_segp, hp->dmai_kmsize); 1713*0Sstevel@tonic-gate if (rval == DDI_DMA_NORESOURCES && 1714*0Sstevel@tonic-gate dmareq->dmar_fp != DDI_DMA_DONTWAIT && 1715*0Sstevel@tonic-gate dmareq->dmar_fp != DDI_DMA_SLEEP) 1716*0Sstevel@tonic-gate ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg, 1717*0Sstevel@tonic-gate &dvma_call_list_id); 1718*0Sstevel@tonic-gate return (rval); 1719*0Sstevel@tonic-gate } 1720*0Sstevel@tonic-gate 1721*0Sstevel@tonic-gate /* 
1722*0Sstevel@tonic-gate * This function works with the limit structure and does 32 bit arithmetic. 1723*0Sstevel@tonic-gate */ 1724*0Sstevel@tonic-gate int 1725*0Sstevel@tonic-gate rootnex_io_brkup_lim(dev_info_t *dip, dev_info_t *rdip, 1726*0Sstevel@tonic-gate struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep, 1727*0Sstevel@tonic-gate ddi_dma_lim_t *dma_lim, struct priv_handle *php) 1728*0Sstevel@tonic-gate { 1729*0Sstevel@tonic-gate impl_dma_segment_t *segmentp; 1730*0Sstevel@tonic-gate impl_dma_segment_t *curwinp; 1731*0Sstevel@tonic-gate impl_dma_segment_t *previousp; 1732*0Sstevel@tonic-gate impl_dma_segment_t *prewinp; 1733*0Sstevel@tonic-gate ddi_dma_impl_t *hp = 0; 1734*0Sstevel@tonic-gate caddr_t basevadr; 1735*0Sstevel@tonic-gate caddr_t segmentvadr; 1736*0Sstevel@tonic-gate uint64_t segmentpadr; 1737*0Sstevel@tonic-gate uint_t maxsegmentsize, sizesegment; 1738*0Sstevel@tonic-gate uint_t needintbuf; 1739*0Sstevel@tonic-gate uint_t offset; 1740*0Sstevel@tonic-gate uint_t residual_size; 1741*0Sstevel@tonic-gate uint_t sglistsize; 1742*0Sstevel@tonic-gate int nsegments; 1743*0Sstevel@tonic-gate int mapinfo; 1744*0Sstevel@tonic-gate int reqneedintbuf; 1745*0Sstevel@tonic-gate int rval; 1746*0Sstevel@tonic-gate int segment_flags, win_flags; 1747*0Sstevel@tonic-gate int sgcount; 1748*0Sstevel@tonic-gate int wcount; 1749*0Sstevel@tonic-gate #ifdef DMADEBUG 1750*0Sstevel@tonic-gate int numsegments; 1751*0Sstevel@tonic-gate #endif 1752*0Sstevel@tonic-gate int sizehandle; 1753*0Sstevel@tonic-gate 1754*0Sstevel@tonic-gate #ifdef lint 1755*0Sstevel@tonic-gate dip = dip; 1756*0Sstevel@tonic-gate #endif 1757*0Sstevel@tonic-gate 1758*0Sstevel@tonic-gate /* 1759*0Sstevel@tonic-gate * Validate the dma request. 1760*0Sstevel@tonic-gate */ 1761*0Sstevel@tonic-gate #ifdef DMADEBUG 1762*0Sstevel@tonic-gate if (dma_lim->dlim_adreg_max < MMU_PAGEOFFSET || 1763*0Sstevel@tonic-gate dma_lim->dlim_ctreg_max < MMU_PAGEOFFSET || 1764*0Sstevel@tonic-gate dma_lim->dlim_granular > MMU_PAGESIZE || 1765*0Sstevel@tonic-gate dma_lim->dlim_reqsize < MMU_PAGESIZE) { 1766*0Sstevel@tonic-gate DMAPRINT((" bad_limits\n")); 1767*0Sstevel@tonic-gate return (DDI_DMA_BADLIMITS); 1768*0Sstevel@tonic-gate } 1769*0Sstevel@tonic-gate #endif 1770*0Sstevel@tonic-gate 1771*0Sstevel@tonic-gate /* 1772*0Sstevel@tonic-gate * Initialize our local variables from the php structure. 1773*0Sstevel@tonic-gate * rootnex_get_phyaddr() has populated php structure on its 1774*0Sstevel@tonic-gate * previous invocation in rootnex_dma_map(). 1775*0Sstevel@tonic-gate */ 1776*0Sstevel@tonic-gate residual_size = OBJSIZE; 1777*0Sstevel@tonic-gate mapinfo = php->ph_mapinfo; 1778*0Sstevel@tonic-gate segmentpadr = php->ph_padr; 1779*0Sstevel@tonic-gate segmentvadr = php->ph_vaddr; 1780*0Sstevel@tonic-gate basevadr = (mapinfo == DMAMI_PAGES) ? 
0 : segmentvadr; 1781*0Sstevel@tonic-gate offset = segmentpadr & MMU_PAGEOFFSET; 1782*0Sstevel@tonic-gate if (dma_lim->dlim_sgllen <= 0 || 1783*0Sstevel@tonic-gate (offset & (dma_lim->dlim_minxfer - 1))) { 1784*0Sstevel@tonic-gate DMAPRINT((" bad_limits/mapping\n")); 1785*0Sstevel@tonic-gate rval = DDI_DMA_NOMAPPING; 1786*0Sstevel@tonic-gate goto bad; 1787*0Sstevel@tonic-gate } 1788*0Sstevel@tonic-gate 1789*0Sstevel@tonic-gate maxsegmentsize = MIN(dma_lim->dlim_adreg_max, 1790*0Sstevel@tonic-gate MIN((dma_lim->dlim_ctreg_max + 1) * dma_lim->dlim_minxfer, 1791*0Sstevel@tonic-gate dma_lim->dlim_reqsize) - 1) + 1; 1792*0Sstevel@tonic-gate if (maxsegmentsize == 0) 1793*0Sstevel@tonic-gate maxsegmentsize = FOURG - 1; 1794*0Sstevel@tonic-gate if (maxsegmentsize < MMU_PAGESIZE) { 1795*0Sstevel@tonic-gate DMAPRINT((" bad_limits, maxsegmentsize\n")); 1796*0Sstevel@tonic-gate rval = DDI_DMA_BADLIMITS; 1797*0Sstevel@tonic-gate goto bad; 1798*0Sstevel@tonic-gate } 1799*0Sstevel@tonic-gate 1800*0Sstevel@tonic-gate 1801*0Sstevel@tonic-gate /* 1802*0Sstevel@tonic-gate * The number of segments is the number of 4k pages that the 1803*0Sstevel@tonic-gate * object spans. 1804*0Sstevel@tonic-gate * Each 4k segment may need another segment to satisfy 1805*0Sstevel@tonic-gate * device granularity requirements. 1806*0Sstevel@tonic-gate * We will never need more than two segments per page. 1807*0Sstevel@tonic-gate * This may be an overestimate in some cases but it avoids 1808*0Sstevel@tonic-gate * 64 bit divide operations. 1809*0Sstevel@tonic-gate */ 1810*0Sstevel@tonic-gate nsegments = (offset + residual_size + MMU_PAGEOFFSET) >> 1811*0Sstevel@tonic-gate (MMU_PAGESHIFT - 1); 1812*0Sstevel@tonic-gate 1813*0Sstevel@tonic-gate #ifdef DMADEBUG 1814*0Sstevel@tonic-gate numsegments = nsegments; 1815*0Sstevel@tonic-gate #endif 1816*0Sstevel@tonic-gate ASSERT(nsegments > 0); 1817*0Sstevel@tonic-gate 1818*0Sstevel@tonic-gate 1819*0Sstevel@tonic-gate sizehandle = sizeof (ddi_dma_impl_t) + 1820*0Sstevel@tonic-gate (nsegments * sizeof (impl_dma_segment_t)); 1821*0Sstevel@tonic-gate 1822*0Sstevel@tonic-gate hp = kmem_alloc(sizehandle, 1823*0Sstevel@tonic-gate (dmareq->dmar_fp == DDI_DMA_SLEEP) ?
KM_SLEEP : KM_NOSLEEP); 1824*0Sstevel@tonic-gate if (!hp) { 1825*0Sstevel@tonic-gate rval = DDI_DMA_NORESOURCES; 1826*0Sstevel@tonic-gate goto bad; 1827*0Sstevel@tonic-gate } 1828*0Sstevel@tonic-gate hp->dmai_kmsize = sizehandle; 1829*0Sstevel@tonic-gate 1830*0Sstevel@tonic-gate /* 1831*0Sstevel@tonic-gate * locate segments after dma_impl handle structure 1832*0Sstevel@tonic-gate */ 1833*0Sstevel@tonic-gate segmentp = (impl_dma_segment_t *)(hp + 1); 1834*0Sstevel@tonic-gate 1835*0Sstevel@tonic-gate /* FMA related initialization */ 1836*0Sstevel@tonic-gate hp->dmai_fault = 0; 1837*0Sstevel@tonic-gate hp->dmai_fault_check = NULL; 1838*0Sstevel@tonic-gate hp->dmai_fault_notify = NULL; 1839*0Sstevel@tonic-gate hp->dmai_error.err_ena = 0; 1840*0Sstevel@tonic-gate hp->dmai_error.err_status = DDI_FM_OK; 1841*0Sstevel@tonic-gate hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED; 1842*0Sstevel@tonic-gate hp->dmai_error.err_ontrap = NULL; 1843*0Sstevel@tonic-gate hp->dmai_error.err_fep = NULL; 1844*0Sstevel@tonic-gate 1845*0Sstevel@tonic-gate /* 1846*0Sstevel@tonic-gate * Save requestor's information 1847*0Sstevel@tonic-gate */ 1848*0Sstevel@tonic-gate hp->dmai_minxfer = dma_lim->dlim_minxfer; 1849*0Sstevel@tonic-gate hp->dmai_burstsizes = dma_lim->dlim_burstsizes; 1850*0Sstevel@tonic-gate hp->dmai_rdip = rdip; 1851*0Sstevel@tonic-gate hp->dmai_mctl = rootnex_dma_mctl; 1852*0Sstevel@tonic-gate hp->dmai_wins = NULL; 1853*0Sstevel@tonic-gate hp->dmai_kaddr = hp->dmai_ibufp = NULL; 1854*0Sstevel@tonic-gate hp->dmai_hds = prewinp = segmentp; 1855*0Sstevel@tonic-gate hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS; 1856*0Sstevel@tonic-gate hp->dmai_minfo = (void *)(uintptr_t)mapinfo; 1857*0Sstevel@tonic-gate hp->dmai_object = dmareq->dmar_object; 1858*0Sstevel@tonic-gate 1859*0Sstevel@tonic-gate /* 1860*0Sstevel@tonic-gate * Breakup the memory object 1861*0Sstevel@tonic-gate * and build an i/o segment at each boundary condition 1862*0Sstevel@tonic-gate */ 1863*0Sstevel@tonic-gate curwinp = 0; 1864*0Sstevel@tonic-gate needintbuf = 0; 1865*0Sstevel@tonic-gate previousp = 0; 1866*0Sstevel@tonic-gate reqneedintbuf = 0; 1867*0Sstevel@tonic-gate sglistsize = 0; 1868*0Sstevel@tonic-gate wcount = 0; 1869*0Sstevel@tonic-gate sgcount = 1; 1870*0Sstevel@tonic-gate do { 1871*0Sstevel@tonic-gate sizesegment = 1872*0Sstevel@tonic-gate MIN(((uint_t)MMU_PAGESIZE - offset), residual_size); 1873*0Sstevel@tonic-gate segment_flags = (segmentpadr > AHI_LIM) ? 
DMAIS_NEEDINTBUF : 0; 1874*0Sstevel@tonic-gate 1875*0Sstevel@tonic-gate if (dma_lim->dlim_sgllen == 1) { 1876*0Sstevel@tonic-gate /* 1877*0Sstevel@tonic-gate * _no_ scatter/gather capability, 1878*0Sstevel@tonic-gate * so ensure that size of each segment is a 1879*0Sstevel@tonic-gate * multiple of dlim_granular (== sector size) 1880*0Sstevel@tonic-gate */ 1881*0Sstevel@tonic-gate if ((segmentpadr & (dma_lim->dlim_granular - 1)) && 1882*0Sstevel@tonic-gate residual_size != sizesegment) { 1883*0Sstevel@tonic-gate /* 1884*0Sstevel@tonic-gate * this segment needs an intermediate buffer 1885*0Sstevel@tonic-gate */ 1886*0Sstevel@tonic-gate sizesegment = 1887*0Sstevel@tonic-gate MIN((uint_t)MMU_PAGESIZE, residual_size); 1888*0Sstevel@tonic-gate segment_flags |= DMAIS_NEEDINTBUF; 1889*0Sstevel@tonic-gate } 1890*0Sstevel@tonic-gate } 1891*0Sstevel@tonic-gate 1892*0Sstevel@tonic-gate if (previousp && 1893*0Sstevel@tonic-gate (previousp->_pdmu._dmais_lpd + previousp->dmais_size) == 1894*0Sstevel@tonic-gate segmentpadr && 1895*0Sstevel@tonic-gate (previousp->dmais_flags & 1896*0Sstevel@tonic-gate (DMAIS_NEEDINTBUF | DMAIS_COMPLEMENT)) == 0 && 1897*0Sstevel@tonic-gate (segment_flags & DMAIS_NEEDINTBUF) == 0 && 1898*0Sstevel@tonic-gate (previousp->dmais_size + sizesegment) <= maxsegmentsize && 1899*0Sstevel@tonic-gate (segmentpadr & dma_lim->dlim_adreg_max) && 1900*0Sstevel@tonic-gate (sglistsize + sizesegment) <= dma_lim->dlim_reqsize) { 1901*0Sstevel@tonic-gate /* 1902*0Sstevel@tonic-gate * combine new segment with previous segment 1903*0Sstevel@tonic-gate */ 1904*0Sstevel@tonic-gate previousp->dmais_flags |= segment_flags; 1905*0Sstevel@tonic-gate previousp->dmais_size += sizesegment; 1906*0Sstevel@tonic-gate if ((sglistsize += sizesegment) == 1907*0Sstevel@tonic-gate dma_lim->dlim_reqsize) 1908*0Sstevel@tonic-gate /* 1909*0Sstevel@tonic-gate * force end of scatter/gather list 1910*0Sstevel@tonic-gate */ 1911*0Sstevel@tonic-gate sgcount = dma_lim->dlim_sgllen + 1; 1912*0Sstevel@tonic-gate } else { 1913*0Sstevel@tonic-gate /* 1914*0Sstevel@tonic-gate * add new segment to linked list 1915*0Sstevel@tonic-gate */ 1916*0Sstevel@tonic-gate if (previousp) { 1917*0Sstevel@tonic-gate previousp->dmais_link = segmentp; 1918*0Sstevel@tonic-gate } 1919*0Sstevel@tonic-gate segmentp->dmais_hndl = hp; 1920*0Sstevel@tonic-gate if (curwinp == 0) { 1921*0Sstevel@tonic-gate prewinp->_win._dmais_nex = 1922*0Sstevel@tonic-gate curwinp = segmentp; 1923*0Sstevel@tonic-gate segment_flags |= DMAIS_WINSTRT; 1924*0Sstevel@tonic-gate win_flags = segment_flags; 1925*0Sstevel@tonic-gate wcount++; 1926*0Sstevel@tonic-gate } else { 1927*0Sstevel@tonic-gate segmentp->_win._dmais_cur = curwinp; 1928*0Sstevel@tonic-gate win_flags |= segment_flags; 1929*0Sstevel@tonic-gate } 1930*0Sstevel@tonic-gate segmentp->dmais_ofst = segmentvadr - basevadr; 1931*0Sstevel@tonic-gate if (mapinfo == DMAMI_PAGES) { 1932*0Sstevel@tonic-gate segmentp->_vdmu._dmais_pp = php->ph_u.pp; 1933*0Sstevel@tonic-gate } else { 1934*0Sstevel@tonic-gate segmentp->_vdmu._dmais_va = segmentvadr; 1935*0Sstevel@tonic-gate } 1936*0Sstevel@tonic-gate segmentp->_pdmu._dmais_lpd = segmentpadr; 1937*0Sstevel@tonic-gate segmentp->dmais_flags = (ushort_t)segment_flags; 1938*0Sstevel@tonic-gate 1939*0Sstevel@tonic-gate if (dma_lim->dlim_sgllen > 1) { 1940*0Sstevel@tonic-gate if (segment_flags & DMAIS_NEEDINTBUF) { 1941*0Sstevel@tonic-gate needintbuf += ptob(btopr(sizesegment)); 1942*0Sstevel@tonic-gate if (needintbuf >= MAX_INT_BUF) { 1943*0Sstevel@tonic-gate /* 
1944*0Sstevel@tonic-gate * limit size of intermediate 1945*0Sstevel@tonic-gate * buffer 1946*0Sstevel@tonic-gate */ 1947*0Sstevel@tonic-gate reqneedintbuf = MAX_INT_BUF; 1948*0Sstevel@tonic-gate needintbuf = 0; 1949*0Sstevel@tonic-gate /* 1950*0Sstevel@tonic-gate * end of current window 1951*0Sstevel@tonic-gate */ 1952*0Sstevel@tonic-gate segmentp->dmais_flags |= 1953*0Sstevel@tonic-gate DMAIS_WINEND; 1954*0Sstevel@tonic-gate prewinp = curwinp; 1955*0Sstevel@tonic-gate curwinp->dmais_flags |= 1956*0Sstevel@tonic-gate DMAIS_WINUIB; 1957*0Sstevel@tonic-gate curwinp = NULL; 1958*0Sstevel@tonic-gate /* 1959*0Sstevel@tonic-gate * force end of scatter/gather 1960*0Sstevel@tonic-gate * list 1961*0Sstevel@tonic-gate */ 1962*0Sstevel@tonic-gate sgcount = dma_lim->dlim_sgllen; 1963*0Sstevel@tonic-gate } 1964*0Sstevel@tonic-gate } 1965*0Sstevel@tonic-gate sglistsize += sizesegment; 1966*0Sstevel@tonic-gate if (sglistsize >= dma_lim->dlim_reqsize) { 1967*0Sstevel@tonic-gate /* 1968*0Sstevel@tonic-gate * limit size of xfer 1969*0Sstevel@tonic-gate */ 1970*0Sstevel@tonic-gate sizesegment -= (sglistsize - 1971*0Sstevel@tonic-gate dma_lim->dlim_reqsize); 1972*0Sstevel@tonic-gate sglistsize = dma_lim->dlim_reqsize; 1973*0Sstevel@tonic-gate sgcount = dma_lim->dlim_sgllen; 1974*0Sstevel@tonic-gate } 1975*0Sstevel@tonic-gate sgcount++; 1976*0Sstevel@tonic-gate } else { 1977*0Sstevel@tonic-gate /* 1978*0Sstevel@tonic-gate * _no_ scatter/gather capability, 1979*0Sstevel@tonic-gate */ 1980*0Sstevel@tonic-gate if (segment_flags & DMAIS_NEEDINTBUF) { 1981*0Sstevel@tonic-gate /* 1982*0Sstevel@tonic-gate * end of window 1983*0Sstevel@tonic-gate */ 1984*0Sstevel@tonic-gate needintbuf = MMU_PAGESIZE; 1985*0Sstevel@tonic-gate segmentp->dmais_flags |= DMAIS_WINEND; 1986*0Sstevel@tonic-gate prewinp = curwinp; 1987*0Sstevel@tonic-gate curwinp->dmais_flags |= DMAIS_WINUIB; 1988*0Sstevel@tonic-gate curwinp = NULL; 1989*0Sstevel@tonic-gate } 1990*0Sstevel@tonic-gate } 1991*0Sstevel@tonic-gate segmentp->dmais_size = sizesegment; 1992*0Sstevel@tonic-gate previousp = segmentp++; 1993*0Sstevel@tonic-gate --nsegments; 1994*0Sstevel@tonic-gate } 1995*0Sstevel@tonic-gate 1996*0Sstevel@tonic-gate if (sgcount > dma_lim->dlim_sgllen) { 1997*0Sstevel@tonic-gate /* 1998*0Sstevel@tonic-gate * end of a scatter/gather list! 
1999*0Sstevel@tonic-gate * ensure that total length of list is a 2000*0Sstevel@tonic-gate * multiple of granular (sector size) 2001*0Sstevel@tonic-gate */ 2002*0Sstevel@tonic-gate if (sizesegment != residual_size) { 2003*0Sstevel@tonic-gate uint_t trim; 2004*0Sstevel@tonic-gate 2005*0Sstevel@tonic-gate trim = sglistsize & 2006*0Sstevel@tonic-gate (dma_lim->dlim_granular - 1); 2007*0Sstevel@tonic-gate if (trim >= sizesegment) { 2008*0Sstevel@tonic-gate cmn_err(CE_WARN, 2009*0Sstevel@tonic-gate "unable to reduce segment size"); 2010*0Sstevel@tonic-gate rval = DDI_DMA_NOMAPPING; 2011*0Sstevel@tonic-gate goto bad; 2012*0Sstevel@tonic-gate } 2013*0Sstevel@tonic-gate previousp->dmais_size -= trim; 2014*0Sstevel@tonic-gate sizesegment -= trim; 2015*0Sstevel@tonic-gate /* start new scatter/gather list */ 2016*0Sstevel@tonic-gate sgcount = 1; 2017*0Sstevel@tonic-gate sglistsize = 0; 2018*0Sstevel@tonic-gate } 2019*0Sstevel@tonic-gate previousp->dmais_flags |= DMAIS_COMPLEMENT; 2020*0Sstevel@tonic-gate } 2021*0Sstevel@tonic-gate if (sizesegment && (residual_size -= sizesegment)) { 2022*0Sstevel@tonic-gate segmentpadr = 2023*0Sstevel@tonic-gate rootnex_get_phyaddr(dmareq, sizesegment, php); 2024*0Sstevel@tonic-gate offset = segmentpadr & MMU_PAGEOFFSET; 2025*0Sstevel@tonic-gate segmentvadr += sizesegment; 2026*0Sstevel@tonic-gate } 2027*0Sstevel@tonic-gate } while (residual_size && nsegments); 2028*0Sstevel@tonic-gate ASSERT(residual_size == 0); 2029*0Sstevel@tonic-gate 2030*0Sstevel@tonic-gate previousp->dmais_link = NULL; 2031*0Sstevel@tonic-gate previousp->dmais_flags |= DMAIS_WINEND; 2032*0Sstevel@tonic-gate if (curwinp) { 2033*0Sstevel@tonic-gate if (win_flags & DMAIS_NEEDINTBUF) 2034*0Sstevel@tonic-gate curwinp->dmais_flags |= DMAIS_WINUIB; 2035*0Sstevel@tonic-gate curwinp->_win._dmais_nex = NULL; 2036*0Sstevel@tonic-gate } else 2037*0Sstevel@tonic-gate prewinp->_win._dmais_nex = NULL; 2038*0Sstevel@tonic-gate 2039*0Sstevel@tonic-gate if ((needintbuf = MAX(needintbuf, reqneedintbuf)) != 0) { 2040*0Sstevel@tonic-gate ddi_dma_attr_t dma_attr; 2041*0Sstevel@tonic-gate 2042*0Sstevel@tonic-gate 2043*0Sstevel@tonic-gate dma_attr.dma_attr_version = DMA_ATTR_V0; 2044*0Sstevel@tonic-gate dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo; 2045*0Sstevel@tonic-gate dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi; 2046*0Sstevel@tonic-gate dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer; 2047*0Sstevel@tonic-gate dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max; 2048*0Sstevel@tonic-gate dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max; 2049*0Sstevel@tonic-gate dma_attr.dma_attr_granular = dma_lim->dlim_granular; 2050*0Sstevel@tonic-gate dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen; 2051*0Sstevel@tonic-gate dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize; 2052*0Sstevel@tonic-gate dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes; 2053*0Sstevel@tonic-gate dma_attr.dma_attr_align = MMU_PAGESIZE; 2054*0Sstevel@tonic-gate dma_attr.dma_attr_flags = 0; 2055*0Sstevel@tonic-gate 2056*0Sstevel@tonic-gate /* 2057*0Sstevel@tonic-gate * Allocate intermediate buffer. 2058*0Sstevel@tonic-gate */ 2059*0Sstevel@tonic-gate if (i_ddi_mem_alloc(dip, &dma_attr, needintbuf, 2060*0Sstevel@tonic-gate (dmareq->dmar_fp == DDI_DMA_SLEEP) ? 
0x1 : 0, 1, 0, 2061*0Sstevel@tonic-gate &hp->dmai_ibufp, (ulong_t *)&hp->dmai_ibfsz, 2062*0Sstevel@tonic-gate NULL) != DDI_SUCCESS) { 2063*0Sstevel@tonic-gate rval = DDI_DMA_NORESOURCES; 2064*0Sstevel@tonic-gate goto bad; 2065*0Sstevel@tonic-gate } 2066*0Sstevel@tonic-gate if (mapinfo != DMAMI_KVADR) { 2067*0Sstevel@tonic-gate hp->dmai_kaddr = vmem_alloc(heap_arena, PAGESIZE, 2068*0Sstevel@tonic-gate VM_SLEEP); 2069*0Sstevel@tonic-gate } 2070*0Sstevel@tonic-gate } 2071*0Sstevel@tonic-gate 2072*0Sstevel@tonic-gate /* 2073*0Sstevel@tonic-gate * return success 2074*0Sstevel@tonic-gate */ 2075*0Sstevel@tonic-gate #ifdef DMADEBUG 2076*0Sstevel@tonic-gate DMAPRINT(("dma_brkup: handle %p nsegments %x \n", 2077*0Sstevel@tonic-gate (void *)hp, numsegments - nsegments)); 2078*0Sstevel@tonic-gate #endif 2079*0Sstevel@tonic-gate hp->dmai_cookie = NULL; 2080*0Sstevel@tonic-gate *handlep = (ddi_dma_handle_t)hp; 2081*0Sstevel@tonic-gate return (DDI_DMA_MAPPED); 2082*0Sstevel@tonic-gate bad: 2083*0Sstevel@tonic-gate if (hp) 2084*0Sstevel@tonic-gate kmem_free(hp, hp->dmai_kmsize); 2085*0Sstevel@tonic-gate if (rval == DDI_DMA_NORESOURCES && 2086*0Sstevel@tonic-gate dmareq->dmar_fp != DDI_DMA_DONTWAIT && 2087*0Sstevel@tonic-gate dmareq->dmar_fp != DDI_DMA_SLEEP) 2088*0Sstevel@tonic-gate ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg, 2089*0Sstevel@tonic-gate &dvma_call_list_id); 2090*0Sstevel@tonic-gate return (rval); 2091*0Sstevel@tonic-gate } 2092*0Sstevel@tonic-gate 2093*0Sstevel@tonic-gate int 2094*0Sstevel@tonic-gate rootnex_io_wtsync(ddi_dma_impl_t *hp, int type) 2095*0Sstevel@tonic-gate { 2096*0Sstevel@tonic-gate impl_dma_segment_t *sp = hp->dmai_wins; 2097*0Sstevel@tonic-gate caddr_t kviradr, addr; 2098*0Sstevel@tonic-gate caddr_t vsrc; 2099*0Sstevel@tonic-gate ulong_t segoffset, vsoffset; 2100*0Sstevel@tonic-gate int cpycnt; 2101*0Sstevel@tonic-gate 2102*0Sstevel@tonic-gate addr = hp->dmai_ibufp; 2103*0Sstevel@tonic-gate if ((uintptr_t)addr & MMU_PAGEOFFSET) { 2104*0Sstevel@tonic-gate addr = (caddr_t)(((uintptr_t)addr + MMU_PAGEOFFSET) & 2105*0Sstevel@tonic-gate ~MMU_PAGEOFFSET); 2106*0Sstevel@tonic-gate } 2107*0Sstevel@tonic-gate if ((sp->dmais_flags & DMAIS_WINUIB) == 0) 2108*0Sstevel@tonic-gate return (DDI_SUCCESS); 2109*0Sstevel@tonic-gate 2110*0Sstevel@tonic-gate switch ((intptr_t)hp->dmai_minfo) { 2111*0Sstevel@tonic-gate 2112*0Sstevel@tonic-gate case DMAMI_KVADR: 2113*0Sstevel@tonic-gate do if (sp->dmais_flags & DMAIS_NEEDINTBUF) { 2114*0Sstevel@tonic-gate 2115*0Sstevel@tonic-gate if (hp->dmai_rflags & DDI_DMA_WRITE) 2116*0Sstevel@tonic-gate /* 2117*0Sstevel@tonic-gate * copy from segment to buffer 2118*0Sstevel@tonic-gate */ 2119*0Sstevel@tonic-gate bcopy(sp->_vdmu._dmais_va, addr, 2120*0Sstevel@tonic-gate sp->dmais_size); 2121*0Sstevel@tonic-gate /* 2122*0Sstevel@tonic-gate * save phys addr of intermediate buffer 2123*0Sstevel@tonic-gate */ 2124*0Sstevel@tonic-gate sp->_pdmu._dmais_lpd = 2125*0Sstevel@tonic-gate ptob64(hat_getpfnum(kas.a_hat, addr)); 2126*0Sstevel@tonic-gate if (type == BIND) { 2127*0Sstevel@tonic-gate sp->dmais_cookie->dmac_laddress = 2128*0Sstevel@tonic-gate sp->_pdmu._dmais_lpd; 2129*0Sstevel@tonic-gate } 2130*0Sstevel@tonic-gate addr += MMU_PAGESIZE; 2131*0Sstevel@tonic-gate } while (!(sp->dmais_flags & DMAIS_WINEND) && 2132*0Sstevel@tonic-gate (sp = sp->dmais_link)); 2133*0Sstevel@tonic-gate break; 2134*0Sstevel@tonic-gate 2135*0Sstevel@tonic-gate case DMAMI_PAGES: 2136*0Sstevel@tonic-gate do if (sp->dmais_flags & DMAIS_NEEDINTBUF) { 2137*0Sstevel@tonic-gate 
2138*0Sstevel@tonic-gate if (hp->dmai_rflags & DDI_DMA_WRITE) { 2139*0Sstevel@tonic-gate /* 2140*0Sstevel@tonic-gate * need to mapin page so we can have a 2141*0Sstevel@tonic-gate * virtual address to do copying 2142*0Sstevel@tonic-gate */ 2143*0Sstevel@tonic-gate i86_pp_map(sp->_vdmu._dmais_pp, hp->dmai_kaddr); 2144*0Sstevel@tonic-gate /* 2145*0Sstevel@tonic-gate * copy from segment to buffer 2146*0Sstevel@tonic-gate */ 2147*0Sstevel@tonic-gate bcopy(hp->dmai_kaddr + 2148*0Sstevel@tonic-gate (sp->dmais_ofst & MMU_PAGEOFFSET), 2149*0Sstevel@tonic-gate addr, sp->dmais_size); 2150*0Sstevel@tonic-gate /* 2151*0Sstevel@tonic-gate * need to mapout page 2152*0Sstevel@tonic-gate */ 2153*0Sstevel@tonic-gate hat_unload(kas.a_hat, hp->dmai_kaddr, 2154*0Sstevel@tonic-gate MMU_PAGESIZE, HAT_UNLOAD); 2155*0Sstevel@tonic-gate } 2156*0Sstevel@tonic-gate /* 2157*0Sstevel@tonic-gate * save phys addr of intermediate buffer 2158*0Sstevel@tonic-gate */ 2159*0Sstevel@tonic-gate sp->_pdmu._dmais_lpd = 2160*0Sstevel@tonic-gate ptob64(hat_getpfnum(kas.a_hat, addr)); 2161*0Sstevel@tonic-gate if (type == BIND) { 2162*0Sstevel@tonic-gate sp->dmais_cookie->dmac_laddress = 2163*0Sstevel@tonic-gate sp->_pdmu._dmais_lpd; 2164*0Sstevel@tonic-gate } 2165*0Sstevel@tonic-gate addr += MMU_PAGESIZE; 2166*0Sstevel@tonic-gate } while (!(sp->dmais_flags & DMAIS_WINEND) && 2167*0Sstevel@tonic-gate (sp = sp->dmais_link)); 2168*0Sstevel@tonic-gate break; 2169*0Sstevel@tonic-gate 2170*0Sstevel@tonic-gate case DMAMI_UVADR: 2171*0Sstevel@tonic-gate do if (sp->dmais_flags & DMAIS_NEEDINTBUF) { 2172*0Sstevel@tonic-gate 2173*0Sstevel@tonic-gate if (hp->dmai_rflags & DDI_DMA_WRITE) { 2174*0Sstevel@tonic-gate struct page **pplist; 2175*0Sstevel@tonic-gate segoffset = 0; 2176*0Sstevel@tonic-gate do { 2177*0Sstevel@tonic-gate /* 2178*0Sstevel@tonic-gate * need to mapin page so we can have a 2179*0Sstevel@tonic-gate * virtual address to do copying 2180*0Sstevel@tonic-gate */ 2181*0Sstevel@tonic-gate vsrc = sp->_vdmu._dmais_va + segoffset; 2182*0Sstevel@tonic-gate vsoffset = 2183*0Sstevel@tonic-gate (ulong_t)vsrc & MMU_PAGEOFFSET; 2184*0Sstevel@tonic-gate pplist = hp->dmai_object.dmao_obj. 2185*0Sstevel@tonic-gate virt_obj.v_priv; 2186*0Sstevel@tonic-gate /* 2187*0Sstevel@tonic-gate * check if we have to use the 2188*0Sstevel@tonic-gate * shadow list or the CPU mapping. 2189*0Sstevel@tonic-gate */ 2190*0Sstevel@tonic-gate if (pplist != NULL) { 2191*0Sstevel@tonic-gate ulong_t base, off; 2192*0Sstevel@tonic-gate 2193*0Sstevel@tonic-gate base = (ulong_t)hp->dmai_object. 2194*0Sstevel@tonic-gate dmao_obj.virt_obj.v_addr; 2195*0Sstevel@tonic-gate off = (base & MMU_PAGEOFFSET) + 2196*0Sstevel@tonic-gate (ulong_t)vsrc - base; 2197*0Sstevel@tonic-gate i86_pp_map(pplist[btop(off)], 2198*0Sstevel@tonic-gate hp->dmai_kaddr); 2199*0Sstevel@tonic-gate } else { 2200*0Sstevel@tonic-gate i86_va_map(vsrc, 2201*0Sstevel@tonic-gate hp->dmai_object.dmao_obj.
2202*0Sstevel@tonic-gate virt_obj.v_as, 2203*0Sstevel@tonic-gate hp->dmai_kaddr); 2204*0Sstevel@tonic-gate } 2205*0Sstevel@tonic-gate kviradr = hp->dmai_kaddr + vsoffset; 2206*0Sstevel@tonic-gate cpycnt = sp->dmais_size - segoffset; 2207*0Sstevel@tonic-gate if (vsoffset + cpycnt > MMU_PAGESIZE) 2208*0Sstevel@tonic-gate cpycnt = MMU_PAGESIZE - 2209*0Sstevel@tonic-gate vsoffset; 2210*0Sstevel@tonic-gate /* 2211*0Sstevel@tonic-gate * copy from segment to buffer 2212*0Sstevel@tonic-gate */ 2213*0Sstevel@tonic-gate bcopy(kviradr, addr + segoffset, 2214*0Sstevel@tonic-gate cpycnt); 2215*0Sstevel@tonic-gate /* 2216*0Sstevel@tonic-gate * need to mapout page 2217*0Sstevel@tonic-gate */ 2218*0Sstevel@tonic-gate hat_unload(kas.a_hat, hp->dmai_kaddr, 2219*0Sstevel@tonic-gate MMU_PAGESIZE, HAT_UNLOAD); 2220*0Sstevel@tonic-gate segoffset += cpycnt; 2221*0Sstevel@tonic-gate } while (segoffset < sp->dmais_size); 2222*0Sstevel@tonic-gate } 2223*0Sstevel@tonic-gate /* 2224*0Sstevel@tonic-gate * save phys addr of intermediate buffer 2225*0Sstevel@tonic-gate */ 2226*0Sstevel@tonic-gate sp->_pdmu._dmais_lpd = 2227*0Sstevel@tonic-gate ptob64(hat_getpfnum(kas.a_hat, addr)); 2228*0Sstevel@tonic-gate if (type == BIND) { 2229*0Sstevel@tonic-gate sp->dmais_cookie->dmac_laddress = 2230*0Sstevel@tonic-gate sp->_pdmu._dmais_lpd; 2231*0Sstevel@tonic-gate } 2232*0Sstevel@tonic-gate addr += MMU_PAGESIZE; 2233*0Sstevel@tonic-gate } while (!(sp->dmais_flags & DMAIS_WINEND) && 2234*0Sstevel@tonic-gate (sp = sp->dmais_link)); 2235*0Sstevel@tonic-gate break; 2236*0Sstevel@tonic-gate 2237*0Sstevel@tonic-gate default: 2238*0Sstevel@tonic-gate cmn_err(CE_WARN, "Invalid dma handle/map info"); 2239*0Sstevel@tonic-gate } 2240*0Sstevel@tonic-gate return (DDI_SUCCESS); 2241*0Sstevel@tonic-gate } 2242*0Sstevel@tonic-gate 2243*0Sstevel@tonic-gate int 2244*0Sstevel@tonic-gate rootnex_io_rdsync(ddi_dma_impl_t *hp) 2245*0Sstevel@tonic-gate { 2246*0Sstevel@tonic-gate impl_dma_segment_t *sp = hp->dmai_wins; 2247*0Sstevel@tonic-gate caddr_t kviradr; 2248*0Sstevel@tonic-gate caddr_t vdest, addr; 2249*0Sstevel@tonic-gate ulong_t segoffset, vdoffset; 2250*0Sstevel@tonic-gate int cpycnt; 2251*0Sstevel@tonic-gate 2252*0Sstevel@tonic-gate addr = hp->dmai_ibufp; 2253*0Sstevel@tonic-gate if ((uintptr_t)addr & MMU_PAGEOFFSET) { 2254*0Sstevel@tonic-gate addr = (caddr_t) 2255*0Sstevel@tonic-gate (((uintptr_t)addr + MMU_PAGEOFFSET) & ~MMU_PAGEOFFSET); 2256*0Sstevel@tonic-gate } 2257*0Sstevel@tonic-gate if (!(sp->dmais_flags & DMAIS_WINUIB) || 2258*0Sstevel@tonic-gate !(hp->dmai_rflags & DDI_DMA_READ)) 2259*0Sstevel@tonic-gate return (DDI_SUCCESS); 2260*0Sstevel@tonic-gate 2261*0Sstevel@tonic-gate switch ((intptr_t)hp->dmai_minfo) { 2262*0Sstevel@tonic-gate 2263*0Sstevel@tonic-gate case DMAMI_KVADR: 2264*0Sstevel@tonic-gate do if (sp->dmais_flags & DMAIS_NEEDINTBUF) { 2265*0Sstevel@tonic-gate /* 2266*0Sstevel@tonic-gate * copy from buffer to segment 2267*0Sstevel@tonic-gate */ 2268*0Sstevel@tonic-gate bcopy(addr, sp->_vdmu._dmais_va, sp->dmais_size); 2269*0Sstevel@tonic-gate addr += MMU_PAGESIZE; 2270*0Sstevel@tonic-gate } while (!(sp->dmais_flags & DMAIS_WINEND) && 2271*0Sstevel@tonic-gate (sp = sp->dmais_link)); 2272*0Sstevel@tonic-gate break; 2273*0Sstevel@tonic-gate 2274*0Sstevel@tonic-gate case DMAMI_PAGES: 2275*0Sstevel@tonic-gate do if (sp->dmais_flags & DMAIS_NEEDINTBUF) { 2276*0Sstevel@tonic-gate /* 2277*0Sstevel@tonic-gate * need to mapin page 2278*0Sstevel@tonic-gate */ 2279*0Sstevel@tonic-gate i86_pp_map(sp->_vdmu._dmais_pp, hp->dmai_kaddr); 
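/*
 * The temporary mapping established by i86_pp_map() above lives at
 * hp->dmai_kaddr only until the hat_unload() below, so the copy out
 * of the intermediate buffer must complete within this iteration.
 * This is the read-side counterpart of the DMAMI_PAGES case in
 * rootnex_io_wtsync().
 */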
2280*0Sstevel@tonic-gate /* 2281*0Sstevel@tonic-gate * copy from buffer to segment 2282*0Sstevel@tonic-gate */ 2283*0Sstevel@tonic-gate bcopy(addr, 2284*0Sstevel@tonic-gate (hp->dmai_kaddr + 2285*0Sstevel@tonic-gate (sp->dmais_ofst & MMU_PAGEOFFSET)), 2286*0Sstevel@tonic-gate sp->dmais_size); 2287*0Sstevel@tonic-gate 2288*0Sstevel@tonic-gate /* 2289*0Sstevel@tonic-gate * need to mapout page 2290*0Sstevel@tonic-gate */ 2291*0Sstevel@tonic-gate hat_unload(kas.a_hat, hp->dmai_kaddr, 2292*0Sstevel@tonic-gate MMU_PAGESIZE, HAT_UNLOAD); 2293*0Sstevel@tonic-gate addr += MMU_PAGESIZE; 2294*0Sstevel@tonic-gate } while (!(sp->dmais_flags & DMAIS_WINEND) && 2295*0Sstevel@tonic-gate (sp = sp->dmais_link)); 2296*0Sstevel@tonic-gate break; 2297*0Sstevel@tonic-gate 2298*0Sstevel@tonic-gate case DMAMI_UVADR: 2299*0Sstevel@tonic-gate do if (sp->dmais_flags & DMAIS_NEEDINTBUF) { 2300*0Sstevel@tonic-gate struct page **pplist; 2301*0Sstevel@tonic-gate segoffset = 0; 2302*0Sstevel@tonic-gate do { 2303*0Sstevel@tonic-gate /* 2304*0Sstevel@tonic-gate * need to map_in user virtual address 2305*0Sstevel@tonic-gate */ 2306*0Sstevel@tonic-gate vdest = sp->_vdmu._dmais_va + segoffset; 2307*0Sstevel@tonic-gate vdoffset = (ulong_t)vdest & MMU_PAGEOFFSET; 2308*0Sstevel@tonic-gate pplist = hp->dmai_object.dmao_obj. 2309*0Sstevel@tonic-gate virt_obj.v_priv; 2310*0Sstevel@tonic-gate /* 2311*0Sstevel@tonic-gate * check if we have to use the 2312*0Sstevel@tonic-gate * shadow list or the CPU mapping. 2313*0Sstevel@tonic-gate */ 2314*0Sstevel@tonic-gate if (pplist != NULL) { 2315*0Sstevel@tonic-gate ulong_t base, off; 2316*0Sstevel@tonic-gate 2317*0Sstevel@tonic-gate base = (ulong_t)hp->dmai_object. 2318*0Sstevel@tonic-gate dmao_obj.virt_obj.v_addr; 2319*0Sstevel@tonic-gate off = (base & MMU_PAGEOFFSET) + 2320*0Sstevel@tonic-gate (ulong_t)vdest - base; 2321*0Sstevel@tonic-gate i86_pp_map(pplist[btop(off)], 2322*0Sstevel@tonic-gate hp->dmai_kaddr); 2323*0Sstevel@tonic-gate } else { 2324*0Sstevel@tonic-gate i86_va_map(vdest, 2325*0Sstevel@tonic-gate hp->dmai_object.dmao_obj. 
2326*0Sstevel@tonic-gate virt_obj.v_as, 2327*0Sstevel@tonic-gate hp->dmai_kaddr); 2328*0Sstevel@tonic-gate } 2329*0Sstevel@tonic-gate kviradr = hp->dmai_kaddr + vdoffset; 2330*0Sstevel@tonic-gate cpycnt = sp->dmais_size - segoffset; 2331*0Sstevel@tonic-gate if (vdoffset + cpycnt > MMU_PAGESIZE) 2332*0Sstevel@tonic-gate cpycnt = MMU_PAGESIZE - vdoffset; 2333*0Sstevel@tonic-gate /* 2334*0Sstevel@tonic-gate * copy from buffer to segment 2335*0Sstevel@tonic-gate */ 2336*0Sstevel@tonic-gate bcopy(addr + segoffset, kviradr, cpycnt); 2337*0Sstevel@tonic-gate /* 2338*0Sstevel@tonic-gate * need to map_out page 2339*0Sstevel@tonic-gate */ 2340*0Sstevel@tonic-gate hat_unload(kas.a_hat, hp->dmai_kaddr, 2341*0Sstevel@tonic-gate MMU_PAGESIZE, HAT_UNLOAD); 2342*0Sstevel@tonic-gate segoffset += cpycnt; 2343*0Sstevel@tonic-gate } while (segoffset < sp->dmais_size); 2344*0Sstevel@tonic-gate addr += MMU_PAGESIZE; 2345*0Sstevel@tonic-gate } while (!(sp->dmais_flags & DMAIS_WINEND) && 2346*0Sstevel@tonic-gate (sp = sp->dmais_link)); 2347*0Sstevel@tonic-gate break; 2348*0Sstevel@tonic-gate 2349*0Sstevel@tonic-gate default: 2350*0Sstevel@tonic-gate cmn_err(CE_WARN, "Invalid dma handle/map info"); 2351*0Sstevel@tonic-gate } 2352*0Sstevel@tonic-gate return (DDI_SUCCESS); 2353*0Sstevel@tonic-gate } 2354*0Sstevel@tonic-gate 2355*0Sstevel@tonic-gate static int 2356*0Sstevel@tonic-gate rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, 2357*0Sstevel@tonic-gate ddi_dma_handle_t handle, enum ddi_dma_ctlops request, 2358*0Sstevel@tonic-gate off_t *offp, size_t *lenp, 2359*0Sstevel@tonic-gate caddr_t *objpp, uint_t cache_flags) 2360*0Sstevel@tonic-gate { 2361*0Sstevel@tonic-gate ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 2362*0Sstevel@tonic-gate impl_dma_segment_t *sp = (impl_dma_segment_t *)lenp; 2363*0Sstevel@tonic-gate impl_dma_segment_t *wp = (impl_dma_segment_t *)offp; 2364*0Sstevel@tonic-gate #if !defined(__amd64) 2365*0Sstevel@tonic-gate ddi_dma_cookie_t *cp; 2366*0Sstevel@tonic-gate #endif 2367*0Sstevel@tonic-gate int rval = DDI_SUCCESS; 2368*0Sstevel@tonic-gate 2369*0Sstevel@tonic-gate #ifdef lint 2370*0Sstevel@tonic-gate dip = dip; 2371*0Sstevel@tonic-gate rdip = rdip; 2372*0Sstevel@tonic-gate #endif 2373*0Sstevel@tonic-gate 2374*0Sstevel@tonic-gate DMAPRINT(("io_mctl: handle %p ", (void *)hp)); 2375*0Sstevel@tonic-gate 2376*0Sstevel@tonic-gate switch (request) { 2377*0Sstevel@tonic-gate 2378*0Sstevel@tonic-gate case DDI_DMA_SEGTOC: 2379*0Sstevel@tonic-gate #if defined(__amd64) 2380*0Sstevel@tonic-gate /* 2381*0Sstevel@tonic-gate * ddi_dma_segtocookie(9F) is Obsolete, and the whole 2382*0Sstevel@tonic-gate * passing-the-pointer-through-the-cache-flags thing just 2383*0Sstevel@tonic-gate * doesn't work when pointers are 64-bit and cache_flags 2384*0Sstevel@tonic-gate * are 32-bit. 
2385*0Sstevel@tonic-gate */ 2386*0Sstevel@tonic-gate DMAPRINT(("stoc invoked but not implemented.\n")); 2387*0Sstevel@tonic-gate return (DDI_FAILURE); 2388*0Sstevel@tonic-gate #else 2389*0Sstevel@tonic-gate /* return device specific dma cookie for segment */ 2390*0Sstevel@tonic-gate sp = (impl_dma_segment_t *)(uintptr_t)cache_flags; 2391*0Sstevel@tonic-gate if (!sp) { 2392*0Sstevel@tonic-gate DMAPRINT(("stoc segment %p end\n", (void *)sp)); 2393*0Sstevel@tonic-gate return (DDI_FAILURE); 2394*0Sstevel@tonic-gate } 2395*0Sstevel@tonic-gate cp = (ddi_dma_cookie_t *)objpp; 2396*0Sstevel@tonic-gate 2397*0Sstevel@tonic-gate /* 2398*0Sstevel@tonic-gate * use phys addr of actual buffer or intermediate buffer 2399*0Sstevel@tonic-gate */ 2400*0Sstevel@tonic-gate cp->dmac_laddress = sp->_pdmu._dmais_lpd; 2401*0Sstevel@tonic-gate 2402*0Sstevel@tonic-gate DMAPRINT(("stoc segment %p mapping %lx size %lx\n", 2403*0Sstevel@tonic-gate (void *)sp, (ulong_t)sp->_vdmu._dmais_va, sp->dmais_size)); 2404*0Sstevel@tonic-gate 2405*0Sstevel@tonic-gate cp->dmac_type = (ulong_t)sp; 2406*0Sstevel@tonic-gate *lenp = cp->dmac_size = sp->dmais_size; 2407*0Sstevel@tonic-gate *offp = sp->dmais_ofst; 2408*0Sstevel@tonic-gate return (DDI_SUCCESS); 2409*0Sstevel@tonic-gate #endif 2410*0Sstevel@tonic-gate 2411*0Sstevel@tonic-gate case DDI_DMA_NEXTSEG: /* get next DMA segment */ 2412*0Sstevel@tonic-gate ASSERT(wp->dmais_flags & DMAIS_WINSTRT); 2413*0Sstevel@tonic-gate if (wp != hp->dmai_wins) { 2414*0Sstevel@tonic-gate DMAPRINT(("nxseg: not current window %p\n", 2415*0Sstevel@tonic-gate (void *)wp)); 2416*0Sstevel@tonic-gate return (DDI_DMA_STALE); 2417*0Sstevel@tonic-gate } 2418*0Sstevel@tonic-gate if (!sp) { 2419*0Sstevel@tonic-gate /* 2420*0Sstevel@tonic-gate * reset to first segment in current window 2421*0Sstevel@tonic-gate */ 2422*0Sstevel@tonic-gate *objpp = (caddr_t)wp; 2423*0Sstevel@tonic-gate } else { 2424*0Sstevel@tonic-gate if (sp->dmais_flags & DMAIS_WINEND) { 2425*0Sstevel@tonic-gate DMAPRINT(("nxseg: seg %p eow\n", (void *)sp)); 2426*0Sstevel@tonic-gate return (DDI_DMA_DONE); 2427*0Sstevel@tonic-gate } 2428*0Sstevel@tonic-gate /* check if segment is really in window */ 2429*0Sstevel@tonic-gate ASSERT((sp->dmais_flags & DMAIS_WINSTRT) && sp == wp || 2430*0Sstevel@tonic-gate !(sp->dmais_flags & DMAIS_WINSTRT) && 2431*0Sstevel@tonic-gate sp->_win._dmais_cur == wp); 2432*0Sstevel@tonic-gate *objpp = (caddr_t)sp->dmais_link; 2433*0Sstevel@tonic-gate } 2434*0Sstevel@tonic-gate DMAPRINT(("nxseg: new seg %p\n", (void *)*objpp)); 2435*0Sstevel@tonic-gate return (DDI_SUCCESS); 2436*0Sstevel@tonic-gate 2437*0Sstevel@tonic-gate case DDI_DMA_NEXTWIN: /* get next DMA window */ 2438*0Sstevel@tonic-gate if (hp->dmai_wins && hp->dmai_ibufp) 2439*0Sstevel@tonic-gate /* 2440*0Sstevel@tonic-gate * do implied sync on current window 2441*0Sstevel@tonic-gate */ 2442*0Sstevel@tonic-gate (void) rootnex_io_rdsync(hp); 2443*0Sstevel@tonic-gate if (!wp) { 2444*0Sstevel@tonic-gate /* 2445*0Sstevel@tonic-gate * reset to (first segment of) first window 2446*0Sstevel@tonic-gate */ 2447*0Sstevel@tonic-gate *objpp = (caddr_t)hp->dmai_hds; 2448*0Sstevel@tonic-gate DMAPRINT(("nxwin: first win %p\n", (void *)*objpp)); 2449*0Sstevel@tonic-gate } else { 2450*0Sstevel@tonic-gate ASSERT(wp->dmais_flags & DMAIS_WINSTRT); 2451*0Sstevel@tonic-gate if (wp != hp->dmai_wins) { 2452*0Sstevel@tonic-gate DMAPRINT(("nxwin: win %p not current\n", 2453*0Sstevel@tonic-gate (void *)wp)); 2454*0Sstevel@tonic-gate return (DDI_DMA_STALE); 2455*0Sstevel@tonic-gate } 
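/*
 * At this point wp is the caller's current window (the WINSTRT segment
 * that heads it) and _win._dmais_nex links it to the start of the next
 * window; a null link means all windows have been consumed and
 * DDI_DMA_DONE is returned below.  Drivers normally reach this request
 * through the obsolete ddi_dma_nextwin(9F)/ddi_dma_nextseg(9F)
 * wrappers.  A minimal usage sketch follows (signatures assumed from
 * the obsolete 9F interfaces, local names illustrative, error handling
 * omitted):
 *
 *	win = NULL;			(NULL requests the first window)
 *	while (ddi_dma_nextwin(handle, win, &nwin) == DDI_SUCCESS) {
 *		for (seg = NULL;
 *		    ddi_dma_nextseg(nwin, seg, &nseg) == DDI_SUCCESS;
 *		    seg = nseg) {
 *			convert nseg to a cookie with
 *			ddi_dma_segtocookie(9F) and program the device
 *		}
 *		win = nwin;
 *	}
 */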
2456*0Sstevel@tonic-gate if (wp->_win._dmais_nex == 0) { 2457*0Sstevel@tonic-gate DMAPRINT(("nxwin: win %p end\n", (void *)wp)); 2458*0Sstevel@tonic-gate return (DDI_DMA_DONE); 2459*0Sstevel@tonic-gate } 2460*0Sstevel@tonic-gate *objpp = (caddr_t)wp->_win._dmais_nex; 2461*0Sstevel@tonic-gate DMAPRINT(("nxwin: new win %p\n", (void *)*objpp)); 2462*0Sstevel@tonic-gate } 2463*0Sstevel@tonic-gate hp->dmai_wins = (impl_dma_segment_t *)*objpp; 2464*0Sstevel@tonic-gate if (hp->dmai_ibufp) 2465*0Sstevel@tonic-gate return (rootnex_io_wtsync(hp, MAP)); 2466*0Sstevel@tonic-gate return (DDI_SUCCESS); 2467*0Sstevel@tonic-gate 2468*0Sstevel@tonic-gate case DDI_DMA_FREE: 2469*0Sstevel@tonic-gate DMAPRINT(("free handle\n")); 2470*0Sstevel@tonic-gate if (hp->dmai_ibufp) { 2471*0Sstevel@tonic-gate rval = rootnex_io_rdsync(hp); 2472*0Sstevel@tonic-gate ddi_mem_free(hp->dmai_ibufp); 2473*0Sstevel@tonic-gate } 2474*0Sstevel@tonic-gate if (hp->dmai_kaddr) 2475*0Sstevel@tonic-gate vmem_free(heap_arena, hp->dmai_kaddr, PAGESIZE); 2476*0Sstevel@tonic-gate kmem_free(hp, hp->dmai_kmsize); 2477*0Sstevel@tonic-gate if (dvma_call_list_id) 2478*0Sstevel@tonic-gate ddi_run_callback(&dvma_call_list_id); 2479*0Sstevel@tonic-gate break; 2480*0Sstevel@tonic-gate 2481*0Sstevel@tonic-gate case DDI_DMA_IOPB_ALLOC: /* get contiguous DMA-able memory */ 2482*0Sstevel@tonic-gate DMAPRINT(("iopb alloc\n")); 2483*0Sstevel@tonic-gate rval = i_ddi_mem_alloc_lim(rdip, (ddi_dma_lim_t *)offp, 2484*0Sstevel@tonic-gate *lenp, 0, 0, 0, objpp, NULL, NULL); 2485*0Sstevel@tonic-gate break; 2486*0Sstevel@tonic-gate 2487*0Sstevel@tonic-gate case DDI_DMA_SMEM_ALLOC: /* get contiguous DMA-able memory */ 2488*0Sstevel@tonic-gate DMAPRINT(("mem alloc\n")); 2489*0Sstevel@tonic-gate rval = i_ddi_mem_alloc_lim(rdip, (ddi_dma_lim_t *)offp, 2490*0Sstevel@tonic-gate *lenp, cache_flags, 1, 0, objpp, (uint_t *)handle, NULL); 2491*0Sstevel@tonic-gate break; 2492*0Sstevel@tonic-gate 2493*0Sstevel@tonic-gate case DDI_DMA_KVADDR: 2494*0Sstevel@tonic-gate DMAPRINT(("kvaddr of phys mapping\n")); 2495*0Sstevel@tonic-gate return (DDI_FAILURE); 2496*0Sstevel@tonic-gate 2497*0Sstevel@tonic-gate case DDI_DMA_GETERR: 2498*0Sstevel@tonic-gate DMAPRINT(("geterr\n")); 2499*0Sstevel@tonic-gate rval = DDI_FAILURE; 2500*0Sstevel@tonic-gate break; 2501*0Sstevel@tonic-gate 2502*0Sstevel@tonic-gate case DDI_DMA_COFF: 2503*0Sstevel@tonic-gate DMAPRINT(("coff off %p mapping %llx size %lx\n", 2504*0Sstevel@tonic-gate (void *)*objpp, 2505*0Sstevel@tonic-gate (unsigned long long)hp->dmai_wins->_pdmu._dmais_lpd, 2506*0Sstevel@tonic-gate hp->dmai_wins->dmais_size)); 2507*0Sstevel@tonic-gate rval = DDI_FAILURE; 2508*0Sstevel@tonic-gate break; 2509*0Sstevel@tonic-gate 2510*0Sstevel@tonic-gate default: 2511*0Sstevel@tonic-gate DMAPRINT(("unknown 0x%x\n", request)); 2512*0Sstevel@tonic-gate return (DDI_FAILURE); 2513*0Sstevel@tonic-gate } 2514*0Sstevel@tonic-gate return (rval); 2515*0Sstevel@tonic-gate } 2516*0Sstevel@tonic-gate 2517*0Sstevel@tonic-gate /* 2518*0Sstevel@tonic-gate * Root nexus ctl functions 2519*0Sstevel@tonic-gate */ 2520*0Sstevel@tonic-gate #define REPORTDEV_BUFSIZE 1024 2521*0Sstevel@tonic-gate 2522*0Sstevel@tonic-gate static int 2523*0Sstevel@tonic-gate rootnex_ctl_reportdev(dev_info_t *dev) 2524*0Sstevel@tonic-gate { 2525*0Sstevel@tonic-gate int i, n, len, f_len = 0; 2526*0Sstevel@tonic-gate char *buf; 2527*0Sstevel@tonic-gate 2528*0Sstevel@tonic-gate buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP); 2529*0Sstevel@tonic-gate f_len += snprintf(buf, REPORTDEV_BUFSIZE, 
2530*0Sstevel@tonic-gate "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev)); 2531*0Sstevel@tonic-gate len = strlen(buf); 2532*0Sstevel@tonic-gate 2533*0Sstevel@tonic-gate for (i = 0; i < sparc_pd_getnreg(dev); i++) { 2534*0Sstevel@tonic-gate 2535*0Sstevel@tonic-gate struct regspec *rp = sparc_pd_getreg(dev, i); 2536*0Sstevel@tonic-gate 2537*0Sstevel@tonic-gate if (i == 0) 2538*0Sstevel@tonic-gate f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 2539*0Sstevel@tonic-gate ": "); 2540*0Sstevel@tonic-gate else 2541*0Sstevel@tonic-gate f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 2542*0Sstevel@tonic-gate " and "); 2543*0Sstevel@tonic-gate len = strlen(buf); 2544*0Sstevel@tonic-gate 2545*0Sstevel@tonic-gate switch (rp->regspec_bustype) { 2546*0Sstevel@tonic-gate 2547*0Sstevel@tonic-gate case BTEISA: 2548*0Sstevel@tonic-gate f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 2549*0Sstevel@tonic-gate "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr); 2550*0Sstevel@tonic-gate break; 2551*0Sstevel@tonic-gate 2552*0Sstevel@tonic-gate case BTISA: 2553*0Sstevel@tonic-gate f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 2554*0Sstevel@tonic-gate "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr); 2555*0Sstevel@tonic-gate break; 2556*0Sstevel@tonic-gate 2557*0Sstevel@tonic-gate default: 2558*0Sstevel@tonic-gate f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 2559*0Sstevel@tonic-gate "space %x offset %x", 2560*0Sstevel@tonic-gate rp->regspec_bustype, rp->regspec_addr); 2561*0Sstevel@tonic-gate break; 2562*0Sstevel@tonic-gate } 2563*0Sstevel@tonic-gate len = strlen(buf); 2564*0Sstevel@tonic-gate } 2565*0Sstevel@tonic-gate for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) { 2566*0Sstevel@tonic-gate int pri; 2567*0Sstevel@tonic-gate 2568*0Sstevel@tonic-gate if (i != 0) { 2569*0Sstevel@tonic-gate f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 2570*0Sstevel@tonic-gate ","); 2571*0Sstevel@tonic-gate len = strlen(buf); 2572*0Sstevel@tonic-gate } 2573*0Sstevel@tonic-gate pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri); 2574*0Sstevel@tonic-gate f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 2575*0Sstevel@tonic-gate " sparc ipl %d", pri); 2576*0Sstevel@tonic-gate len = strlen(buf); 2577*0Sstevel@tonic-gate } 2578*0Sstevel@tonic-gate #ifdef DEBUG 2579*0Sstevel@tonic-gate if (f_len + 1 >= REPORTDEV_BUFSIZE) { 2580*0Sstevel@tonic-gate cmn_err(CE_NOTE, "next message is truncated: " 2581*0Sstevel@tonic-gate "printed length 1024, real length %d", f_len); 2582*0Sstevel@tonic-gate } 2583*0Sstevel@tonic-gate #endif /* DEBUG */ 2584*0Sstevel@tonic-gate cmn_err(CE_CONT, "?%s\n", buf); 2585*0Sstevel@tonic-gate kmem_free(buf, REPORTDEV_BUFSIZE); 2586*0Sstevel@tonic-gate return (DDI_SUCCESS); 2587*0Sstevel@tonic-gate } 2588*0Sstevel@tonic-gate 2589*0Sstevel@tonic-gate /* 2590*0Sstevel@tonic-gate * For the x86 rootnexus, we're prepared to claim that the interrupt string 2591*0Sstevel@tonic-gate * is in the form of a list of <ipl,vec> specifications. 
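 *
 * As a sketch of the layouts rootnex_xlate_intrs() below accepts (the
 * leading count is part of the integer array handed to that routine,
 * which is not necessarily the literal contents of the "interrupts"
 * property itself):
 *
 *	old style:	<4, ipl0, irq0, ipl1, irq1>	- two <ipl,irq> pairs
 *	new style:	<2, irq0, irq1>			- two irqs; the IPLs
 *			come from an optional "interrupt-priorities"
 *			property, or default to 5 when it is absent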
2592*0Sstevel@tonic-gate */ 2593*0Sstevel@tonic-gate 2594*0Sstevel@tonic-gate #define VEC_MIN 1 2595*0Sstevel@tonic-gate #define VEC_MAX 255 2596*0Sstevel@tonic-gate static int 2597*0Sstevel@tonic-gate rootnex_xlate_intrs(dev_info_t *dip, dev_info_t *rdip, int *in, 2598*0Sstevel@tonic-gate struct ddi_parent_private_data *pdptr) 2599*0Sstevel@tonic-gate { 2600*0Sstevel@tonic-gate size_t size; 2601*0Sstevel@tonic-gate int n; 2602*0Sstevel@tonic-gate struct intrspec *new; 2603*0Sstevel@tonic-gate caddr_t got_prop; 2604*0Sstevel@tonic-gate int *inpri; 2605*0Sstevel@tonic-gate int got_len; 2606*0Sstevel@tonic-gate extern int ignore_hardware_nodes; /* force flag from ddi_impl.c */ 2607*0Sstevel@tonic-gate 2608*0Sstevel@tonic-gate static char bad_intr_fmt[] = 2609*0Sstevel@tonic-gate "rootnex: bad interrupt spec from %s%d - ipl %d, irq %d\n"; 2610*0Sstevel@tonic-gate 2611*0Sstevel@tonic-gate #ifdef lint 2612*0Sstevel@tonic-gate dip = dip; 2613*0Sstevel@tonic-gate #endif 2614*0Sstevel@tonic-gate /* 2615*0Sstevel@tonic-gate * determine if the driver is expecting the new style "interrupts" 2616*0Sstevel@tonic-gate * property which just contains the IRQ, or the old style which 2617*0Sstevel@tonic-gate * contains pairs of <IPL,IRQ>. if it is the new style, we always 2618*0Sstevel@tonic-gate * assign IPL 5 unless an "interrupt-priorities" property exists. 2619*0Sstevel@tonic-gate * in that case, the "interrupt-priorities" property contains the 2620*0Sstevel@tonic-gate * IPL values that match, one for one, the IRQ values in the 2621*0Sstevel@tonic-gate * "interrupts" property. 2622*0Sstevel@tonic-gate */ 2623*0Sstevel@tonic-gate inpri = NULL; 2624*0Sstevel@tonic-gate if ((ddi_getprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS, 2625*0Sstevel@tonic-gate "ignore-hardware-nodes", -1) != -1) || 2626*0Sstevel@tonic-gate ignore_hardware_nodes) { 2627*0Sstevel@tonic-gate /* the old style "interrupts" property... */ 2628*0Sstevel@tonic-gate 2629*0Sstevel@tonic-gate /* 2630*0Sstevel@tonic-gate * The list consists of <ipl,vec> elements 2631*0Sstevel@tonic-gate */ 2632*0Sstevel@tonic-gate if ((n = (*in++ >> 1)) < 1) 2633*0Sstevel@tonic-gate return (DDI_FAILURE); 2634*0Sstevel@tonic-gate 2635*0Sstevel@tonic-gate pdptr->par_nintr = n; 2636*0Sstevel@tonic-gate size = n * sizeof (struct intrspec); 2637*0Sstevel@tonic-gate new = pdptr->par_intr = kmem_zalloc(size, KM_SLEEP); 2638*0Sstevel@tonic-gate 2639*0Sstevel@tonic-gate while (n--) { 2640*0Sstevel@tonic-gate int level = *in++; 2641*0Sstevel@tonic-gate int vec = *in++; 2642*0Sstevel@tonic-gate 2643*0Sstevel@tonic-gate if (level < 1 || level > MAXIPL || 2644*0Sstevel@tonic-gate vec < VEC_MIN || vec > VEC_MAX) { 2645*0Sstevel@tonic-gate cmn_err(CE_CONT, bad_intr_fmt, 2646*0Sstevel@tonic-gate DEVI(rdip)->devi_name, 2647*0Sstevel@tonic-gate DEVI(rdip)->devi_instance, level, vec); 2648*0Sstevel@tonic-gate goto broken; 2649*0Sstevel@tonic-gate } 2650*0Sstevel@tonic-gate new->intrspec_pri = level; 2651*0Sstevel@tonic-gate if (vec != 2) 2652*0Sstevel@tonic-gate new->intrspec_vec = vec; 2653*0Sstevel@tonic-gate else 2654*0Sstevel@tonic-gate /* 2655*0Sstevel@tonic-gate * irq 2 on the PC bus is tied to irq 9 2656*0Sstevel@tonic-gate * on ISA, EISA and MicroChannel 2657*0Sstevel@tonic-gate */ 2658*0Sstevel@tonic-gate new->intrspec_vec = 9; 2659*0Sstevel@tonic-gate new++; 2660*0Sstevel@tonic-gate } 2661*0Sstevel@tonic-gate 2662*0Sstevel@tonic-gate return (DDI_SUCCESS); 2663*0Sstevel@tonic-gate } else { 2664*0Sstevel@tonic-gate /* the new style "interrupts" property... 
*/ 2665*0Sstevel@tonic-gate 2666*0Sstevel@tonic-gate /* 2667*0Sstevel@tonic-gate * The list consists of <vec> elements 2668*0Sstevel@tonic-gate */ 2669*0Sstevel@tonic-gate if ((n = (*in++)) < 1) 2670*0Sstevel@tonic-gate return (DDI_FAILURE); 2671*0Sstevel@tonic-gate 2672*0Sstevel@tonic-gate pdptr->par_nintr = n; 2673*0Sstevel@tonic-gate size = n * sizeof (struct intrspec); 2674*0Sstevel@tonic-gate new = pdptr->par_intr = kmem_zalloc(size, KM_SLEEP); 2675*0Sstevel@tonic-gate 2676*0Sstevel@tonic-gate /* XXX check for "interrupt-priorities" property... */ 2677*0Sstevel@tonic-gate if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS, 2678*0Sstevel@tonic-gate "interrupt-priorities", (caddr_t)&got_prop, &got_len) 2679*0Sstevel@tonic-gate == DDI_PROP_SUCCESS) { 2680*0Sstevel@tonic-gate if (n != (got_len / sizeof (int))) { 2681*0Sstevel@tonic-gate cmn_err(CE_CONT, 2682*0Sstevel@tonic-gate "rootnex: bad interrupt-priorities length" 2683*0Sstevel@tonic-gate " from %s%d: expected %d, got %d\n", 2684*0Sstevel@tonic-gate DEVI(rdip)->devi_name, 2685*0Sstevel@tonic-gate DEVI(rdip)->devi_instance, n, 2686*0Sstevel@tonic-gate (int)(got_len / sizeof (int))); 2687*0Sstevel@tonic-gate goto broken; 2688*0Sstevel@tonic-gate } 2689*0Sstevel@tonic-gate inpri = (int *)got_prop; 2690*0Sstevel@tonic-gate } 2691*0Sstevel@tonic-gate 2692*0Sstevel@tonic-gate while (n--) { 2693*0Sstevel@tonic-gate int level; 2694*0Sstevel@tonic-gate int vec = *in++; 2695*0Sstevel@tonic-gate 2696*0Sstevel@tonic-gate if (inpri == NULL) 2697*0Sstevel@tonic-gate level = 5; 2698*0Sstevel@tonic-gate else 2699*0Sstevel@tonic-gate level = *inpri++; 2700*0Sstevel@tonic-gate 2701*0Sstevel@tonic-gate if (level < 1 || level > MAXIPL || 2702*0Sstevel@tonic-gate vec < VEC_MIN || vec > VEC_MAX) { 2703*0Sstevel@tonic-gate cmn_err(CE_CONT, bad_intr_fmt, 2704*0Sstevel@tonic-gate DEVI(rdip)->devi_name, 2705*0Sstevel@tonic-gate DEVI(rdip)->devi_instance, level, vec); 2706*0Sstevel@tonic-gate goto broken; 2707*0Sstevel@tonic-gate } 2708*0Sstevel@tonic-gate new->intrspec_pri = level; 2709*0Sstevel@tonic-gate if (vec != 2) 2710*0Sstevel@tonic-gate new->intrspec_vec = vec; 2711*0Sstevel@tonic-gate else 2712*0Sstevel@tonic-gate /* 2713*0Sstevel@tonic-gate * irq 2 on the PC bus is tied to irq 9 2714*0Sstevel@tonic-gate * on ISA, EISA and MicroChannel 2715*0Sstevel@tonic-gate */ 2716*0Sstevel@tonic-gate new->intrspec_vec = 9; 2717*0Sstevel@tonic-gate new++; 2718*0Sstevel@tonic-gate } 2719*0Sstevel@tonic-gate 2720*0Sstevel@tonic-gate if (inpri != NULL) 2721*0Sstevel@tonic-gate kmem_free(got_prop, got_len); 2722*0Sstevel@tonic-gate return (DDI_SUCCESS); 2723*0Sstevel@tonic-gate } 2724*0Sstevel@tonic-gate 2725*0Sstevel@tonic-gate broken: 2726*0Sstevel@tonic-gate kmem_free(pdptr->par_intr, size); 2727*0Sstevel@tonic-gate pdptr->par_intr = NULL; 2728*0Sstevel@tonic-gate pdptr->par_nintr = 0; 2729*0Sstevel@tonic-gate if (inpri != NULL) 2730*0Sstevel@tonic-gate kmem_free(got_prop, got_len); 2731*0Sstevel@tonic-gate return (DDI_FAILURE); 2732*0Sstevel@tonic-gate } 2733*0Sstevel@tonic-gate 2734*0Sstevel@tonic-gate /*ARGSUSED*/ 2735*0Sstevel@tonic-gate static int 2736*0Sstevel@tonic-gate rootnex_ctl_children(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop, 2737*0Sstevel@tonic-gate dev_info_t *child) 2738*0Sstevel@tonic-gate { 2739*0Sstevel@tonic-gate extern int impl_ddi_sunbus_initchild(dev_info_t *); 2740*0Sstevel@tonic-gate extern void impl_ddi_sunbus_removechild(dev_info_t *); 2741*0Sstevel@tonic-gate 2742*0Sstevel@tonic-gate switch (ctlop) { 
2743*0Sstevel@tonic-gate case DDI_CTLOPS_INITCHILD: 2744*0Sstevel@tonic-gate return (impl_ddi_sunbus_initchild(child)); 2745*0Sstevel@tonic-gate 2746*0Sstevel@tonic-gate case DDI_CTLOPS_UNINITCHILD: 2747*0Sstevel@tonic-gate impl_ddi_sunbus_removechild(child); 2748*0Sstevel@tonic-gate return (DDI_SUCCESS); 2749*0Sstevel@tonic-gate default: 2750*0Sstevel@tonic-gate return (DDI_FAILURE); 2751*0Sstevel@tonic-gate } 2752*0Sstevel@tonic-gate } 2753*0Sstevel@tonic-gate 2754*0Sstevel@tonic-gate 2755*0Sstevel@tonic-gate static int 2756*0Sstevel@tonic-gate rootnex_ctlops_poke(peekpoke_ctlops_t *in_args) 2757*0Sstevel@tonic-gate { 2758*0Sstevel@tonic-gate int err = DDI_SUCCESS; 2759*0Sstevel@tonic-gate on_trap_data_t otd; 2760*0Sstevel@tonic-gate 2761*0Sstevel@tonic-gate /* Cautious access not supported. */ 2762*0Sstevel@tonic-gate if (in_args->handle != NULL) 2763*0Sstevel@tonic-gate return (DDI_FAILURE); 2764*0Sstevel@tonic-gate 2765*0Sstevel@tonic-gate mutex_enter(&pokefault_mutex); 2766*0Sstevel@tonic-gate pokefault = -1; 2767*0Sstevel@tonic-gate 2768*0Sstevel@tonic-gate /* Set up protected environment. */ 2769*0Sstevel@tonic-gate if (!on_trap(&otd, OT_DATA_ACCESS)) { 2770*0Sstevel@tonic-gate switch (in_args->size) { 2771*0Sstevel@tonic-gate case sizeof (uint8_t): 2772*0Sstevel@tonic-gate *(uint8_t *)in_args->dev_addr = 2773*0Sstevel@tonic-gate *(uint8_t *)in_args->host_addr; 2774*0Sstevel@tonic-gate break; 2775*0Sstevel@tonic-gate 2776*0Sstevel@tonic-gate case sizeof (uint16_t): 2777*0Sstevel@tonic-gate *(uint16_t *)in_args->dev_addr = 2778*0Sstevel@tonic-gate *(uint16_t *)in_args->host_addr; 2779*0Sstevel@tonic-gate break; 2780*0Sstevel@tonic-gate 2781*0Sstevel@tonic-gate case sizeof (uint32_t): 2782*0Sstevel@tonic-gate *(uint32_t *)in_args->dev_addr = 2783*0Sstevel@tonic-gate *(uint32_t *)in_args->host_addr; 2784*0Sstevel@tonic-gate break; 2785*0Sstevel@tonic-gate 2786*0Sstevel@tonic-gate case sizeof (uint64_t): 2787*0Sstevel@tonic-gate *(uint64_t *)in_args->dev_addr = 2788*0Sstevel@tonic-gate *(uint64_t *)in_args->host_addr; 2789*0Sstevel@tonic-gate break; 2790*0Sstevel@tonic-gate 2791*0Sstevel@tonic-gate default: 2792*0Sstevel@tonic-gate err = DDI_FAILURE; 2793*0Sstevel@tonic-gate break; 2794*0Sstevel@tonic-gate } 2795*0Sstevel@tonic-gate } else 2796*0Sstevel@tonic-gate err = DDI_FAILURE; 2797*0Sstevel@tonic-gate 2798*0Sstevel@tonic-gate /* Take down protected environment. */ 2799*0Sstevel@tonic-gate no_trap(); 2800*0Sstevel@tonic-gate 2801*0Sstevel@tonic-gate pokefault = 0; 2802*0Sstevel@tonic-gate mutex_exit(&pokefault_mutex); 2803*0Sstevel@tonic-gate 2804*0Sstevel@tonic-gate return (err); 2805*0Sstevel@tonic-gate } 2806*0Sstevel@tonic-gate 2807*0Sstevel@tonic-gate 2808*0Sstevel@tonic-gate static int 2809*0Sstevel@tonic-gate rootnex_ctlops_peek(peekpoke_ctlops_t *in_args, void *result) 2810*0Sstevel@tonic-gate { 2811*0Sstevel@tonic-gate int err = DDI_SUCCESS; 2812*0Sstevel@tonic-gate on_trap_data_t otd; 2813*0Sstevel@tonic-gate 2814*0Sstevel@tonic-gate /* Cautious access not supported. 
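 * (A non-NULL in_args->handle would mean the caller asked for a
 * "cautious", access-handle-mediated transfer; this rootnex only
 * implements the plain ddi_peek(9F)/ddi_poke(9F) style, so such
 * requests are failed.)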
*/ 2815*0Sstevel@tonic-gate if (in_args->handle != NULL) 2816*0Sstevel@tonic-gate return (DDI_FAILURE); 2817*0Sstevel@tonic-gate 2818*0Sstevel@tonic-gate if (!on_trap(&otd, OT_DATA_ACCESS)) { 2819*0Sstevel@tonic-gate switch (in_args->size) { 2820*0Sstevel@tonic-gate case sizeof (uint8_t): 2821*0Sstevel@tonic-gate *(uint8_t *)in_args->host_addr = 2822*0Sstevel@tonic-gate *(uint8_t *)in_args->dev_addr; 2823*0Sstevel@tonic-gate break; 2824*0Sstevel@tonic-gate 2825*0Sstevel@tonic-gate case sizeof (uint16_t): 2826*0Sstevel@tonic-gate *(uint16_t *)in_args->host_addr = 2827*0Sstevel@tonic-gate *(uint16_t *)in_args->dev_addr; 2828*0Sstevel@tonic-gate break; 2829*0Sstevel@tonic-gate 2830*0Sstevel@tonic-gate case sizeof (uint32_t): 2831*0Sstevel@tonic-gate *(uint32_t *)in_args->host_addr = 2832*0Sstevel@tonic-gate *(uint32_t *)in_args->dev_addr; 2833*0Sstevel@tonic-gate break; 2834*0Sstevel@tonic-gate 2835*0Sstevel@tonic-gate case sizeof (uint64_t): 2836*0Sstevel@tonic-gate *(uint64_t *)in_args->host_addr = 2837*0Sstevel@tonic-gate *(uint64_t *)in_args->dev_addr; 2838*0Sstevel@tonic-gate break; 2839*0Sstevel@tonic-gate 2840*0Sstevel@tonic-gate default: 2841*0Sstevel@tonic-gate err = DDI_FAILURE; 2842*0Sstevel@tonic-gate break; 2843*0Sstevel@tonic-gate } 2844*0Sstevel@tonic-gate result = (void *)in_args->host_addr; 2845*0Sstevel@tonic-gate } else 2846*0Sstevel@tonic-gate err = DDI_FAILURE; 2847*0Sstevel@tonic-gate 2848*0Sstevel@tonic-gate no_trap(); 2849*0Sstevel@tonic-gate return (err); 2850*0Sstevel@tonic-gate } 2851*0Sstevel@tonic-gate 2852*0Sstevel@tonic-gate static int 2853*0Sstevel@tonic-gate rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, 2854*0Sstevel@tonic-gate ddi_ctl_enum_t ctlop, void *arg, void *result) 2855*0Sstevel@tonic-gate { 2856*0Sstevel@tonic-gate int n, *ptr; 2857*0Sstevel@tonic-gate struct ddi_parent_private_data *pdp; 2858*0Sstevel@tonic-gate 2859*0Sstevel@tonic-gate static boolean_t reserved_msg_printed = B_FALSE; 2860*0Sstevel@tonic-gate 2861*0Sstevel@tonic-gate switch (ctlop) { 2862*0Sstevel@tonic-gate case DDI_CTLOPS_DMAPMAPC: 2863*0Sstevel@tonic-gate /* 2864*0Sstevel@tonic-gate * Return 'partial' to indicate that dma mapping 2865*0Sstevel@tonic-gate * has to be done in the main MMU. 2866*0Sstevel@tonic-gate */ 2867*0Sstevel@tonic-gate return (DDI_DMA_PARTIAL); 2868*0Sstevel@tonic-gate 2869*0Sstevel@tonic-gate case DDI_CTLOPS_BTOP: 2870*0Sstevel@tonic-gate /* 2871*0Sstevel@tonic-gate * Convert byte count input to physical page units. 
2872*0Sstevel@tonic-gate * (byte counts that are not a page-size multiple 2873*0Sstevel@tonic-gate * are rounded down) 2874*0Sstevel@tonic-gate */ 2875*0Sstevel@tonic-gate *(ulong_t *)result = btop(*(ulong_t *)arg); 2876*0Sstevel@tonic-gate return (DDI_SUCCESS); 2877*0Sstevel@tonic-gate 2878*0Sstevel@tonic-gate case DDI_CTLOPS_PTOB: 2879*0Sstevel@tonic-gate /* 2880*0Sstevel@tonic-gate * Convert size in physical pages to bytes 2881*0Sstevel@tonic-gate */ 2882*0Sstevel@tonic-gate *(ulong_t *)result = ptob(*(ulong_t *)arg); 2883*0Sstevel@tonic-gate return (DDI_SUCCESS); 2884*0Sstevel@tonic-gate 2885*0Sstevel@tonic-gate case DDI_CTLOPS_BTOPR: 2886*0Sstevel@tonic-gate /* 2887*0Sstevel@tonic-gate * Convert byte count input to physical page units 2888*0Sstevel@tonic-gate * (byte counts that are not a page-size multiple 2889*0Sstevel@tonic-gate * are rounded up) 2890*0Sstevel@tonic-gate */ 2891*0Sstevel@tonic-gate *(ulong_t *)result = btopr(*(ulong_t *)arg); 2892*0Sstevel@tonic-gate return (DDI_SUCCESS); 2893*0Sstevel@tonic-gate 2894*0Sstevel@tonic-gate case DDI_CTLOPS_POKE: 2895*0Sstevel@tonic-gate return (rootnex_ctlops_poke((peekpoke_ctlops_t *)arg)); 2896*0Sstevel@tonic-gate 2897*0Sstevel@tonic-gate case DDI_CTLOPS_PEEK: 2898*0Sstevel@tonic-gate return (rootnex_ctlops_peek((peekpoke_ctlops_t *)arg, result)); 2899*0Sstevel@tonic-gate 2900*0Sstevel@tonic-gate case DDI_CTLOPS_INITCHILD: 2901*0Sstevel@tonic-gate case DDI_CTLOPS_UNINITCHILD: 2902*0Sstevel@tonic-gate return (rootnex_ctl_children(dip, rdip, ctlop, arg)); 2903*0Sstevel@tonic-gate 2904*0Sstevel@tonic-gate case DDI_CTLOPS_REPORTDEV: 2905*0Sstevel@tonic-gate return (rootnex_ctl_reportdev(rdip)); 2906*0Sstevel@tonic-gate 2907*0Sstevel@tonic-gate case DDI_CTLOPS_IOMIN: 2908*0Sstevel@tonic-gate /* 2909*0Sstevel@tonic-gate * Nothing to do here but reflect back.. 2910*0Sstevel@tonic-gate */ 2911*0Sstevel@tonic-gate return (DDI_SUCCESS); 2912*0Sstevel@tonic-gate 2913*0Sstevel@tonic-gate case DDI_CTLOPS_REGSIZE: 2914*0Sstevel@tonic-gate case DDI_CTLOPS_NREGS: 2915*0Sstevel@tonic-gate case DDI_CTLOPS_NINTRS: 2916*0Sstevel@tonic-gate break; 2917*0Sstevel@tonic-gate 2918*0Sstevel@tonic-gate case DDI_CTLOPS_SIDDEV: 2919*0Sstevel@tonic-gate if (ndi_dev_is_prom_node(rdip)) 2920*0Sstevel@tonic-gate return (DDI_SUCCESS); 2921*0Sstevel@tonic-gate if (ndi_dev_is_persistent_node(rdip)) 2922*0Sstevel@tonic-gate return (DDI_SUCCESS); 2923*0Sstevel@tonic-gate return (DDI_FAILURE); 2924*0Sstevel@tonic-gate 2925*0Sstevel@tonic-gate case DDI_CTLOPS_INTR_HILEVEL: 2926*0Sstevel@tonic-gate /* 2927*0Sstevel@tonic-gate * Indicate whether the interrupt specified is to be handled 2928*0Sstevel@tonic-gate * above lock level. In other words, above the level that 2929*0Sstevel@tonic-gate * cv_signal and default type mutexes can be used. 
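 * This is the check that ultimately backs ddi_intr_hilevel(9F): a
 * handler that runs above LOCK_LEVEL must not rely on those primitives.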
2930*0Sstevel@tonic-gate */ 2931*0Sstevel@tonic-gate *(int *)result = 2932*0Sstevel@tonic-gate (INT_IPL(((struct intrspec *)arg)->intrspec_pri) 2933*0Sstevel@tonic-gate > LOCK_LEVEL); 2934*0Sstevel@tonic-gate return (DDI_SUCCESS); 2935*0Sstevel@tonic-gate 2936*0Sstevel@tonic-gate case DDI_CTLOPS_XLATE_INTRS: 2937*0Sstevel@tonic-gate return (rootnex_xlate_intrs(dip, rdip, arg, result)); 2938*0Sstevel@tonic-gate 2939*0Sstevel@tonic-gate case DDI_CTLOPS_POWER: 2940*0Sstevel@tonic-gate return ((*pm_platform_power)((power_req_t *)arg)); 2941*0Sstevel@tonic-gate 2942*0Sstevel@tonic-gate case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */ 2943*0Sstevel@tonic-gate case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */ 2944*0Sstevel@tonic-gate case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */ 2945*0Sstevel@tonic-gate if (!reserved_msg_printed) { 2946*0Sstevel@tonic-gate reserved_msg_printed = B_TRUE; 2947*0Sstevel@tonic-gate cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for " 2948*0Sstevel@tonic-gate "1 or more reserved/obsolete operations."); 2949*0Sstevel@tonic-gate } 2950*0Sstevel@tonic-gate return (DDI_FAILURE); 2951*0Sstevel@tonic-gate 2952*0Sstevel@tonic-gate default: 2953*0Sstevel@tonic-gate return (DDI_FAILURE); 2954*0Sstevel@tonic-gate } 2955*0Sstevel@tonic-gate /* 2956*0Sstevel@tonic-gate * The rest are for "hardware" properties 2957*0Sstevel@tonic-gate */ 2958*0Sstevel@tonic-gate if ((pdp = ddi_get_parent_data(rdip)) == NULL) 2959*0Sstevel@tonic-gate return (DDI_FAILURE); 2960*0Sstevel@tonic-gate 2961*0Sstevel@tonic-gate if (ctlop == DDI_CTLOPS_NREGS) { 2962*0Sstevel@tonic-gate ptr = (int *)result; 2963*0Sstevel@tonic-gate *ptr = pdp->par_nreg; 2964*0Sstevel@tonic-gate } else if (ctlop == DDI_CTLOPS_NINTRS) { 2965*0Sstevel@tonic-gate ptr = (int *)result; 2966*0Sstevel@tonic-gate *ptr = pdp->par_nintr; 2967*0Sstevel@tonic-gate } else { 2968*0Sstevel@tonic-gate off_t *size = (off_t *)result; 2969*0Sstevel@tonic-gate 2970*0Sstevel@tonic-gate ptr = (int *)arg; 2971*0Sstevel@tonic-gate n = *ptr; 2972*0Sstevel@tonic-gate if (n >= pdp->par_nreg) { 2973*0Sstevel@tonic-gate return (DDI_FAILURE); 2974*0Sstevel@tonic-gate } 2975*0Sstevel@tonic-gate *size = (off_t)pdp->par_reg[n].regspec_size; 2976*0Sstevel@tonic-gate } 2977*0Sstevel@tonic-gate return (DDI_SUCCESS); 2978*0Sstevel@tonic-gate } 2979*0Sstevel@tonic-gate 2980*0Sstevel@tonic-gate /* 2981*0Sstevel@tonic-gate * rootnex_get_ispec: 2982*0Sstevel@tonic-gate * convert an interrupt number to an interrupt specification. 2983*0Sstevel@tonic-gate * The interrupt number determines which interrupt spec will be 2984*0Sstevel@tonic-gate * returned if more than one exists. 2985*0Sstevel@tonic-gate * 2986*0Sstevel@tonic-gate * Look into the parent private data area of the 'rdip' to find out 2987*0Sstevel@tonic-gate * the interrupt specification. First check to make sure there is 2988*0Sstevel@tonic-gate * one that matches "inumber" and then return a pointer to it. 2989*0Sstevel@tonic-gate * 2990*0Sstevel@tonic-gate * Return NULL if one could not be found.
2991*0Sstevel@tonic-gate * 2992*0Sstevel@tonic-gate * NOTE: This is needed for rootnex_intr_ops() 2993*0Sstevel@tonic-gate */ 2994*0Sstevel@tonic-gate static struct intrspec * 2995*0Sstevel@tonic-gate rootnex_get_ispec(dev_info_t *rdip, int inum) 2996*0Sstevel@tonic-gate { 2997*0Sstevel@tonic-gate struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip); 2998*0Sstevel@tonic-gate 2999*0Sstevel@tonic-gate /* 3000*0Sstevel@tonic-gate * Special case handling for drivers that provide their own 3001*0Sstevel@tonic-gate * intrspec structures instead of relying on the DDI framework. 3002*0Sstevel@tonic-gate * 3003*0Sstevel@tonic-gate * A broken hardware driver in ON could potentially provide its 3004*0Sstevel@tonic-gate * own intrspec structure, instead of relying on the hardware. 3005*0Sstevel@tonic-gate * If these drivers are children of 'rootnex' then we need to 3006*0Sstevel@tonic-gate * continue to provide backward compatibility to them here. 3007*0Sstevel@tonic-gate * 3008*0Sstevel@tonic-gate * The following check is a special case for the 'pcic' driver, which 3009*0Sstevel@tonic-gate * was found to have broken hardware and thereby provides its own intrspec. 3010*0Sstevel@tonic-gate * 3011*0Sstevel@tonic-gate * Verbatim comments from this driver are shown here: 3012*0Sstevel@tonic-gate * "Don't use the ddi_add_intr since we don't have a 3013*0Sstevel@tonic-gate * default intrspec in all cases." 3014*0Sstevel@tonic-gate * 3015*0Sstevel@tonic-gate * Since an 'ispec' may not always be created for it, 3016*0Sstevel@tonic-gate * check for that and create one if needed. 3017*0Sstevel@tonic-gate * 3018*0Sstevel@tonic-gate * NOTE: Currently 'pcic' is the only driver found to do this. 3019*0Sstevel@tonic-gate */ 3020*0Sstevel@tonic-gate if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) { 3021*0Sstevel@tonic-gate pdp->par_nintr = 1; 3022*0Sstevel@tonic-gate pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) * 3023*0Sstevel@tonic-gate pdp->par_nintr, KM_SLEEP); 3024*0Sstevel@tonic-gate } 3025*0Sstevel@tonic-gate 3026*0Sstevel@tonic-gate /* Validate the interrupt number */ 3027*0Sstevel@tonic-gate if (inum >= pdp->par_nintr) 3028*0Sstevel@tonic-gate return (NULL); 3029*0Sstevel@tonic-gate 3030*0Sstevel@tonic-gate /* Get the interrupt structure pointer and return that */ 3031*0Sstevel@tonic-gate return ((struct intrspec *)&pdp->par_intr[inum]); 3032*0Sstevel@tonic-gate } 3033*0Sstevel@tonic-gate 3034*0Sstevel@tonic-gate 3035*0Sstevel@tonic-gate /* 3036*0Sstevel@tonic-gate * rootnex_intr_ops: 3037*0Sstevel@tonic-gate * bus_intr_op() function for interrupt support 3038*0Sstevel@tonic-gate */ 3039*0Sstevel@tonic-gate /* ARGSUSED */ 3040*0Sstevel@tonic-gate static int 3041*0Sstevel@tonic-gate rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op, 3042*0Sstevel@tonic-gate ddi_intr_handle_impl_t *hdlp, void *result) 3043*0Sstevel@tonic-gate { 3044*0Sstevel@tonic-gate struct intrspec *ispec; 3045*0Sstevel@tonic-gate struct ddi_parent_private_data *pdp; 3046*0Sstevel@tonic-gate 3047*0Sstevel@tonic-gate DDI_INTR_NEXDBG((CE_CONT, 3048*0Sstevel@tonic-gate "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n", 3049*0Sstevel@tonic-gate (void *)pdip, (void *)rdip, intr_op, (void *)hdlp)); 3050*0Sstevel@tonic-gate 3051*0Sstevel@tonic-gate /* Process the interrupt operation */ 3052*0Sstevel@tonic-gate switch (intr_op) { 3053*0Sstevel@tonic-gate case DDI_INTROP_GETCAP: 3054*0Sstevel@tonic-gate /* First check with pcplusmp */ 3055*0Sstevel@tonic-gate if (psm_intr_ops == NULL)
3056*0Sstevel@tonic-gate return (DDI_FAILURE); 3057*0Sstevel@tonic-gate 3058*0Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) { 3059*0Sstevel@tonic-gate *(int *)result = 0; 3060*0Sstevel@tonic-gate return (DDI_FAILURE); 3061*0Sstevel@tonic-gate } 3062*0Sstevel@tonic-gate break; 3063*0Sstevel@tonic-gate case DDI_INTROP_SETCAP: 3064*0Sstevel@tonic-gate if (psm_intr_ops == NULL) 3065*0Sstevel@tonic-gate return (DDI_FAILURE); 3066*0Sstevel@tonic-gate 3067*0Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result)) 3068*0Sstevel@tonic-gate return (DDI_FAILURE); 3069*0Sstevel@tonic-gate break; 3070*0Sstevel@tonic-gate case DDI_INTROP_ALLOC: 3071*0Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 3072*0Sstevel@tonic-gate return (DDI_FAILURE); 3073*0Sstevel@tonic-gate hdlp->ih_pri = ispec->intrspec_pri; 3074*0Sstevel@tonic-gate *(int *)result = hdlp->ih_scratch1; 3075*0Sstevel@tonic-gate break; 3076*0Sstevel@tonic-gate case DDI_INTROP_FREE: 3077*0Sstevel@tonic-gate pdp = ddi_get_parent_data(rdip); 3078*0Sstevel@tonic-gate /* 3079*0Sstevel@tonic-gate * Special case for 'pcic' driver' only. 3080*0Sstevel@tonic-gate * If an intrspec was created for it, clean it up here 3081*0Sstevel@tonic-gate * See detailed comments on this in the function 3082*0Sstevel@tonic-gate * rootnex_get_ispec(). 3083*0Sstevel@tonic-gate */ 3084*0Sstevel@tonic-gate if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) { 3085*0Sstevel@tonic-gate kmem_free(pdp->par_intr, sizeof (struct intrspec) * 3086*0Sstevel@tonic-gate pdp->par_nintr); 3087*0Sstevel@tonic-gate /* 3088*0Sstevel@tonic-gate * Set it to zero; so that 3089*0Sstevel@tonic-gate * DDI framework doesn't free it again 3090*0Sstevel@tonic-gate */ 3091*0Sstevel@tonic-gate pdp->par_intr = NULL; 3092*0Sstevel@tonic-gate pdp->par_nintr = 0; 3093*0Sstevel@tonic-gate } 3094*0Sstevel@tonic-gate break; 3095*0Sstevel@tonic-gate case DDI_INTROP_GETPRI: 3096*0Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 3097*0Sstevel@tonic-gate return (DDI_FAILURE); 3098*0Sstevel@tonic-gate *(int *)result = ispec->intrspec_pri; 3099*0Sstevel@tonic-gate break; 3100*0Sstevel@tonic-gate case DDI_INTROP_SETPRI: 3101*0Sstevel@tonic-gate /* Validate the interrupt priority passed to us */ 3102*0Sstevel@tonic-gate if (*(int *)result > LOCK_LEVEL) 3103*0Sstevel@tonic-gate return (DDI_FAILURE); 3104*0Sstevel@tonic-gate 3105*0Sstevel@tonic-gate /* Ensure that PSM is all initialized and ispec is ok */ 3106*0Sstevel@tonic-gate if ((psm_intr_ops == NULL) || 3107*0Sstevel@tonic-gate ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)) 3108*0Sstevel@tonic-gate return (DDI_FAILURE); 3109*0Sstevel@tonic-gate 3110*0Sstevel@tonic-gate /* Change the priority */ 3111*0Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) == 3112*0Sstevel@tonic-gate PSM_FAILURE) 3113*0Sstevel@tonic-gate return (DDI_FAILURE); 3114*0Sstevel@tonic-gate 3115*0Sstevel@tonic-gate /* update the ispec with the new priority */ 3116*0Sstevel@tonic-gate ispec->intrspec_pri = *(int *)result; 3117*0Sstevel@tonic-gate break; 3118*0Sstevel@tonic-gate case DDI_INTROP_ADDISR: 3119*0Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 3120*0Sstevel@tonic-gate return (DDI_FAILURE); 3121*0Sstevel@tonic-gate ispec->intrspec_func = hdlp->ih_cb_func; 3122*0Sstevel@tonic-gate break; 3123*0Sstevel@tonic-gate case DDI_INTROP_REMISR: 3124*0Sstevel@tonic-gate if ((ispec = 
rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 3125*0Sstevel@tonic-gate return (DDI_FAILURE); 3126*0Sstevel@tonic-gate ispec->intrspec_func = (uint_t (*)()) 0; 3127*0Sstevel@tonic-gate break; 3128*0Sstevel@tonic-gate case DDI_INTROP_ENABLE: 3129*0Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 3130*0Sstevel@tonic-gate return (DDI_FAILURE); 3131*0Sstevel@tonic-gate 3132*0Sstevel@tonic-gate /* Call psmi to translate irq with the dip */ 3133*0Sstevel@tonic-gate if (psm_intr_ops == NULL) 3134*0Sstevel@tonic-gate return (DDI_FAILURE); 3135*0Sstevel@tonic-gate 3136*0Sstevel@tonic-gate hdlp->ih_private = (void *)ispec; 3137*0Sstevel@tonic-gate (void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, 3138*0Sstevel@tonic-gate (int *)&hdlp->ih_vector); 3139*0Sstevel@tonic-gate 3140*0Sstevel@tonic-gate /* Add the interrupt handler */ 3141*0Sstevel@tonic-gate if (!add_avintr((void *)hdlp, ispec->intrspec_pri, 3142*0Sstevel@tonic-gate hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector, 3143*0Sstevel@tonic-gate hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, rdip)) 3144*0Sstevel@tonic-gate return (DDI_FAILURE); 3145*0Sstevel@tonic-gate break; 3146*0Sstevel@tonic-gate case DDI_INTROP_DISABLE: 3147*0Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 3148*0Sstevel@tonic-gate return (DDI_FAILURE); 3149*0Sstevel@tonic-gate 3150*0Sstevel@tonic-gate /* Call psm_ops() to translate irq with the dip */ 3151*0Sstevel@tonic-gate if (psm_intr_ops == NULL) 3152*0Sstevel@tonic-gate return (DDI_FAILURE); 3153*0Sstevel@tonic-gate 3154*0Sstevel@tonic-gate hdlp->ih_private = (void *)ispec; 3155*0Sstevel@tonic-gate (void) (*psm_intr_ops)(rdip, hdlp, 3156*0Sstevel@tonic-gate PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector); 3157*0Sstevel@tonic-gate 3158*0Sstevel@tonic-gate /* Remove the interrupt handler */ 3159*0Sstevel@tonic-gate rem_avintr((void *)hdlp, ispec->intrspec_pri, 3160*0Sstevel@tonic-gate hdlp->ih_cb_func, hdlp->ih_vector); 3161*0Sstevel@tonic-gate break; 3162*0Sstevel@tonic-gate case DDI_INTROP_SETMASK: 3163*0Sstevel@tonic-gate if (psm_intr_ops == NULL) 3164*0Sstevel@tonic-gate return (DDI_FAILURE); 3165*0Sstevel@tonic-gate 3166*0Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL)) 3167*0Sstevel@tonic-gate return (DDI_FAILURE); 3168*0Sstevel@tonic-gate break; 3169*0Sstevel@tonic-gate case DDI_INTROP_CLRMASK: 3170*0Sstevel@tonic-gate if (psm_intr_ops == NULL) 3171*0Sstevel@tonic-gate return (DDI_FAILURE); 3172*0Sstevel@tonic-gate 3173*0Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL)) 3174*0Sstevel@tonic-gate return (DDI_FAILURE); 3175*0Sstevel@tonic-gate break; 3176*0Sstevel@tonic-gate case DDI_INTROP_GETPENDING: 3177*0Sstevel@tonic-gate if (psm_intr_ops == NULL) 3178*0Sstevel@tonic-gate return (DDI_FAILURE); 3179*0Sstevel@tonic-gate 3180*0Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING, 3181*0Sstevel@tonic-gate result)) { 3182*0Sstevel@tonic-gate *(int *)result = 0; 3183*0Sstevel@tonic-gate return (DDI_FAILURE); 3184*0Sstevel@tonic-gate } 3185*0Sstevel@tonic-gate break; 3186*0Sstevel@tonic-gate case DDI_INTROP_NINTRS: 3187*0Sstevel@tonic-gate if ((pdp = ddi_get_parent_data(rdip)) == NULL) 3188*0Sstevel@tonic-gate return (DDI_FAILURE); 3189*0Sstevel@tonic-gate *(int *)result = pdp->par_nintr; 3190*0Sstevel@tonic-gate if (pdp->par_nintr == 0) { 3191*0Sstevel@tonic-gate /* 3192*0Sstevel@tonic-gate * Special case for 'pcic' driver' only. 
This driver 3193*0Sstevel@tonic-gate * is a child of 'isa' and 'rootnex' drivers. 3194*0Sstevel@tonic-gate * 3195*0Sstevel@tonic-gate * See detailed comments on this in the function 3196*0Sstevel@tonic-gate * rootnex_get_ispec(). 3197*0Sstevel@tonic-gate * 3198*0Sstevel@tonic-gate * Children of 'pcic' send the 'NINTRS' request all the 3199*0Sstevel@tonic-gate * way to the rootnex driver. But the 'pdp->par_nintr' 3200*0Sstevel@tonic-gate * field may not be initialized. So, we fake it here 3201*0Sstevel@tonic-gate * to return 1 (a la what the PCMCIA nexus does). 3202*0Sstevel@tonic-gate */ 3203*0Sstevel@tonic-gate if (strcmp(ddi_get_name(rdip), "pcic") == 0) 3204*0Sstevel@tonic-gate *(int *)result = 1; 3205*0Sstevel@tonic-gate } 3206*0Sstevel@tonic-gate break; 3207*0Sstevel@tonic-gate case DDI_INTROP_SUPPORTED_TYPES: 3208*0Sstevel@tonic-gate *(int *)result = 0; 3209*0Sstevel@tonic-gate *(int *)result |= DDI_INTR_TYPE_FIXED; /* Always ... */ 3210*0Sstevel@tonic-gate break; 3211*0Sstevel@tonic-gate case DDI_INTROP_NAVAIL: 3212*0Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 3213*0Sstevel@tonic-gate return (DDI_FAILURE); 3214*0Sstevel@tonic-gate 3215*0Sstevel@tonic-gate if (psm_intr_ops == NULL) { 3216*0Sstevel@tonic-gate *(int *)result = 1; 3217*0Sstevel@tonic-gate break; 3218*0Sstevel@tonic-gate } 3219*0Sstevel@tonic-gate 3220*0Sstevel@tonic-gate /* Priority in the handle not initialized yet */ 3221*0Sstevel@tonic-gate hdlp->ih_pri = ispec->intrspec_pri; 3222*0Sstevel@tonic-gate (void) (*psm_intr_ops)(rdip, hdlp, 3223*0Sstevel@tonic-gate PSM_INTR_OP_NAVAIL_VECTORS, result); 3224*0Sstevel@tonic-gate break; 3225*0Sstevel@tonic-gate default: 3226*0Sstevel@tonic-gate return (DDI_FAILURE); 3227*0Sstevel@tonic-gate } 3228*0Sstevel@tonic-gate 3229*0Sstevel@tonic-gate return (DDI_SUCCESS); 3230*0Sstevel@tonic-gate } 3231*0Sstevel@tonic-gate 3232*0Sstevel@tonic-gate 3233*0Sstevel@tonic-gate /* 3234*0Sstevel@tonic-gate * Get the physical address of an object described by "dmareq". 3235*0Sstevel@tonic-gate * A "segsize" of zero is used to initialize the priv_handle *php. 3236*0Sstevel@tonic-gate * Subsequent calls with a non-zero "segsize" return the corresponding 3237*0Sstevel@tonic-gate * physical address of the DMA object. 3238*0Sstevel@tonic-gate * The function returns a 64-bit physical address. 3239*0Sstevel@tonic-gate */ 3240*0Sstevel@tonic-gate uint64_t 3241*0Sstevel@tonic-gate rootnex_get_phyaddr(struct ddi_dma_req *dmareq, uint_t segsize, 3242*0Sstevel@tonic-gate struct priv_handle *php) 3243*0Sstevel@tonic-gate { 3244*0Sstevel@tonic-gate size_t offset; 3245*0Sstevel@tonic-gate page_t *pp, **pplist; 3246*0Sstevel@tonic-gate caddr_t vaddr, bvaddr; 3247*0Sstevel@tonic-gate struct as *asp; 3248*0Sstevel@tonic-gate int index; 3249*0Sstevel@tonic-gate uint64_t segmentpadr; 3250*0Sstevel@tonic-gate 3251*0Sstevel@tonic-gate switch (dmareq->dmar_object.dmao_type) { 3252*0Sstevel@tonic-gate case DMA_OTYP_PAGES: 3253*0Sstevel@tonic-gate if (segsize) { 3254*0Sstevel@tonic-gate pp = php->ph_u.pp; 3255*0Sstevel@tonic-gate vaddr = php->ph_vaddr; 3256*0Sstevel@tonic-gate offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 3257*0Sstevel@tonic-gate vaddr += segsize; 3258*0Sstevel@tonic-gate if ((offset += segsize) >= MMU_PAGESIZE) { 3259*0Sstevel@tonic-gate /* 3260*0Sstevel@tonic-gate * crossed page boundary, get to the next page.
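 * For example, with MMU_PAGESIZE of 0x1000: an in-page offset of
 * 0xf00 plus a segsize of 0x200 gives 0x1100, which is >= MMU_PAGESIZE,
 * so the offset is masked back to 0x100 and pp advances via p_next.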
3261*0Sstevel@tonic-gate */ 3262*0Sstevel@tonic-gate offset &= MMU_PAGEOFFSET; 3263*0Sstevel@tonic-gate pp = pp->p_next; 3264*0Sstevel@tonic-gate } 3265*0Sstevel@tonic-gate } else { 3266*0Sstevel@tonic-gate /* 3267*0Sstevel@tonic-gate * Initialize the priv_handle structure. 3268*0Sstevel@tonic-gate */ 3269*0Sstevel@tonic-gate pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp; 3270*0Sstevel@tonic-gate offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset; 3271*0Sstevel@tonic-gate vaddr = (caddr_t)offset; 3272*0Sstevel@tonic-gate php->ph_mapinfo = DMAMI_PAGES; 3273*0Sstevel@tonic-gate } 3274*0Sstevel@tonic-gate php->ph_u.pp = pp; 3275*0Sstevel@tonic-gate php->ph_vaddr = vaddr; 3276*0Sstevel@tonic-gate segmentpadr = (uint64_t)offset + ptob64(page_pptonum(pp)); 3277*0Sstevel@tonic-gate break; 3278*0Sstevel@tonic-gate case DMA_OTYP_VADDR: 3279*0Sstevel@tonic-gate case DMA_OTYP_BUFVADDR: 3280*0Sstevel@tonic-gate if (segsize) { 3281*0Sstevel@tonic-gate asp = php->ph_u.asp; 3282*0Sstevel@tonic-gate vaddr = php->ph_vaddr; 3283*0Sstevel@tonic-gate vaddr += segsize; 3284*0Sstevel@tonic-gate } else { 3285*0Sstevel@tonic-gate /* 3286*0Sstevel@tonic-gate * Initialize the priv_handle structure. 3287*0Sstevel@tonic-gate */ 3288*0Sstevel@tonic-gate vaddr = dmareq->dmar_object.dmao_obj.virt_obj.v_addr; 3289*0Sstevel@tonic-gate asp = dmareq->dmar_object.dmao_obj.virt_obj.v_as; 3290*0Sstevel@tonic-gate if (asp == NULL) { 3291*0Sstevel@tonic-gate php->ph_mapinfo = DMAMI_KVADR; 3292*0Sstevel@tonic-gate asp = &kas; 3293*0Sstevel@tonic-gate } else { 3294*0Sstevel@tonic-gate php->ph_mapinfo = DMAMI_UVADR; 3295*0Sstevel@tonic-gate } 3296*0Sstevel@tonic-gate php->ph_u.asp = asp; 3297*0Sstevel@tonic-gate } 3298*0Sstevel@tonic-gate pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv; 3299*0Sstevel@tonic-gate offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 3300*0Sstevel@tonic-gate if (pplist == NULL) { 3301*0Sstevel@tonic-gate segmentpadr = (uint64_t)offset + 3302*0Sstevel@tonic-gate ptob64(hat_getpfnum(asp->a_hat, vaddr)); 3303*0Sstevel@tonic-gate } else { 3304*0Sstevel@tonic-gate bvaddr = dmareq->dmar_object.dmao_obj.virt_obj.v_addr; 3305*0Sstevel@tonic-gate index = btop(((ulong_t)bvaddr & MMU_PAGEOFFSET) + 3306*0Sstevel@tonic-gate vaddr - bvaddr); 3307*0Sstevel@tonic-gate segmentpadr = (uint64_t)offset + 3308*0Sstevel@tonic-gate ptob64(page_pptonum(pplist[index])); 3309*0Sstevel@tonic-gate } 3310*0Sstevel@tonic-gate php->ph_vaddr = vaddr; 3311*0Sstevel@tonic-gate break; 3312*0Sstevel@tonic-gate default: 3313*0Sstevel@tonic-gate panic("rootnex_get_phyaddr"); 3314*0Sstevel@tonic-gate /*NOTREACHED*/ 3315*0Sstevel@tonic-gate } 3316*0Sstevel@tonic-gate return (segmentpadr); 3317*0Sstevel@tonic-gate } 3318
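/*
 * Illustrative usage sketch for rootnex_get_phyaddr() (not part of the
 * driver; its real callers are presumably the DMA breakup/bind paths
 * earlier in this file). The calling convention follows the block
 * comment above the function: a first call with segsize 0 primes the
 * priv_handle, and each later call advances by the size of the segment
 * just consumed. Names such as "remaining" and "program a segment" are
 * placeholders, not real identifiers:
 *
 *	struct priv_handle ph;
 *	uint64_t padr;
 *	uint_t segsize;
 *
 *	padr = rootnex_get_phyaddr(dmareq, 0, &ph);
 *	while (remaining > 0) {
 *		segsize = MIN(remaining,
 *		    MMU_PAGESIZE - (padr & MMU_PAGEOFFSET));
 *		// ... program a segment of "segsize" bytes at "padr" ...
 *		remaining -= segsize;
 *		padr = rootnex_get_phyaddr(dmareq, segsize, &ph);
 *	}
 */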