xref: /onnv-gate/usr/src/uts/i86pc/io/rootnex.c (revision 7617:1e2a36b96537)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51865Sdilpreet  * Common Development and Distribution License (the "License").
61865Sdilpreet  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
227173Smrj  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate /*
27509Smrj  * x86 root nexus driver
280Sstevel@tonic-gate  */
290Sstevel@tonic-gate 
300Sstevel@tonic-gate #include <sys/sysmacros.h>
310Sstevel@tonic-gate #include <sys/conf.h>
320Sstevel@tonic-gate #include <sys/autoconf.h>
330Sstevel@tonic-gate #include <sys/sysmacros.h>
340Sstevel@tonic-gate #include <sys/debug.h>
350Sstevel@tonic-gate #include <sys/psw.h>
360Sstevel@tonic-gate #include <sys/ddidmareq.h>
370Sstevel@tonic-gate #include <sys/promif.h>
380Sstevel@tonic-gate #include <sys/devops.h>
390Sstevel@tonic-gate #include <sys/kmem.h>
400Sstevel@tonic-gate #include <sys/cmn_err.h>
410Sstevel@tonic-gate #include <vm/seg.h>
420Sstevel@tonic-gate #include <vm/seg_kmem.h>
430Sstevel@tonic-gate #include <vm/seg_dev.h>
440Sstevel@tonic-gate #include <sys/vmem.h>
450Sstevel@tonic-gate #include <sys/mman.h>
460Sstevel@tonic-gate #include <vm/hat.h>
470Sstevel@tonic-gate #include <vm/as.h>
480Sstevel@tonic-gate #include <vm/page.h>
490Sstevel@tonic-gate #include <sys/avintr.h>
500Sstevel@tonic-gate #include <sys/errno.h>
510Sstevel@tonic-gate #include <sys/modctl.h>
520Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
530Sstevel@tonic-gate #include <sys/sunddi.h>
540Sstevel@tonic-gate #include <sys/sunndi.h>
55916Sschwartz #include <sys/mach_intr.h>
560Sstevel@tonic-gate #include <sys/psm.h>
570Sstevel@tonic-gate #include <sys/ontrap.h>
58509Smrj #include <sys/atomic.h>
59509Smrj #include <sys/sdt.h>
60509Smrj #include <sys/rootnex.h>
61509Smrj #include <vm/hat_i86.h>
621865Sdilpreet #include <sys/ddifm.h>
635251Smrj #include <sys/ddi_isa.h>
64509Smrj 
655084Sjohnlev #ifdef __xpv
665084Sjohnlev #include <sys/bootinfo.h>
675084Sjohnlev #include <sys/hypervisor.h>
685084Sjohnlev #include <sys/bootconf.h>
695084Sjohnlev #include <vm/kboot_mmu.h>
707613SVikram.Hegde@Sun.COM #else
717589SVikram.Hegde@Sun.COM #include <sys/intel_iommu.h>
727613SVikram.Hegde@Sun.COM #endif
737613SVikram.Hegde@Sun.COM 
747589SVikram.Hegde@Sun.COM 
75509Smrj /*
76509Smrj  * enable/disable extra checking of function parameters. Useful for debugging
77509Smrj  * drivers.
78509Smrj  */
79509Smrj #ifdef	DEBUG
80509Smrj int rootnex_alloc_check_parms = 1;
81509Smrj int rootnex_bind_check_parms = 1;
82509Smrj int rootnex_bind_check_inuse = 1;
83509Smrj int rootnex_unbind_verify_buffer = 0;
84509Smrj int rootnex_sync_check_parms = 1;
85509Smrj #else
86509Smrj int rootnex_alloc_check_parms = 0;
87509Smrj int rootnex_bind_check_parms = 0;
88509Smrj int rootnex_bind_check_inuse = 0;
89509Smrj int rootnex_unbind_verify_buffer = 0;
90509Smrj int rootnex_sync_check_parms = 0;
91509Smrj #endif
92509Smrj 
931414Scindi /* Master Abort and Target Abort panic flag */
941414Scindi int rootnex_fm_ma_ta_panic_flag = 0;
951414Scindi 
96509Smrj /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
970Sstevel@tonic-gate int rootnex_bind_fail = 1;
980Sstevel@tonic-gate int rootnex_bind_warn = 1;
990Sstevel@tonic-gate uint8_t *rootnex_warn_list;
1000Sstevel@tonic-gate /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
1010Sstevel@tonic-gate #define	ROOTNEX_BIND_WARNING	(0x1 << 0)
1020Sstevel@tonic-gate 
1030Sstevel@tonic-gate /*
104509Smrj  * revert back to old broken behavior of always sync'ing entire copy buffer.
105509Smrj  * This is useful if be have a buggy driver which doesn't correctly pass in
106509Smrj  * the offset and size into ddi_dma_sync().
1070Sstevel@tonic-gate  */
108509Smrj int rootnex_sync_ignore_params = 0;
1090Sstevel@tonic-gate 
1100Sstevel@tonic-gate /*
111509Smrj  * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
112509Smrj  * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
113509Smrj  * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
114509Smrj  * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
115509Smrj  * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
116509Smrj  * (< 8K). We will still need to allocate the copy buffer during bind though
117509Smrj  * (if we need one). These can only be modified in /etc/system before rootnex
118509Smrj  * attach.
1190Sstevel@tonic-gate  */
120509Smrj #if defined(__amd64)
121509Smrj int rootnex_prealloc_cookies = 65;
122509Smrj int rootnex_prealloc_windows = 4;
123509Smrj int rootnex_prealloc_copybuf = 2;
124509Smrj #else
125509Smrj int rootnex_prealloc_cookies = 33;
126509Smrj int rootnex_prealloc_windows = 4;
127509Smrj int rootnex_prealloc_copybuf = 2;
128509Smrj #endif
129509Smrj 
130509Smrj /* driver global state */
131509Smrj static rootnex_state_t *rootnex_state;
132509Smrj 
133509Smrj /* shortcut to rootnex counters */
134509Smrj static uint64_t *rootnex_cnt;
1350Sstevel@tonic-gate 
1360Sstevel@tonic-gate /*
137509Smrj  * XXX - does x86 even need these or are they left over from the SPARC days?
1380Sstevel@tonic-gate  */
139509Smrj /* statically defined integer/boolean properties for the root node */
140509Smrj static rootnex_intprop_t rootnex_intprp[] = {
141509Smrj 	{ "PAGESIZE",			PAGESIZE },
142509Smrj 	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
143509Smrj 	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
144509Smrj 	{ DDI_RELATIVE_ADDRESSING,	1 },
145509Smrj };
146509Smrj #define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
147509Smrj 
1485084Sjohnlev #ifdef __xpv
1495084Sjohnlev typedef maddr_t rootnex_addr_t;
1505084Sjohnlev #define	ROOTNEX_PADDR_TO_RBASE(xinfo, pa)	\
1515084Sjohnlev 	(DOMAIN_IS_INITDOMAIN(xinfo) ? pa_to_ma(pa) : (pa))
1525084Sjohnlev #else
1535084Sjohnlev typedef paddr_t rootnex_addr_t;
1545084Sjohnlev #endif
1555084Sjohnlev 
1567613SVikram.Hegde@Sun.COM #if !defined(__xpv)
1577613SVikram.Hegde@Sun.COM char _depends_on[] = "mach/pcplusmp misc/iommulib";
1587613SVikram.Hegde@Sun.COM #endif
159509Smrj 
160509Smrj static struct cb_ops rootnex_cb_ops = {
161509Smrj 	nodev,		/* open */
162509Smrj 	nodev,		/* close */
163509Smrj 	nodev,		/* strategy */
164509Smrj 	nodev,		/* print */
165509Smrj 	nodev,		/* dump */
166509Smrj 	nodev,		/* read */
167509Smrj 	nodev,		/* write */
168509Smrj 	nodev,		/* ioctl */
169509Smrj 	nodev,		/* devmap */
170509Smrj 	nodev,		/* mmap */
171509Smrj 	nodev,		/* segmap */
172509Smrj 	nochpoll,	/* chpoll */
173509Smrj 	ddi_prop_op,	/* cb_prop_op */
174509Smrj 	NULL,		/* struct streamtab */
175509Smrj 	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
176509Smrj 	CB_REV,		/* Rev */
177509Smrj 	nodev,		/* cb_aread */
178509Smrj 	nodev		/* cb_awrite */
179509Smrj };
180509Smrj 
181509Smrj static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
1820Sstevel@tonic-gate     off_t offset, off_t len, caddr_t *vaddrp);
183509Smrj static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
1840Sstevel@tonic-gate     struct hat *hat, struct seg *seg, caddr_t addr,
1850Sstevel@tonic-gate     struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
186509Smrj static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
1870Sstevel@tonic-gate     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
188509Smrj static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
189509Smrj     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
190509Smrj     ddi_dma_handle_t *handlep);
191509Smrj static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
192509Smrj     ddi_dma_handle_t handle);
193509Smrj static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
194509Smrj     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
195509Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
196509Smrj static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
197509Smrj     ddi_dma_handle_t handle);
198509Smrj static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
199509Smrj     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
200509Smrj static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
201509Smrj     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
202509Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
203509Smrj static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
2040Sstevel@tonic-gate     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
2050Sstevel@tonic-gate     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
206509Smrj static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
207509Smrj     ddi_ctl_enum_t ctlop, void *arg, void *result);
2081865Sdilpreet static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
2091865Sdilpreet     ddi_iblock_cookie_t *ibc);
210509Smrj static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
211509Smrj     ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
212509Smrj 
2137613SVikram.Hegde@Sun.COM static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
2147613SVikram.Hegde@Sun.COM     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
2157613SVikram.Hegde@Sun.COM     ddi_dma_handle_t *handlep);
2167613SVikram.Hegde@Sun.COM static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
2177613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
2187613SVikram.Hegde@Sun.COM static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
2197613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
2207613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
2217613SVikram.Hegde@Sun.COM static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2227613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
223*7617SVikram.Hegde@Sun.COM #if !defined(__xpv)
2247613SVikram.Hegde@Sun.COM static void rootnex_coredma_reset_cookies(dev_info_t *dip,
2257613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
2267613SVikram.Hegde@Sun.COM static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
2277613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
228*7617SVikram.Hegde@Sun.COM #endif
2297613SVikram.Hegde@Sun.COM static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip,
2307613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
2317613SVikram.Hegde@Sun.COM static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip,
2327613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
2337613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
2347613SVikram.Hegde@Sun.COM static int rootnex_coredma_map(dev_info_t *dip, dev_info_t *rdip,
2357613SVikram.Hegde@Sun.COM     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
2367613SVikram.Hegde@Sun.COM static int rootnex_coredma_mctl(dev_info_t *dip, dev_info_t *rdip,
2377613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, enum ddi_dma_ctlops request, off_t *offp,
2387613SVikram.Hegde@Sun.COM     size_t *lenp, caddr_t *objpp, uint_t cache_flags);
2390Sstevel@tonic-gate 
2400Sstevel@tonic-gate static struct bus_ops rootnex_bus_ops = {
2410Sstevel@tonic-gate 	BUSO_REV,
2420Sstevel@tonic-gate 	rootnex_map,
2430Sstevel@tonic-gate 	NULL,
2440Sstevel@tonic-gate 	NULL,
2450Sstevel@tonic-gate 	NULL,
2460Sstevel@tonic-gate 	rootnex_map_fault,
2470Sstevel@tonic-gate 	rootnex_dma_map,
2480Sstevel@tonic-gate 	rootnex_dma_allochdl,
2490Sstevel@tonic-gate 	rootnex_dma_freehdl,
2500Sstevel@tonic-gate 	rootnex_dma_bindhdl,
2510Sstevel@tonic-gate 	rootnex_dma_unbindhdl,
252509Smrj 	rootnex_dma_sync,
2530Sstevel@tonic-gate 	rootnex_dma_win,
2540Sstevel@tonic-gate 	rootnex_dma_mctl,
2550Sstevel@tonic-gate 	rootnex_ctlops,
2560Sstevel@tonic-gate 	ddi_bus_prop_op,
2570Sstevel@tonic-gate 	i_ddi_rootnex_get_eventcookie,
2580Sstevel@tonic-gate 	i_ddi_rootnex_add_eventcall,
2590Sstevel@tonic-gate 	i_ddi_rootnex_remove_eventcall,
2600Sstevel@tonic-gate 	i_ddi_rootnex_post_event,
2610Sstevel@tonic-gate 	0,			/* bus_intr_ctl */
2620Sstevel@tonic-gate 	0,			/* bus_config */
2630Sstevel@tonic-gate 	0,			/* bus_unconfig */
2641865Sdilpreet 	rootnex_fm_init,	/* bus_fm_init */
2650Sstevel@tonic-gate 	NULL,			/* bus_fm_fini */
2660Sstevel@tonic-gate 	NULL,			/* bus_fm_access_enter */
2670Sstevel@tonic-gate 	NULL,			/* bus_fm_access_exit */
2680Sstevel@tonic-gate 	NULL,			/* bus_powr */
2690Sstevel@tonic-gate 	rootnex_intr_ops	/* bus_intr_op */
2700Sstevel@tonic-gate };
2710Sstevel@tonic-gate 
272509Smrj static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
273509Smrj static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
2740Sstevel@tonic-gate 
2750Sstevel@tonic-gate static struct dev_ops rootnex_ops = {
2760Sstevel@tonic-gate 	DEVO_REV,
277509Smrj 	0,
278509Smrj 	ddi_no_info,
279509Smrj 	nulldev,
2800Sstevel@tonic-gate 	nulldev,
2810Sstevel@tonic-gate 	rootnex_attach,
282509Smrj 	rootnex_detach,
283509Smrj 	nulldev,
284509Smrj 	&rootnex_cb_ops,
2850Sstevel@tonic-gate 	&rootnex_bus_ops
2860Sstevel@tonic-gate };
2870Sstevel@tonic-gate 
288509Smrj static struct modldrv rootnex_modldrv = {
289509Smrj 	&mod_driverops,
2907542SRichard.Bean@Sun.COM 	"i86pc root nexus",
291509Smrj 	&rootnex_ops
292509Smrj };
293509Smrj 
294509Smrj static struct modlinkage rootnex_modlinkage = {
295509Smrj 	MODREV_1,
296509Smrj 	(void *)&rootnex_modldrv,
297509Smrj 	NULL
298509Smrj };
299509Smrj 
300*7617SVikram.Hegde@Sun.COM #if !defined(__xpv)
3017613SVikram.Hegde@Sun.COM static iommulib_nexops_t iommulib_nexops = {
3027613SVikram.Hegde@Sun.COM 	IOMMU_NEXOPS_VERSION,
3037613SVikram.Hegde@Sun.COM 	"Rootnex IOMMU ops Vers 1.1",
3047613SVikram.Hegde@Sun.COM 	NULL,
3057613SVikram.Hegde@Sun.COM 	rootnex_coredma_allochdl,
3067613SVikram.Hegde@Sun.COM 	rootnex_coredma_freehdl,
3077613SVikram.Hegde@Sun.COM 	rootnex_coredma_bindhdl,
3087613SVikram.Hegde@Sun.COM 	rootnex_coredma_unbindhdl,
3097613SVikram.Hegde@Sun.COM 	rootnex_coredma_reset_cookies,
3107613SVikram.Hegde@Sun.COM 	rootnex_coredma_get_cookies,
3117613SVikram.Hegde@Sun.COM 	rootnex_coredma_sync,
3127613SVikram.Hegde@Sun.COM 	rootnex_coredma_win,
3137613SVikram.Hegde@Sun.COM 	rootnex_coredma_map,
3147613SVikram.Hegde@Sun.COM 	rootnex_coredma_mctl
3157613SVikram.Hegde@Sun.COM };
316*7617SVikram.Hegde@Sun.COM #endif
317509Smrj 
318509Smrj /*
319509Smrj  *  extern hacks
320509Smrj  */
321509Smrj extern struct seg_ops segdev_ops;
322509Smrj extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
323509Smrj #ifdef	DDI_MAP_DEBUG
324509Smrj extern int ddi_map_debug_flag;
325509Smrj #define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
326509Smrj #endif
327509Smrj extern void i86_pp_map(page_t *pp, caddr_t kaddr);
328509Smrj extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
329509Smrj extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
330509Smrj     psm_intr_op_t, int *);
331509Smrj extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
332509Smrj extern void impl_ddi_sunbus_removechild(dev_info_t *dip);
3335251Smrj 
334509Smrj /*
335509Smrj  * Use device arena to use for device control register mappings.
336509Smrj  * Various kernel memory walkers (debugger, dtrace) need to know
337509Smrj  * to avoid this address range to prevent undesired device activity.
338509Smrj  */
339509Smrj extern void *device_arena_alloc(size_t size, int vm_flag);
340509Smrj extern void device_arena_free(void * vaddr, size_t size);
341509Smrj 
342509Smrj 
3430Sstevel@tonic-gate /*
344509Smrj  *  Internal functions
3450Sstevel@tonic-gate  */
346509Smrj static int rootnex_dma_init();
347509Smrj static void rootnex_add_props(dev_info_t *);
348509Smrj static int rootnex_ctl_reportdev(dev_info_t *dip);
349509Smrj static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
350509Smrj static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
351509Smrj static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
352509Smrj static int rootnex_map_handle(ddi_map_req_t *mp);
353509Smrj static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
354509Smrj static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
355509Smrj static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
356509Smrj     ddi_dma_attr_t *attr);
357509Smrj static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
358509Smrj     rootnex_sglinfo_t *sglinfo);
359509Smrj static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
360509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag);
361509Smrj static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
362509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr);
363509Smrj static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
364509Smrj static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
365509Smrj     ddi_dma_attr_t *attr, int kmflag);
366509Smrj static void rootnex_teardown_windows(rootnex_dma_t *dma);
367509Smrj static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
368509Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
369509Smrj static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
370509Smrj     rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
371509Smrj     size_t *copybuf_used, page_t **cur_pp);
372509Smrj static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
373509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
374509Smrj     ddi_dma_attr_t *attr, off_t cur_offset);
375509Smrj static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
376509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp,
377509Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
378509Smrj static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
379509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
380509Smrj static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
381509Smrj     off_t offset, size_t size, uint_t cache_flags);
382509Smrj static int rootnex_verify_buffer(rootnex_dma_t *dma);
3831865Sdilpreet static int rootnex_dma_check(dev_info_t *dip, const void *handle,
3841865Sdilpreet     const void *comp_addr, const void *not_used);
385509Smrj 
386509Smrj /*
387509Smrj  * _init()
388509Smrj  *
389509Smrj  */
3900Sstevel@tonic-gate int
3910Sstevel@tonic-gate _init(void)
3920Sstevel@tonic-gate {
393509Smrj 
394509Smrj 	rootnex_state = NULL;
395509Smrj 	return (mod_install(&rootnex_modlinkage));
3960Sstevel@tonic-gate }
3970Sstevel@tonic-gate 
398509Smrj 
399509Smrj /*
400509Smrj  * _info()
401509Smrj  *
402509Smrj  */
403509Smrj int
404509Smrj _info(struct modinfo *modinfop)
405509Smrj {
406509Smrj 	return (mod_info(&rootnex_modlinkage, modinfop));
407509Smrj }
408509Smrj 
409509Smrj 
410509Smrj /*
411509Smrj  * _fini()
412509Smrj  *
413509Smrj  */
4140Sstevel@tonic-gate int
4150Sstevel@tonic-gate _fini(void)
4160Sstevel@tonic-gate {
4170Sstevel@tonic-gate 	return (EBUSY);
4180Sstevel@tonic-gate }
4190Sstevel@tonic-gate 
4200Sstevel@tonic-gate 
4210Sstevel@tonic-gate /*
422509Smrj  * rootnex_attach()
4230Sstevel@tonic-gate  *
4240Sstevel@tonic-gate  */
425509Smrj static int
426509Smrj rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
427509Smrj {
4281414Scindi 	int fmcap;
429509Smrj 	int e;
430509Smrj 
431509Smrj 	switch (cmd) {
432509Smrj 	case DDI_ATTACH:
433509Smrj 		break;
434509Smrj 	case DDI_RESUME:
435509Smrj 		return (DDI_SUCCESS);
436509Smrj 	default:
437509Smrj 		return (DDI_FAILURE);
438509Smrj 	}
439509Smrj 
440509Smrj 	/*
441509Smrj 	 * We should only have one instance of rootnex. Save it away since we
442509Smrj 	 * don't have an easy way to get it back later.
443509Smrj 	 */
444509Smrj 	ASSERT(rootnex_state == NULL);
445509Smrj 	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);
446509Smrj 
447509Smrj 	rootnex_state->r_dip = dip;
4481414Scindi 	rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
449509Smrj 	rootnex_state->r_reserved_msg_printed = B_FALSE;
450509Smrj 	rootnex_cnt = &rootnex_state->r_counters[0];
4517589SVikram.Hegde@Sun.COM 	rootnex_state->r_intel_iommu_enabled = B_FALSE;
452509Smrj 
4531414Scindi 	/*
4541414Scindi 	 * Set minimum fm capability level for i86pc platforms and then
4551414Scindi 	 * initialize error handling. Since we're the rootnex, we don't
4561414Scindi 	 * care what's returned in the fmcap field.
4571414Scindi 	 */
4581865Sdilpreet 	ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
4591865Sdilpreet 	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
4601414Scindi 	fmcap = ddi_system_fmcap;
4611414Scindi 	ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);
4621414Scindi 
463509Smrj 	/* initialize DMA related state */
464509Smrj 	e = rootnex_dma_init();
465509Smrj 	if (e != DDI_SUCCESS) {
466509Smrj 		kmem_free(rootnex_state, sizeof (rootnex_state_t));
467509Smrj 		return (DDI_FAILURE);
468509Smrj 	}
469509Smrj 
470509Smrj 	/* Add static root node properties */
471509Smrj 	rootnex_add_props(dip);
472509Smrj 
473509Smrj 	/* since we can't call ddi_report_dev() */
474509Smrj 	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));
475509Smrj 
476509Smrj 	/* Initialize rootnex event handle */
477509Smrj 	i_ddi_rootnex_init_events(dip);
478509Smrj 
4797613SVikram.Hegde@Sun.COM #if !defined(__xpv)
4807589SVikram.Hegde@Sun.COM #if defined(__amd64)
4817589SVikram.Hegde@Sun.COM 	/* probe intel iommu */
4827589SVikram.Hegde@Sun.COM 	intel_iommu_probe_and_parse();
4837589SVikram.Hegde@Sun.COM 
4847589SVikram.Hegde@Sun.COM 	/* attach the iommu nodes */
4857589SVikram.Hegde@Sun.COM 	if (intel_iommu_support) {
4867589SVikram.Hegde@Sun.COM 		if (intel_iommu_attach_dmar_nodes() == DDI_SUCCESS) {
4877589SVikram.Hegde@Sun.COM 			rootnex_state->r_intel_iommu_enabled = B_TRUE;
4887589SVikram.Hegde@Sun.COM 		} else {
4897589SVikram.Hegde@Sun.COM 			intel_iommu_release_dmar_info();
4907589SVikram.Hegde@Sun.COM 		}
4917589SVikram.Hegde@Sun.COM 	}
4927589SVikram.Hegde@Sun.COM #endif
4937589SVikram.Hegde@Sun.COM 
4947613SVikram.Hegde@Sun.COM 	e = iommulib_nexus_register(dip, &iommulib_nexops,
4957613SVikram.Hegde@Sun.COM 	    &rootnex_state->r_iommulib_handle);
4967613SVikram.Hegde@Sun.COM 
4977613SVikram.Hegde@Sun.COM 	ASSERT(e == DDI_SUCCESS);
4987613SVikram.Hegde@Sun.COM #endif
4997613SVikram.Hegde@Sun.COM 
500509Smrj 	return (DDI_SUCCESS);
501509Smrj }
502509Smrj 
503509Smrj 
504509Smrj /*
505509Smrj  * rootnex_detach()
506509Smrj  *
507509Smrj  */
5080Sstevel@tonic-gate /*ARGSUSED*/
5090Sstevel@tonic-gate static int
510509Smrj rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
511509Smrj {
512509Smrj 	switch (cmd) {
513509Smrj 	case DDI_SUSPEND:
514509Smrj 		break;
515509Smrj 	default:
516509Smrj 		return (DDI_FAILURE);
517509Smrj 	}
518509Smrj 
519509Smrj 	return (DDI_SUCCESS);
520509Smrj }
521509Smrj 
522509Smrj 
523509Smrj /*
524509Smrj  * rootnex_dma_init()
525509Smrj  *
526509Smrj  */
527509Smrj /*ARGSUSED*/
528509Smrj static int
529509Smrj rootnex_dma_init()
5300Sstevel@tonic-gate {
531509Smrj 	size_t bufsize;
532509Smrj 
533509Smrj 
534509Smrj 	/*
535509Smrj 	 * size of our cookie/window/copybuf state needed in dma bind that we
536509Smrj 	 * pre-alloc in dma_alloc_handle
537509Smrj 	 */
538509Smrj 	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
539509Smrj 	rootnex_state->r_prealloc_size =
540509Smrj 	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
541509Smrj 	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
542509Smrj 	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));
543509Smrj 
544509Smrj 	/*
545509Smrj 	 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
546509Smrj 	 * allocate 16 extra bytes for struct pointer alignment
547509Smrj 	 * (p->dmai_private & dma->dp_prealloc_buffer)
548509Smrj 	 */
549509Smrj 	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
550509Smrj 	    rootnex_state->r_prealloc_size + 0x10;
551509Smrj 	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
552509Smrj 	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
553509Smrj 	if (rootnex_state->r_dmahdl_cache == NULL) {
554509Smrj 		return (DDI_FAILURE);
555509Smrj 	}
5560Sstevel@tonic-gate 
5570Sstevel@tonic-gate 	/*
5580Sstevel@tonic-gate 	 * allocate array to track which major numbers we have printed warnings
5590Sstevel@tonic-gate 	 * for.
5600Sstevel@tonic-gate 	 */
5610Sstevel@tonic-gate 	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
5620Sstevel@tonic-gate 	    KM_SLEEP);
5630Sstevel@tonic-gate 
5640Sstevel@tonic-gate 	return (DDI_SUCCESS);
5650Sstevel@tonic-gate }
5660Sstevel@tonic-gate 
5670Sstevel@tonic-gate 
5680Sstevel@tonic-gate /*
569509Smrj  * rootnex_add_props()
570509Smrj  *
5710Sstevel@tonic-gate  */
5720Sstevel@tonic-gate static void
573509Smrj rootnex_add_props(dev_info_t *dip)
5740Sstevel@tonic-gate {
575509Smrj 	rootnex_intprop_t *rpp;
5760Sstevel@tonic-gate 	int i;
577509Smrj 
578509Smrj 	/* Add static integer/boolean properties to the root node */
579509Smrj 	rpp = rootnex_intprp;
580509Smrj 	for (i = 0; i < NROOT_INTPROPS; i++) {
581509Smrj 		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
582509Smrj 		    rpp[i].prop_name, rpp[i].prop_value);
5830Sstevel@tonic-gate 	}
5840Sstevel@tonic-gate }
5850Sstevel@tonic-gate 
586509Smrj 
587509Smrj 
588509Smrj /*
589509Smrj  * *************************
590509Smrj  *  ctlops related routines
591509Smrj  * *************************
592509Smrj  */
593509Smrj 
5940Sstevel@tonic-gate /*
595509Smrj  * rootnex_ctlops()
596509Smrj  *
5970Sstevel@tonic-gate  */
598693Sgovinda /*ARGSUSED*/
599509Smrj static int
600509Smrj rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
601509Smrj     void *arg, void *result)
602509Smrj {
603509Smrj 	int n, *ptr;
604509Smrj 	struct ddi_parent_private_data *pdp;
605509Smrj 
606509Smrj 	switch (ctlop) {
607509Smrj 	case DDI_CTLOPS_DMAPMAPC:
608509Smrj 		/*
609509Smrj 		 * Return 'partial' to indicate that dma mapping
610509Smrj 		 * has to be done in the main MMU.
611509Smrj 		 */
612509Smrj 		return (DDI_DMA_PARTIAL);
613509Smrj 
614509Smrj 	case DDI_CTLOPS_BTOP:
615509Smrj 		/*
616509Smrj 		 * Convert byte count input to physical page units.
617509Smrj 		 * (byte counts that are not a page-size multiple
618509Smrj 		 * are rounded down)
619509Smrj 		 */
620509Smrj 		*(ulong_t *)result = btop(*(ulong_t *)arg);
621509Smrj 		return (DDI_SUCCESS);
622509Smrj 
623509Smrj 	case DDI_CTLOPS_PTOB:
624509Smrj 		/*
625509Smrj 		 * Convert size in physical pages to bytes
626509Smrj 		 */
627509Smrj 		*(ulong_t *)result = ptob(*(ulong_t *)arg);
628509Smrj 		return (DDI_SUCCESS);
629509Smrj 
630509Smrj 	case DDI_CTLOPS_BTOPR:
631509Smrj 		/*
632509Smrj 		 * Convert byte count input to physical page units
633509Smrj 		 * (byte counts that are not a page-size multiple
634509Smrj 		 * are rounded up)
635509Smrj 		 */
636509Smrj 		*(ulong_t *)result = btopr(*(ulong_t *)arg);
637509Smrj 		return (DDI_SUCCESS);
638509Smrj 
639509Smrj 	case DDI_CTLOPS_INITCHILD:
640509Smrj 		return (impl_ddi_sunbus_initchild(arg));
641509Smrj 
642509Smrj 	case DDI_CTLOPS_UNINITCHILD:
643509Smrj 		impl_ddi_sunbus_removechild(arg);
644509Smrj 		return (DDI_SUCCESS);
645509Smrj 
646509Smrj 	case DDI_CTLOPS_REPORTDEV:
647509Smrj 		return (rootnex_ctl_reportdev(rdip));
648509Smrj 
649509Smrj 	case DDI_CTLOPS_IOMIN:
650509Smrj 		/*
651509Smrj 		 * Nothing to do here but reflect back..
652509Smrj 		 */
653509Smrj 		return (DDI_SUCCESS);
654509Smrj 
655509Smrj 	case DDI_CTLOPS_REGSIZE:
656509Smrj 	case DDI_CTLOPS_NREGS:
657509Smrj 		break;
658509Smrj 
659509Smrj 	case DDI_CTLOPS_SIDDEV:
660509Smrj 		if (ndi_dev_is_prom_node(rdip))
661509Smrj 			return (DDI_SUCCESS);
662509Smrj 		if (ndi_dev_is_persistent_node(rdip))
663509Smrj 			return (DDI_SUCCESS);
664509Smrj 		return (DDI_FAILURE);
665509Smrj 
666509Smrj 	case DDI_CTLOPS_POWER:
667509Smrj 		return ((*pm_platform_power)((power_req_t *)arg));
668509Smrj 
669693Sgovinda 	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
670509Smrj 	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
671509Smrj 	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
672509Smrj 	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
673693Sgovinda 	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
674693Sgovinda 	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
675509Smrj 		if (!rootnex_state->r_reserved_msg_printed) {
676509Smrj 			rootnex_state->r_reserved_msg_printed = B_TRUE;
677509Smrj 			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
678509Smrj 			    "1 or more reserved/obsolete operations.");
679509Smrj 		}
680509Smrj 		return (DDI_FAILURE);
681509Smrj 
682509Smrj 	default:
683509Smrj 		return (DDI_FAILURE);
684509Smrj 	}
685509Smrj 	/*
686509Smrj 	 * The rest are for "hardware" properties
687509Smrj 	 */
688509Smrj 	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
689509Smrj 		return (DDI_FAILURE);
690509Smrj 
691509Smrj 	if (ctlop == DDI_CTLOPS_NREGS) {
692509Smrj 		ptr = (int *)result;
693509Smrj 		*ptr = pdp->par_nreg;
694509Smrj 	} else {
695509Smrj 		off_t *size = (off_t *)result;
696509Smrj 
697509Smrj 		ptr = (int *)arg;
698509Smrj 		n = *ptr;
699509Smrj 		if (n >= pdp->par_nreg) {
700509Smrj 			return (DDI_FAILURE);
701509Smrj 		}
702509Smrj 		*size = (off_t)pdp->par_reg[n].regspec_size;
703509Smrj 	}
704509Smrj 	return (DDI_SUCCESS);
705509Smrj }
7060Sstevel@tonic-gate 
7070Sstevel@tonic-gate 
7080Sstevel@tonic-gate /*
709509Smrj  * rootnex_ctl_reportdev()
710509Smrj  *
7110Sstevel@tonic-gate  */
7120Sstevel@tonic-gate static int
713509Smrj rootnex_ctl_reportdev(dev_info_t *dev)
7140Sstevel@tonic-gate {
715509Smrj 	int i, n, len, f_len = 0;
716509Smrj 	char *buf;
717509Smrj 
718509Smrj 	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
719509Smrj 	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
720509Smrj 	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
721509Smrj 	len = strlen(buf);
722509Smrj 
723509Smrj 	for (i = 0; i < sparc_pd_getnreg(dev); i++) {
724509Smrj 
725509Smrj 		struct regspec *rp = sparc_pd_getreg(dev, i);
726509Smrj 
727509Smrj 		if (i == 0)
728509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
729509Smrj 			    ": ");
730509Smrj 		else
731509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
732509Smrj 			    " and ");
733509Smrj 		len = strlen(buf);
734509Smrj 
735509Smrj 		switch (rp->regspec_bustype) {
736509Smrj 
737509Smrj 		case BTEISA:
738509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
739509Smrj 			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
7400Sstevel@tonic-gate 			break;
741509Smrj 
742509Smrj 		case BTISA:
743509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
744509Smrj 			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
7450Sstevel@tonic-gate 			break;
746509Smrj 
747509Smrj 		default:
748509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
749509Smrj 			    "space %x offset %x",
750509Smrj 			    rp->regspec_bustype, rp->regspec_addr);
7510Sstevel@tonic-gate 			break;
7520Sstevel@tonic-gate 		}
753509Smrj 		len = strlen(buf);
7540Sstevel@tonic-gate 	}
755509Smrj 	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
756509Smrj 		int pri;
757509Smrj 
758509Smrj 		if (i != 0) {
759509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
760509Smrj 			    ",");
761509Smrj 			len = strlen(buf);
762509Smrj 		}
763509Smrj 		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
764509Smrj 		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
765509Smrj 		    " sparc ipl %d", pri);
766509Smrj 		len = strlen(buf);
7670Sstevel@tonic-gate 	}
768509Smrj #ifdef DEBUG
769509Smrj 	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
770509Smrj 		cmn_err(CE_NOTE, "next message is truncated: "
771509Smrj 		    "printed length 1024, real length %d", f_len);
772509Smrj 	}
773509Smrj #endif /* DEBUG */
774509Smrj 	cmn_err(CE_CONT, "?%s\n", buf);
775509Smrj 	kmem_free(buf, REPORTDEV_BUFSIZE);
7760Sstevel@tonic-gate 	return (DDI_SUCCESS);
7770Sstevel@tonic-gate }
7780Sstevel@tonic-gate 
779509Smrj 
780509Smrj /*
781509Smrj  * ******************
782509Smrj  *  map related code
783509Smrj  * ******************
784509Smrj  */
785509Smrj 
786509Smrj /*
787509Smrj  * rootnex_map()
788509Smrj  *
789509Smrj  */
7900Sstevel@tonic-gate static int
791509Smrj rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
792509Smrj     off_t len, caddr_t *vaddrp)
7930Sstevel@tonic-gate {
7940Sstevel@tonic-gate 	struct regspec *rp, tmp_reg;
7950Sstevel@tonic-gate 	ddi_map_req_t mr = *mp;		/* Get private copy of request */
7960Sstevel@tonic-gate 	int error;
7970Sstevel@tonic-gate 
7980Sstevel@tonic-gate 	mp = &mr;
7990Sstevel@tonic-gate 
8000Sstevel@tonic-gate 	switch (mp->map_op)  {
8010Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
8020Sstevel@tonic-gate 	case DDI_MO_UNMAP:
8030Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
8040Sstevel@tonic-gate 		break;
8050Sstevel@tonic-gate 	default:
8060Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8070Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
8080Sstevel@tonic-gate 		    mp->map_op);
8090Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8100Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
8110Sstevel@tonic-gate 	}
8120Sstevel@tonic-gate 
8130Sstevel@tonic-gate 	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
8140Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8150Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
8160Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8170Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
8180Sstevel@tonic-gate 	}
8190Sstevel@tonic-gate 
8200Sstevel@tonic-gate 	/*
8210Sstevel@tonic-gate 	 * First, if given an rnumber, convert it to a regspec...
8220Sstevel@tonic-gate 	 * (Presumably, this is on behalf of a child of the root node?)
8230Sstevel@tonic-gate 	 */
8240Sstevel@tonic-gate 
8250Sstevel@tonic-gate 	if (mp->map_type == DDI_MT_RNUMBER)  {
8260Sstevel@tonic-gate 
8270Sstevel@tonic-gate 		int rnumber = mp->map_obj.rnumber;
8280Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8290Sstevel@tonic-gate 		static char *out_of_range =
8300Sstevel@tonic-gate 		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
8310Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8320Sstevel@tonic-gate 
8330Sstevel@tonic-gate 		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
8340Sstevel@tonic-gate 		if (rp == NULL)  {
8350Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8360Sstevel@tonic-gate 			cmn_err(CE_WARN, out_of_range, rnumber,
8370Sstevel@tonic-gate 			    ddi_get_name(rdip));
8380Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8390Sstevel@tonic-gate 			return (DDI_ME_RNUMBER_RANGE);
8400Sstevel@tonic-gate 		}
8410Sstevel@tonic-gate 
8420Sstevel@tonic-gate 		/*
8430Sstevel@tonic-gate 		 * Convert the given ddi_map_req_t from rnumber to regspec...
8440Sstevel@tonic-gate 		 */
8450Sstevel@tonic-gate 
8460Sstevel@tonic-gate 		mp->map_type = DDI_MT_REGSPEC;
8470Sstevel@tonic-gate 		mp->map_obj.rp = rp;
8480Sstevel@tonic-gate 	}
8490Sstevel@tonic-gate 
8500Sstevel@tonic-gate 	/*
8510Sstevel@tonic-gate 	 * Adjust offset and length correspnding to called values...
8520Sstevel@tonic-gate 	 * XXX: A non-zero length means override the one in the regspec
8530Sstevel@tonic-gate 	 * XXX: (regardless of what's in the parent's range?)
8540Sstevel@tonic-gate 	 */
8550Sstevel@tonic-gate 
8560Sstevel@tonic-gate 	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
8570Sstevel@tonic-gate 	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */
8580Sstevel@tonic-gate 
8590Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8605084Sjohnlev 	cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d "
8615084Sjohnlev 	    "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
8625084Sjohnlev 	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, offset,
8635084Sjohnlev 	    len, mp->map_handlep);
8640Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8650Sstevel@tonic-gate 
8660Sstevel@tonic-gate 	/*
8670Sstevel@tonic-gate 	 * I/O or memory mapping:
8680Sstevel@tonic-gate 	 *
8690Sstevel@tonic-gate 	 *	<bustype=0, addr=x, len=x>: memory
8700Sstevel@tonic-gate 	 *	<bustype=1, addr=x, len=x>: i/o
8710Sstevel@tonic-gate 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
8720Sstevel@tonic-gate 	 */
8730Sstevel@tonic-gate 
8740Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
8750Sstevel@tonic-gate 		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
8760Sstevel@tonic-gate 		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
8770Sstevel@tonic-gate 		    ddi_get_name(rdip), rp->regspec_bustype,
8780Sstevel@tonic-gate 		    rp->regspec_addr, rp->regspec_size);
8790Sstevel@tonic-gate 		return (DDI_ME_INVAL);
8800Sstevel@tonic-gate 	}
8810Sstevel@tonic-gate 
8820Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
8830Sstevel@tonic-gate 		/*
8840Sstevel@tonic-gate 		 * compatibility i/o mapping
8850Sstevel@tonic-gate 		 */
8860Sstevel@tonic-gate 		rp->regspec_bustype += (uint_t)offset;
8870Sstevel@tonic-gate 	} else {
8880Sstevel@tonic-gate 		/*
8890Sstevel@tonic-gate 		 * Normal memory or i/o mapping
8900Sstevel@tonic-gate 		 */
8910Sstevel@tonic-gate 		rp->regspec_addr += (uint_t)offset;
8920Sstevel@tonic-gate 	}
8930Sstevel@tonic-gate 
8940Sstevel@tonic-gate 	if (len != 0)
8950Sstevel@tonic-gate 		rp->regspec_size = (uint_t)len;
8960Sstevel@tonic-gate 
8970Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8985084Sjohnlev 	cmn_err(CE_CONT, "             <%s,%s> <0x%x, 0x%x, 0x%d> offset %d "
8995084Sjohnlev 	    "len %d handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
9005084Sjohnlev 	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
9015084Sjohnlev 	    offset, len, mp->map_handlep);
9020Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9030Sstevel@tonic-gate 
9040Sstevel@tonic-gate 	/*
9050Sstevel@tonic-gate 	 * Apply any parent ranges at this level, if applicable.
9060Sstevel@tonic-gate 	 * (This is where nexus specific regspec translation takes place.
9070Sstevel@tonic-gate 	 * Use of this function is implicit agreement that translation is
9080Sstevel@tonic-gate 	 * provided via ddi_apply_range.)
9090Sstevel@tonic-gate 	 */
9100Sstevel@tonic-gate 
9110Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9120Sstevel@tonic-gate 	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
9130Sstevel@tonic-gate 	    ddi_get_name(dip), ddi_get_name(rdip));
9140Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9150Sstevel@tonic-gate 
9160Sstevel@tonic-gate 	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
9170Sstevel@tonic-gate 		return (error);
9180Sstevel@tonic-gate 
9190Sstevel@tonic-gate 	switch (mp->map_op)  {
9200Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
9210Sstevel@tonic-gate 
9220Sstevel@tonic-gate 		/*
9230Sstevel@tonic-gate 		 * Set up the locked down kernel mapping to the regspec...
9240Sstevel@tonic-gate 		 */
9250Sstevel@tonic-gate 
9260Sstevel@tonic-gate 		return (rootnex_map_regspec(mp, vaddrp));
9270Sstevel@tonic-gate 
9280Sstevel@tonic-gate 	case DDI_MO_UNMAP:
9290Sstevel@tonic-gate 
9300Sstevel@tonic-gate 		/*
9310Sstevel@tonic-gate 		 * Release mapping...
9320Sstevel@tonic-gate 		 */
9330Sstevel@tonic-gate 
9340Sstevel@tonic-gate 		return (rootnex_unmap_regspec(mp, vaddrp));
9350Sstevel@tonic-gate 
9360Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
9370Sstevel@tonic-gate 
9380Sstevel@tonic-gate 		return (rootnex_map_handle(mp));
9390Sstevel@tonic-gate 
9400Sstevel@tonic-gate 	default:
9410Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
9420Sstevel@tonic-gate 	}
9430Sstevel@tonic-gate }
9440Sstevel@tonic-gate 
9450Sstevel@tonic-gate 
9460Sstevel@tonic-gate /*
947509Smrj  * rootnex_map_fault()
9480Sstevel@tonic-gate  *
9490Sstevel@tonic-gate  *	fault in mappings for requestors
9500Sstevel@tonic-gate  */
9510Sstevel@tonic-gate /*ARGSUSED*/
9520Sstevel@tonic-gate static int
953509Smrj rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
954509Smrj     struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
955509Smrj     uint_t lock)
9560Sstevel@tonic-gate {
9570Sstevel@tonic-gate 
9580Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9590Sstevel@tonic-gate 	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
9600Sstevel@tonic-gate 	ddi_map_debug(" Seg <%s>\n",
9610Sstevel@tonic-gate 	    seg->s_ops == &segdev_ops ? "segdev" :
9620Sstevel@tonic-gate 	    seg == &kvseg ? "segkmem" : "NONE!");
9630Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9640Sstevel@tonic-gate 
9650Sstevel@tonic-gate 	/*
9660Sstevel@tonic-gate 	 * This is all terribly broken, but it is a start
9670Sstevel@tonic-gate 	 *
9680Sstevel@tonic-gate 	 * XXX	Note that this test means that segdev_ops
9690Sstevel@tonic-gate 	 *	must be exported from seg_dev.c.
9700Sstevel@tonic-gate 	 * XXX	What about devices with their own segment drivers?
9710Sstevel@tonic-gate 	 */
9720Sstevel@tonic-gate 	if (seg->s_ops == &segdev_ops) {
9735084Sjohnlev 		struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
9740Sstevel@tonic-gate 
9750Sstevel@tonic-gate 		if (hat == NULL) {
9760Sstevel@tonic-gate 			/*
9770Sstevel@tonic-gate 			 * This is one plausible interpretation of
9780Sstevel@tonic-gate 			 * a null hat i.e. use the first hat on the
9790Sstevel@tonic-gate 			 * address space hat list which by convention is
9800Sstevel@tonic-gate 			 * the hat of the system MMU.  At alternative
9810Sstevel@tonic-gate 			 * would be to panic .. this might well be better ..
9820Sstevel@tonic-gate 			 */
9830Sstevel@tonic-gate 			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
9840Sstevel@tonic-gate 			hat = seg->s_as->a_hat;
9850Sstevel@tonic-gate 			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
9860Sstevel@tonic-gate 		}
9870Sstevel@tonic-gate 		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
9880Sstevel@tonic-gate 		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
9890Sstevel@tonic-gate 	} else if (seg == &kvseg && dp == NULL) {
9900Sstevel@tonic-gate 		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
9910Sstevel@tonic-gate 		    HAT_LOAD_LOCK);
9920Sstevel@tonic-gate 	} else
9930Sstevel@tonic-gate 		return (DDI_FAILURE);
9940Sstevel@tonic-gate 	return (DDI_SUCCESS);
9950Sstevel@tonic-gate }
9960Sstevel@tonic-gate 
9970Sstevel@tonic-gate 
9980Sstevel@tonic-gate /*
999509Smrj  * rootnex_map_regspec()
1000509Smrj  *     we don't support mapping of I/O cards above 4Gb
10010Sstevel@tonic-gate  */
1002509Smrj static int
1003509Smrj rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1004509Smrj {
10055084Sjohnlev 	rootnex_addr_t rbase;
1006509Smrj 	void *cvaddr;
1007509Smrj 	uint_t npages, pgoffset;
1008509Smrj 	struct regspec *rp;
1009509Smrj 	ddi_acc_hdl_t *hp;
1010509Smrj 	ddi_acc_impl_t *ap;
1011509Smrj 	uint_t	hat_acc_flags;
10125084Sjohnlev 	paddr_t pbase;
1013509Smrj 
1014509Smrj 	rp = mp->map_obj.rp;
1015509Smrj 	hp = mp->map_handlep;
1016509Smrj 
1017509Smrj #ifdef	DDI_MAP_DEBUG
1018509Smrj 	ddi_map_debug(
1019509Smrj 	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1020509Smrj 	    rp->regspec_bustype, rp->regspec_addr,
1021509Smrj 	    rp->regspec_size, mp->map_handlep);
1022509Smrj #endif	/* DDI_MAP_DEBUG */
1023509Smrj 
1024509Smrj 	/*
1025509Smrj 	 * I/O or memory mapping
1026509Smrj 	 *
1027509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1028509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1029509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1030509Smrj 	 */
1031509Smrj 
1032509Smrj 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
1033509Smrj 		cmn_err(CE_WARN, "rootnex: invalid register spec"
1034509Smrj 		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
1035509Smrj 		    rp->regspec_addr, rp->regspec_size);
1036509Smrj 		return (DDI_FAILURE);
1037509Smrj 	}
1038509Smrj 
1039509Smrj 	if (rp->regspec_bustype != 0) {
1040509Smrj 		/*
1041509Smrj 		 * I/O space - needs a handle.
1042509Smrj 		 */
1043509Smrj 		if (hp == NULL) {
1044509Smrj 			return (DDI_FAILURE);
1045509Smrj 		}
1046509Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1047509Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
1048509Smrj 		impl_acc_hdl_init(hp);
1049509Smrj 
1050509Smrj 		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1051509Smrj #ifdef  DDI_MAP_DEBUG
10525084Sjohnlev 			ddi_map_debug("rootnex_map_regspec: mmap() "
10535084Sjohnlev 			    "to I/O space is not supported.\n");
1054509Smrj #endif  /* DDI_MAP_DEBUG */
1055509Smrj 			return (DDI_ME_INVAL);
1056509Smrj 		} else {
1057509Smrj 			/*
1058509Smrj 			 * 1275-compliant vs. compatibility i/o mapping
1059509Smrj 			 */
1060509Smrj 			*vaddrp =
1061509Smrj 			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
10625084Sjohnlev 			    ((caddr_t)(uintptr_t)rp->regspec_bustype) :
10635084Sjohnlev 			    ((caddr_t)(uintptr_t)rp->regspec_addr);
10645084Sjohnlev #ifdef __xpv
10655084Sjohnlev 			if (DOMAIN_IS_INITDOMAIN(xen_info)) {
10665084Sjohnlev 				hp->ah_pfn = xen_assign_pfn(
10675084Sjohnlev 				    mmu_btop((ulong_t)rp->regspec_addr &
10685084Sjohnlev 				    MMU_PAGEMASK));
10695084Sjohnlev 			} else {
10705084Sjohnlev 				hp->ah_pfn = mmu_btop(
10715084Sjohnlev 				    (ulong_t)rp->regspec_addr & MMU_PAGEMASK);
10725084Sjohnlev 			}
10735084Sjohnlev #else
10741865Sdilpreet 			hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr &
10755084Sjohnlev 			    MMU_PAGEMASK);
10765084Sjohnlev #endif
10771865Sdilpreet 			hp->ah_pnum = mmu_btopr(rp->regspec_size +
10781865Sdilpreet 			    (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET);
1079509Smrj 		}
1080509Smrj 
1081509Smrj #ifdef	DDI_MAP_DEBUG
1082509Smrj 		ddi_map_debug(
1083509Smrj 	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1084509Smrj 		    rp->regspec_size, *vaddrp);
1085509Smrj #endif	/* DDI_MAP_DEBUG */
1086509Smrj 		return (DDI_SUCCESS);
1087509Smrj 	}
1088509Smrj 
1089509Smrj 	/*
1090509Smrj 	 * Memory space
1091509Smrj 	 */
1092509Smrj 
1093509Smrj 	if (hp != NULL) {
1094509Smrj 		/*
1095509Smrj 		 * hat layer ignores
1096509Smrj 		 * hp->ah_acc.devacc_attr_endian_flags.
1097509Smrj 		 */
1098509Smrj 		switch (hp->ah_acc.devacc_attr_dataorder) {
1099509Smrj 		case DDI_STRICTORDER_ACC:
1100509Smrj 			hat_acc_flags = HAT_STRICTORDER;
1101509Smrj 			break;
1102509Smrj 		case DDI_UNORDERED_OK_ACC:
1103509Smrj 			hat_acc_flags = HAT_UNORDERED_OK;
1104509Smrj 			break;
1105509Smrj 		case DDI_MERGING_OK_ACC:
1106509Smrj 			hat_acc_flags = HAT_MERGING_OK;
1107509Smrj 			break;
1108509Smrj 		case DDI_LOADCACHING_OK_ACC:
1109509Smrj 			hat_acc_flags = HAT_LOADCACHING_OK;
1110509Smrj 			break;
1111509Smrj 		case DDI_STORECACHING_OK_ACC:
1112509Smrj 			hat_acc_flags = HAT_STORECACHING_OK;
1113509Smrj 			break;
1114509Smrj 		}
1115509Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1116509Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
1117509Smrj 		impl_acc_hdl_init(hp);
1118509Smrj 		hp->ah_hat_flags = hat_acc_flags;
1119509Smrj 	} else {
1120509Smrj 		hat_acc_flags = HAT_STRICTORDER;
1121509Smrj 	}
1122509Smrj 
11235084Sjohnlev 	rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK);
11245084Sjohnlev #ifdef __xpv
11255084Sjohnlev 	/*
11265084Sjohnlev 	 * If we're dom0, we're using a real device so we need to translate
11275084Sjohnlev 	 * the MA to a PA.
11285084Sjohnlev 	 */
11295084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
11305084Sjohnlev 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase)));
11315084Sjohnlev 	} else {
11325084Sjohnlev 		pbase = rbase;
11335084Sjohnlev 	}
11345084Sjohnlev #else
11355084Sjohnlev 	pbase = rbase;
11365084Sjohnlev #endif
11375084Sjohnlev 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1138509Smrj 
1139509Smrj 	if (rp->regspec_size == 0) {
1140509Smrj #ifdef  DDI_MAP_DEBUG
1141509Smrj 		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1142509Smrj #endif  /* DDI_MAP_DEBUG */
1143509Smrj 		return (DDI_ME_INVAL);
1144509Smrj 	}
1145509Smrj 
1146509Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
11475084Sjohnlev 		/* extra cast to make gcc happy */
11485084Sjohnlev 		*vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase));
1149509Smrj 	} else {
1150509Smrj 		npages = mmu_btopr(rp->regspec_size + pgoffset);
1151509Smrj 
1152509Smrj #ifdef	DDI_MAP_DEBUG
11535084Sjohnlev 		ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
11545084Sjohnlev 		    "physical %llx", npages, pbase);
1155509Smrj #endif	/* DDI_MAP_DEBUG */
1156509Smrj 
1157509Smrj 		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
1158509Smrj 		if (cvaddr == NULL)
1159509Smrj 			return (DDI_ME_NORESOURCES);
1160509Smrj 
1161509Smrj 		/*
1162509Smrj 		 * Now map in the pages we've allocated...
1163509Smrj 		 */
11645084Sjohnlev 		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages),
11655084Sjohnlev 		    mmu_btop(pbase), mp->map_prot | hat_acc_flags,
11665084Sjohnlev 		    HAT_LOAD_LOCK);
1167509Smrj 		*vaddrp = (caddr_t)cvaddr + pgoffset;
11681865Sdilpreet 
11691865Sdilpreet 		/* save away pfn and npages for FMA */
11701865Sdilpreet 		hp = mp->map_handlep;
11711865Sdilpreet 		if (hp) {
11725084Sjohnlev 			hp->ah_pfn = mmu_btop(pbase);
11731865Sdilpreet 			hp->ah_pnum = npages;
11741865Sdilpreet 		}
1175509Smrj 	}
1176509Smrj 
1177509Smrj #ifdef	DDI_MAP_DEBUG
1178509Smrj 	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
1179509Smrj #endif	/* DDI_MAP_DEBUG */
1180509Smrj 	return (DDI_SUCCESS);
1181509Smrj }
1182509Smrj 
11830Sstevel@tonic-gate 
11840Sstevel@tonic-gate /*
1185509Smrj  * rootnex_unmap_regspec()
1186509Smrj  *
1187509Smrj  */
1188509Smrj static int
1189509Smrj rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1190509Smrj {
1191509Smrj 	caddr_t addr = (caddr_t)*vaddrp;
1192509Smrj 	uint_t npages, pgoffset;
1193509Smrj 	struct regspec *rp;
1194509Smrj 
1195509Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
1196509Smrj 		return (0);
1197509Smrj 
1198509Smrj 	rp = mp->map_obj.rp;
1199509Smrj 
1200509Smrj 	if (rp->regspec_size == 0) {
1201509Smrj #ifdef  DDI_MAP_DEBUG
1202509Smrj 		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1203509Smrj #endif  /* DDI_MAP_DEBUG */
1204509Smrj 		return (DDI_ME_INVAL);
1205509Smrj 	}
1206509Smrj 
1207509Smrj 	/*
1208509Smrj 	 * I/O or memory mapping:
1209509Smrj 	 *
1210509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1211509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1212509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1213509Smrj 	 */
1214509Smrj 	if (rp->regspec_bustype != 0) {
1215509Smrj 		/*
1216509Smrj 		 * This is I/O space, which requires no particular
1217509Smrj 		 * processing on unmap since it isn't mapped in the
1218509Smrj 		 * first place.
1219509Smrj 		 */
1220509Smrj 		return (DDI_SUCCESS);
1221509Smrj 	}
1222509Smrj 
1223509Smrj 	/*
1224509Smrj 	 * Memory space
1225509Smrj 	 */
1226509Smrj 	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
1227509Smrj 	npages = mmu_btopr(rp->regspec_size + pgoffset);
1228509Smrj 	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
1229509Smrj 	device_arena_free(addr - pgoffset, ptob(npages));
1230509Smrj 
1231509Smrj 	/*
1232509Smrj 	 * Destroy the pointer - the mapping has logically gone
1233509Smrj 	 */
1234509Smrj 	*vaddrp = NULL;
1235509Smrj 
1236509Smrj 	return (DDI_SUCCESS);
1237509Smrj }
1238509Smrj 
1239509Smrj 
1240509Smrj /*
1241509Smrj  * rootnex_map_handle()
1242509Smrj  *
12430Sstevel@tonic-gate  */
1244509Smrj static int
1245509Smrj rootnex_map_handle(ddi_map_req_t *mp)
1246509Smrj {
12475084Sjohnlev 	rootnex_addr_t rbase;
1248509Smrj 	ddi_acc_hdl_t *hp;
1249509Smrj 	uint_t pgoffset;
1250509Smrj 	struct regspec *rp;
12515084Sjohnlev 	paddr_t pbase;
1252509Smrj 
1253509Smrj 	rp = mp->map_obj.rp;
1254509Smrj 
1255509Smrj #ifdef	DDI_MAP_DEBUG
1256509Smrj 	ddi_map_debug(
1257509Smrj 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1258509Smrj 	    rp->regspec_bustype, rp->regspec_addr,
1259509Smrj 	    rp->regspec_size, mp->map_handlep);
1260509Smrj #endif	/* DDI_MAP_DEBUG */
1261509Smrj 
1262509Smrj 	/*
1263509Smrj 	 * I/O or memory mapping:
1264509Smrj 	 *
1265509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1266509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1267509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1268509Smrj 	 */
1269509Smrj 	if (rp->regspec_bustype != 0) {
1270509Smrj 		/*
1271509Smrj 		 * This refers to I/O space, and we don't support "mapping"
1272509Smrj 		 * I/O space to a user.
1273509Smrj 		 */
1274509Smrj 		return (DDI_FAILURE);
1275509Smrj 	}
1276509Smrj 
1277509Smrj 	/*
1278509Smrj 	 * Set up the hat_flags for the mapping.
1279509Smrj 	 */
1280509Smrj 	hp = mp->map_handlep;
1281509Smrj 
1282509Smrj 	switch (hp->ah_acc.devacc_attr_endian_flags) {
1283509Smrj 	case DDI_NEVERSWAP_ACC:
1284509Smrj 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
1285509Smrj 		break;
1286509Smrj 	case DDI_STRUCTURE_LE_ACC:
1287509Smrj 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
1288509Smrj 		break;
1289509Smrj 	case DDI_STRUCTURE_BE_ACC:
1290509Smrj 		return (DDI_FAILURE);
1291509Smrj 	default:
1292509Smrj 		return (DDI_REGS_ACC_CONFLICT);
1293509Smrj 	}
1294509Smrj 
1295509Smrj 	switch (hp->ah_acc.devacc_attr_dataorder) {
1296509Smrj 	case DDI_STRICTORDER_ACC:
1297509Smrj 		break;
1298509Smrj 	case DDI_UNORDERED_OK_ACC:
1299509Smrj 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
1300509Smrj 		break;
1301509Smrj 	case DDI_MERGING_OK_ACC:
1302509Smrj 		hp->ah_hat_flags |= HAT_MERGING_OK;
1303509Smrj 		break;
1304509Smrj 	case DDI_LOADCACHING_OK_ACC:
1305509Smrj 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
1306509Smrj 		break;
1307509Smrj 	case DDI_STORECACHING_OK_ACC:
1308509Smrj 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
1309509Smrj 		break;
1310509Smrj 	default:
1311509Smrj 		return (DDI_FAILURE);
1312509Smrj 	}
1313509Smrj 
13145084Sjohnlev 	rbase = (rootnex_addr_t)rp->regspec_addr &
13155084Sjohnlev 	    (~(rootnex_addr_t)MMU_PAGEOFFSET);
13165084Sjohnlev 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1317509Smrj 
1318509Smrj 	if (rp->regspec_size == 0)
1319509Smrj 		return (DDI_ME_INVAL);
1320509Smrj 
13215084Sjohnlev #ifdef __xpv
13225084Sjohnlev 	/*
13235084Sjohnlev 	 * If we're dom0, we're using a real device so we need to translate
13245084Sjohnlev 	 * the MA to a PA.
13255084Sjohnlev 	 */
13265084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
13275084Sjohnlev 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) |
13285084Sjohnlev 		    (rbase & MMU_PAGEOFFSET);
13295084Sjohnlev 	} else {
13305084Sjohnlev 		pbase = rbase;
13315084Sjohnlev 	}
13325084Sjohnlev #else
13335084Sjohnlev 	pbase = rbase;
13345084Sjohnlev #endif
13355084Sjohnlev 
13365084Sjohnlev 	hp->ah_pfn = mmu_btop(pbase);
1337509Smrj 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
1338509Smrj 
1339509Smrj 	return (DDI_SUCCESS);
1340509Smrj }
13410Sstevel@tonic-gate 
13420Sstevel@tonic-gate 
13430Sstevel@tonic-gate 
13440Sstevel@tonic-gate /*
1345509Smrj  * ************************
1346509Smrj  *  interrupt related code
1347509Smrj  * ************************
13480Sstevel@tonic-gate  */
13490Sstevel@tonic-gate 
13500Sstevel@tonic-gate /*
1351509Smrj  * rootnex_intr_ops()
13520Sstevel@tonic-gate  *	bus_intr_op() function for interrupt support
13530Sstevel@tonic-gate  */
13540Sstevel@tonic-gate /* ARGSUSED */
13550Sstevel@tonic-gate static int
13560Sstevel@tonic-gate rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
13570Sstevel@tonic-gate     ddi_intr_handle_impl_t *hdlp, void *result)
13580Sstevel@tonic-gate {
13590Sstevel@tonic-gate 	struct intrspec			*ispec;
13600Sstevel@tonic-gate 	struct ddi_parent_private_data	*pdp;
13610Sstevel@tonic-gate 
13620Sstevel@tonic-gate 	DDI_INTR_NEXDBG((CE_CONT,
13630Sstevel@tonic-gate 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
13640Sstevel@tonic-gate 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
13650Sstevel@tonic-gate 
13660Sstevel@tonic-gate 	/* Process the interrupt operation */
13670Sstevel@tonic-gate 	switch (intr_op) {
13680Sstevel@tonic-gate 	case DDI_INTROP_GETCAP:
13690Sstevel@tonic-gate 		/* First check with pcplusmp */
13700Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13710Sstevel@tonic-gate 			return (DDI_FAILURE);
13720Sstevel@tonic-gate 
13730Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
13740Sstevel@tonic-gate 			*(int *)result = 0;
13750Sstevel@tonic-gate 			return (DDI_FAILURE);
13760Sstevel@tonic-gate 		}
13770Sstevel@tonic-gate 		break;
13780Sstevel@tonic-gate 	case DDI_INTROP_SETCAP:
13790Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13800Sstevel@tonic-gate 			return (DDI_FAILURE);
13810Sstevel@tonic-gate 
13820Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
13830Sstevel@tonic-gate 			return (DDI_FAILURE);
13840Sstevel@tonic-gate 		break;
13850Sstevel@tonic-gate 	case DDI_INTROP_ALLOC:
13860Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13870Sstevel@tonic-gate 			return (DDI_FAILURE);
13880Sstevel@tonic-gate 		hdlp->ih_pri = ispec->intrspec_pri;
13890Sstevel@tonic-gate 		*(int *)result = hdlp->ih_scratch1;
13900Sstevel@tonic-gate 		break;
13910Sstevel@tonic-gate 	case DDI_INTROP_FREE:
13920Sstevel@tonic-gate 		pdp = ddi_get_parent_data(rdip);
13930Sstevel@tonic-gate 		/*
13940Sstevel@tonic-gate 		 * Special case for 'pcic' driver' only.
13950Sstevel@tonic-gate 		 * If an intrspec was created for it, clean it up here
13960Sstevel@tonic-gate 		 * See detailed comments on this in the function
13970Sstevel@tonic-gate 		 * rootnex_get_ispec().
13980Sstevel@tonic-gate 		 */
13990Sstevel@tonic-gate 		if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
14000Sstevel@tonic-gate 			kmem_free(pdp->par_intr, sizeof (struct intrspec) *
14010Sstevel@tonic-gate 			    pdp->par_nintr);
14020Sstevel@tonic-gate 			/*
14030Sstevel@tonic-gate 			 * Set it to zero; so that
14040Sstevel@tonic-gate 			 * DDI framework doesn't free it again
14050Sstevel@tonic-gate 			 */
14060Sstevel@tonic-gate 			pdp->par_intr = NULL;
14070Sstevel@tonic-gate 			pdp->par_nintr = 0;
14080Sstevel@tonic-gate 		}
14090Sstevel@tonic-gate 		break;
14100Sstevel@tonic-gate 	case DDI_INTROP_GETPRI:
14110Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14120Sstevel@tonic-gate 			return (DDI_FAILURE);
14130Sstevel@tonic-gate 		*(int *)result = ispec->intrspec_pri;
14140Sstevel@tonic-gate 		break;
14150Sstevel@tonic-gate 	case DDI_INTROP_SETPRI:
14160Sstevel@tonic-gate 		/* Validate the interrupt priority passed to us */
14170Sstevel@tonic-gate 		if (*(int *)result > LOCK_LEVEL)
14180Sstevel@tonic-gate 			return (DDI_FAILURE);
14190Sstevel@tonic-gate 
14200Sstevel@tonic-gate 		/* Ensure that PSM is all initialized and ispec is ok */
14210Sstevel@tonic-gate 		if ((psm_intr_ops == NULL) ||
14220Sstevel@tonic-gate 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
14230Sstevel@tonic-gate 			return (DDI_FAILURE);
14240Sstevel@tonic-gate 
14250Sstevel@tonic-gate 		/* Change the priority */
14260Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
14270Sstevel@tonic-gate 		    PSM_FAILURE)
14280Sstevel@tonic-gate 			return (DDI_FAILURE);
14290Sstevel@tonic-gate 
14300Sstevel@tonic-gate 		/* update the ispec with the new priority */
14310Sstevel@tonic-gate 		ispec->intrspec_pri =  *(int *)result;
14320Sstevel@tonic-gate 		break;
14330Sstevel@tonic-gate 	case DDI_INTROP_ADDISR:
14340Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14350Sstevel@tonic-gate 			return (DDI_FAILURE);
14360Sstevel@tonic-gate 		ispec->intrspec_func = hdlp->ih_cb_func;
14370Sstevel@tonic-gate 		break;
14380Sstevel@tonic-gate 	case DDI_INTROP_REMISR:
14390Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14400Sstevel@tonic-gate 			return (DDI_FAILURE);
14410Sstevel@tonic-gate 		ispec->intrspec_func = (uint_t (*)()) 0;
14420Sstevel@tonic-gate 		break;
14430Sstevel@tonic-gate 	case DDI_INTROP_ENABLE:
14440Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14450Sstevel@tonic-gate 			return (DDI_FAILURE);
14460Sstevel@tonic-gate 
14470Sstevel@tonic-gate 		/* Call psmi to translate irq with the dip */
14480Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14490Sstevel@tonic-gate 			return (DDI_FAILURE);
14500Sstevel@tonic-gate 
1451916Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
14520Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
14530Sstevel@tonic-gate 		    (int *)&hdlp->ih_vector);
14540Sstevel@tonic-gate 
14550Sstevel@tonic-gate 		/* Add the interrupt handler */
14560Sstevel@tonic-gate 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
14570Sstevel@tonic-gate 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
1458916Sschwartz 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
14590Sstevel@tonic-gate 			return (DDI_FAILURE);
14600Sstevel@tonic-gate 		break;
14610Sstevel@tonic-gate 	case DDI_INTROP_DISABLE:
14620Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14630Sstevel@tonic-gate 			return (DDI_FAILURE);
14640Sstevel@tonic-gate 
14650Sstevel@tonic-gate 		/* Call psm_ops() to translate irq with the dip */
14660Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14670Sstevel@tonic-gate 			return (DDI_FAILURE);
14680Sstevel@tonic-gate 
1469916Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
14700Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp,
14710Sstevel@tonic-gate 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
14720Sstevel@tonic-gate 
14730Sstevel@tonic-gate 		/* Remove the interrupt handler */
14740Sstevel@tonic-gate 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
14750Sstevel@tonic-gate 		    hdlp->ih_cb_func, hdlp->ih_vector);
14760Sstevel@tonic-gate 		break;
14770Sstevel@tonic-gate 	case DDI_INTROP_SETMASK:
14780Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14790Sstevel@tonic-gate 			return (DDI_FAILURE);
14800Sstevel@tonic-gate 
14810Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
14820Sstevel@tonic-gate 			return (DDI_FAILURE);
14830Sstevel@tonic-gate 		break;
14840Sstevel@tonic-gate 	case DDI_INTROP_CLRMASK:
14850Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14860Sstevel@tonic-gate 			return (DDI_FAILURE);
14870Sstevel@tonic-gate 
14880Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
14890Sstevel@tonic-gate 			return (DDI_FAILURE);
14900Sstevel@tonic-gate 		break;
14910Sstevel@tonic-gate 	case DDI_INTROP_GETPENDING:
14920Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14930Sstevel@tonic-gate 			return (DDI_FAILURE);
14940Sstevel@tonic-gate 
14950Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
14960Sstevel@tonic-gate 		    result)) {
14970Sstevel@tonic-gate 			*(int *)result = 0;
14980Sstevel@tonic-gate 			return (DDI_FAILURE);
14990Sstevel@tonic-gate 		}
15000Sstevel@tonic-gate 		break;
15012580Sanish 	case DDI_INTROP_NAVAIL:
15020Sstevel@tonic-gate 	case DDI_INTROP_NINTRS:
15032580Sanish 		*(int *)result = i_ddi_get_intx_nintrs(rdip);
15042580Sanish 		if (*(int *)result == 0) {
15050Sstevel@tonic-gate 			/*
15060Sstevel@tonic-gate 			 * Special case for 'pcic' driver' only. This driver
15070Sstevel@tonic-gate 			 * driver is a child of 'isa' and 'rootnex' drivers.
15080Sstevel@tonic-gate 			 *
15090Sstevel@tonic-gate 			 * See detailed comments on this in the function
15100Sstevel@tonic-gate 			 * rootnex_get_ispec().
15110Sstevel@tonic-gate 			 *
15120Sstevel@tonic-gate 			 * Children of 'pcic' send 'NINITR' request all the
15130Sstevel@tonic-gate 			 * way to rootnex driver. But, the 'pdp->par_nintr'
15140Sstevel@tonic-gate 			 * field may not initialized. So, we fake it here
15150Sstevel@tonic-gate 			 * to return 1 (a la what PCMCIA nexus does).
15160Sstevel@tonic-gate 			 */
15170Sstevel@tonic-gate 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
15180Sstevel@tonic-gate 				*(int *)result = 1;
15192580Sanish 			else
15202580Sanish 				return (DDI_FAILURE);
15210Sstevel@tonic-gate 		}
15220Sstevel@tonic-gate 		break;
15230Sstevel@tonic-gate 	case DDI_INTROP_SUPPORTED_TYPES:
15242580Sanish 		*(int *)result = DDI_INTR_TYPE_FIXED;	/* Always ... */
15250Sstevel@tonic-gate 		break;
15260Sstevel@tonic-gate 	default:
15270Sstevel@tonic-gate 		return (DDI_FAILURE);
15280Sstevel@tonic-gate 	}
15290Sstevel@tonic-gate 
15300Sstevel@tonic-gate 	return (DDI_SUCCESS);
15310Sstevel@tonic-gate }
15320Sstevel@tonic-gate 
15330Sstevel@tonic-gate 
15340Sstevel@tonic-gate /*
1535509Smrj  * rootnex_get_ispec()
1536509Smrj  *	convert an interrupt number to an interrupt specification.
1537509Smrj  *	The interrupt number determines which interrupt spec will be
1538509Smrj  *	returned if more than one exists.
1539509Smrj  *
1540509Smrj  *	Look into the parent private data area of the 'rdip' to find out
1541509Smrj  *	the interrupt specification.  First check to make sure there is
1542509Smrj  *	one that matchs "inumber" and then return a pointer to it.
1543509Smrj  *
1544509Smrj  *	Return NULL if one could not be found.
1545509Smrj  *
1546509Smrj  *	NOTE: This is needed for rootnex_intr_ops()
1547509Smrj  */
1548509Smrj static struct intrspec *
1549509Smrj rootnex_get_ispec(dev_info_t *rdip, int inum)
1550509Smrj {
1551509Smrj 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
1552509Smrj 
1553509Smrj 	/*
1554509Smrj 	 * Special case handling for drivers that provide their own
1555509Smrj 	 * intrspec structures instead of relying on the DDI framework.
1556509Smrj 	 *
1557509Smrj 	 * A broken hardware driver in ON could potentially provide its
1558509Smrj 	 * own intrspec structure, instead of relying on the hardware.
1559509Smrj 	 * If these drivers are children of 'rootnex' then we need to
1560509Smrj 	 * continue to provide backward compatibility to them here.
1561509Smrj 	 *
1562509Smrj 	 * Following check is a special case for 'pcic' driver which
1563509Smrj 	 * was found to have broken hardwre andby provides its own intrspec.
1564509Smrj 	 *
1565509Smrj 	 * Verbatim comments from this driver are shown here:
1566509Smrj 	 * "Don't use the ddi_add_intr since we don't have a
1567509Smrj 	 * default intrspec in all cases."
1568509Smrj 	 *
1569509Smrj 	 * Since an 'ispec' may not be always created for it,
1570509Smrj 	 * check for that and create one if so.
1571509Smrj 	 *
1572509Smrj 	 * NOTE: Currently 'pcic' is the only driver found to do this.
1573509Smrj 	 */
1574509Smrj 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1575509Smrj 		pdp->par_nintr = 1;
1576509Smrj 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
1577509Smrj 		    pdp->par_nintr, KM_SLEEP);
1578509Smrj 	}
1579509Smrj 
1580509Smrj 	/* Validate the interrupt number */
1581509Smrj 	if (inum >= pdp->par_nintr)
1582509Smrj 		return (NULL);
1583509Smrj 
1584509Smrj 	/* Get the interrupt structure pointer and return that */
1585509Smrj 	return ((struct intrspec *)&pdp->par_intr[inum]);
1586509Smrj }
1587509Smrj 
1588509Smrj 
1589509Smrj /*
1590509Smrj  * ******************
1591509Smrj  *  dma related code
1592509Smrj  * ******************
1593509Smrj  */
1594509Smrj 
1595509Smrj /*ARGSUSED*/
1596509Smrj static int
15977613SVikram.Hegde@Sun.COM rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
15987613SVikram.Hegde@Sun.COM     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
15997613SVikram.Hegde@Sun.COM     ddi_dma_handle_t *handlep)
1600509Smrj {
1601509Smrj 	uint64_t maxsegmentsize_ll;
1602509Smrj 	uint_t maxsegmentsize;
1603509Smrj 	ddi_dma_impl_t *hp;
1604509Smrj 	rootnex_dma_t *dma;
1605509Smrj 	uint64_t count_max;
1606509Smrj 	uint64_t seg;
1607509Smrj 	int kmflag;
1608509Smrj 	int e;
1609509Smrj 
1610509Smrj 
1611509Smrj 	/* convert our sleep flags */
1612509Smrj 	if (waitfp == DDI_DMA_SLEEP) {
1613509Smrj 		kmflag = KM_SLEEP;
1614509Smrj 	} else {
1615509Smrj 		kmflag = KM_NOSLEEP;
1616509Smrj 	}
1617509Smrj 
1618509Smrj 	/*
1619509Smrj 	 * We try to do only one memory allocation here. We'll do a little
1620509Smrj 	 * pointer manipulation later. If the bind ends up taking more than
1621509Smrj 	 * our prealloc's space, we'll have to allocate more memory in the
1622509Smrj 	 * bind operation. Not great, but much better than before and the
1623509Smrj 	 * best we can do with the current bind interfaces.
1624509Smrj 	 */
1625509Smrj 	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
1626509Smrj 	if (hp == NULL) {
1627509Smrj 		if (waitfp != DDI_DMA_DONTWAIT) {
1628509Smrj 			ddi_set_callback(waitfp, arg,
1629509Smrj 			    &rootnex_state->r_dvma_call_list_id);
1630509Smrj 		}
1631509Smrj 		return (DDI_DMA_NORESOURCES);
1632509Smrj 	}
1633509Smrj 
1634509Smrj 	/* Do our pointer manipulation now, align the structures */
1635509Smrj 	hp->dmai_private = (void *)(((uintptr_t)hp +
1636509Smrj 	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
1637509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1638509Smrj 	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
1639509Smrj 	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);
1640509Smrj 
1641509Smrj 	/* setup the handle */
1642509Smrj 	rootnex_clean_dmahdl(hp);
1643509Smrj 	dma->dp_dip = rdip;
1644509Smrj 	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
1645509Smrj 	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
1646509Smrj 	hp->dmai_minxfer = attr->dma_attr_minxfer;
1647509Smrj 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
1648509Smrj 	hp->dmai_rdip = rdip;
1649509Smrj 	hp->dmai_attr = *attr;
1650509Smrj 
1651509Smrj 	/* we don't need to worry about the SPL since we do a tryenter */
1652509Smrj 	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
1653509Smrj 
1654509Smrj 	/*
1655509Smrj 	 * Figure out our maximum segment size. If the segment size is greater
1656509Smrj 	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1657509Smrj 	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1658509Smrj 	 * dma_attr_count_max are size-1 type values.
1659509Smrj 	 *
1660509Smrj 	 * Maximum segment size is the largest physically contiguous chunk of
1661509Smrj 	 * memory that we can return from a bind (i.e. the maximum size of a
1662509Smrj 	 * single cookie).
1663509Smrj 	 */
1664509Smrj 
1665509Smrj 	/* handle the rollover cases */
1666509Smrj 	seg = attr->dma_attr_seg + 1;
1667509Smrj 	if (seg < attr->dma_attr_seg) {
1668509Smrj 		seg = attr->dma_attr_seg;
1669509Smrj 	}
1670509Smrj 	count_max = attr->dma_attr_count_max + 1;
1671509Smrj 	if (count_max < attr->dma_attr_count_max) {
1672509Smrj 		count_max = attr->dma_attr_count_max;
1673509Smrj 	}
1674509Smrj 
1675509Smrj 	/*
1676509Smrj 	 * granularity may or may not be a power of two. If it isn't, we can't
1677509Smrj 	 * use a simple mask.
1678509Smrj 	 */
1679509Smrj 	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
1680509Smrj 		dma->dp_granularity_power_2 = B_FALSE;
1681509Smrj 	} else {
1682509Smrj 		dma->dp_granularity_power_2 = B_TRUE;
1683509Smrj 	}
1684509Smrj 
1685509Smrj 	/*
1686509Smrj 	 * maxxfer should be a whole multiple of granularity. If we're going to
1687509Smrj 	 * break up a window because we're greater than maxxfer, we might as
1688509Smrj 	 * well make sure it's maxxfer is a whole multiple so we don't have to
1689509Smrj 	 * worry about triming the window later on for this case.
1690509Smrj 	 */
1691509Smrj 	if (attr->dma_attr_granular > 1) {
1692509Smrj 		if (dma->dp_granularity_power_2) {
1693509Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1694509Smrj 			    (attr->dma_attr_maxxfer &
1695509Smrj 			    (attr->dma_attr_granular - 1));
1696509Smrj 		} else {
1697509Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1698509Smrj 			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
1699509Smrj 		}
1700509Smrj 	} else {
1701509Smrj 		dma->dp_maxxfer = attr->dma_attr_maxxfer;
1702509Smrj 	}
1703509Smrj 
1704509Smrj 	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
1705509Smrj 	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
1706509Smrj 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
1707509Smrj 		maxsegmentsize = 0xFFFFFFFF;
1708509Smrj 	} else {
1709509Smrj 		maxsegmentsize = maxsegmentsize_ll;
1710509Smrj 	}
1711509Smrj 	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
1712509Smrj 	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
1713509Smrj 
1714509Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1715509Smrj 	if (rootnex_alloc_check_parms) {
1716509Smrj 		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
1717509Smrj 		if (e != DDI_SUCCESS) {
1718509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
1719509Smrj 			(void) rootnex_dma_freehdl(dip, rdip,
1720509Smrj 			    (ddi_dma_handle_t)hp);
1721509Smrj 			return (e);
1722509Smrj 		}
1723509Smrj 	}
1724509Smrj 
1725509Smrj 	*handlep = (ddi_dma_handle_t)hp;
1726509Smrj 
1727509Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1728509Smrj 	DTRACE_PROBE1(rootnex__alloc__handle, uint64_t,
1729509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1730509Smrj 
1731509Smrj 	return (DDI_SUCCESS);
1732509Smrj }
1733509Smrj 
1734509Smrj 
1735509Smrj /*
17367613SVikram.Hegde@Sun.COM  * rootnex_dma_allochdl()
17377613SVikram.Hegde@Sun.COM  *    called from ddi_dma_alloc_handle().
1738509Smrj  */
17397613SVikram.Hegde@Sun.COM static int
17407613SVikram.Hegde@Sun.COM rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
17417613SVikram.Hegde@Sun.COM     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
17427613SVikram.Hegde@Sun.COM {
17437613SVikram.Hegde@Sun.COM #if !defined(__xpv)
17447613SVikram.Hegde@Sun.COM 	uint_t error = ENOTSUP;
17457613SVikram.Hegde@Sun.COM 	int retval;
17467613SVikram.Hegde@Sun.COM 
17477613SVikram.Hegde@Sun.COM 	retval = iommulib_nex_open(rdip, &error);
17487613SVikram.Hegde@Sun.COM 
17497613SVikram.Hegde@Sun.COM 	if (retval != DDI_SUCCESS && error == ENOTSUP) {
17507613SVikram.Hegde@Sun.COM 		/* No IOMMU */
17517613SVikram.Hegde@Sun.COM 		return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
17527613SVikram.Hegde@Sun.COM 		    handlep));
17537613SVikram.Hegde@Sun.COM 	} else if (retval != DDI_SUCCESS) {
17547613SVikram.Hegde@Sun.COM 		return (DDI_FAILURE);
17557613SVikram.Hegde@Sun.COM 	}
17567613SVikram.Hegde@Sun.COM 
17577613SVikram.Hegde@Sun.COM 	ASSERT(IOMMU_USED(rdip));
17587613SVikram.Hegde@Sun.COM 
17597613SVikram.Hegde@Sun.COM 	/* has an IOMMU */
17607613SVikram.Hegde@Sun.COM 	return (iommulib_nexdma_allochdl(dip, rdip, attr,
17617613SVikram.Hegde@Sun.COM 	    waitfp, arg, handlep));
17627613SVikram.Hegde@Sun.COM #else
17637613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
17647613SVikram.Hegde@Sun.COM 	    handlep));
17657613SVikram.Hegde@Sun.COM #endif
17667613SVikram.Hegde@Sun.COM }
17677613SVikram.Hegde@Sun.COM 
1768509Smrj /*ARGSUSED*/
1769509Smrj static int
17707613SVikram.Hegde@Sun.COM rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
17717613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle)
1772509Smrj {
1773509Smrj 	ddi_dma_impl_t *hp;
1774509Smrj 	rootnex_dma_t *dma;
1775509Smrj 
1776509Smrj 
1777509Smrj 	hp = (ddi_dma_impl_t *)handle;
1778509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1779509Smrj 
1780509Smrj 	/* unbind should have been called first */
1781509Smrj 	ASSERT(!dma->dp_inuse);
1782509Smrj 
1783509Smrj 	mutex_destroy(&dma->dp_mutex);
1784509Smrj 	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
1785509Smrj 
1786509Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1787509Smrj 	DTRACE_PROBE1(rootnex__free__handle, uint64_t,
1788509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1789509Smrj 
1790509Smrj 	if (rootnex_state->r_dvma_call_list_id)
1791509Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
1792509Smrj 
1793509Smrj 	return (DDI_SUCCESS);
1794509Smrj }
1795509Smrj 
1796509Smrj /*
17977613SVikram.Hegde@Sun.COM  * rootnex_dma_freehdl()
17987613SVikram.Hegde@Sun.COM  *    called from ddi_dma_free_handle().
1799509Smrj  */
18007613SVikram.Hegde@Sun.COM static int
18017613SVikram.Hegde@Sun.COM rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
18027613SVikram.Hegde@Sun.COM {
18037613SVikram.Hegde@Sun.COM #if !defined(__xpv)
18047613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
18057613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_freehdl(dip, rdip, handle));
18067613SVikram.Hegde@Sun.COM 	}
18077613SVikram.Hegde@Sun.COM #endif
18087613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_freehdl(dip, rdip, handle));
18097613SVikram.Hegde@Sun.COM }
18107613SVikram.Hegde@Sun.COM 
18117613SVikram.Hegde@Sun.COM 
1812509Smrj /*ARGSUSED*/
1813509Smrj static int
18147613SVikram.Hegde@Sun.COM rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
18157613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
18167613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
18170Sstevel@tonic-gate {
1818509Smrj 	rootnex_sglinfo_t *sinfo;
1819509Smrj 	ddi_dma_attr_t *attr;
1820509Smrj 	ddi_dma_impl_t *hp;
1821509Smrj 	rootnex_dma_t *dma;
1822509Smrj 	int kmflag;
1823509Smrj 	int e;
1824509Smrj 
1825509Smrj 
1826509Smrj 	hp = (ddi_dma_impl_t *)handle;
1827509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1828509Smrj 	sinfo = &dma->dp_sglinfo;
1829509Smrj 	attr = &hp->dmai_attr;
1830509Smrj 
1831509Smrj 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1832509Smrj 
1833509Smrj 	/*
1834509Smrj 	 * This is useful for debugging a driver. Not as useful in a production
1835509Smrj 	 * system. The only time this will fail is if you have a driver bug.
1836509Smrj 	 */
1837509Smrj 	if (rootnex_bind_check_inuse) {
1838509Smrj 		/*
1839509Smrj 		 * No one else should ever have this lock unless someone else
1840509Smrj 		 * is trying to use this handle. So contention on the lock
1841509Smrj 		 * is the same as inuse being set.
1842509Smrj 		 */
1843509Smrj 		e = mutex_tryenter(&dma->dp_mutex);
1844509Smrj 		if (e == 0) {
1845509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1846509Smrj 			return (DDI_DMA_INUSE);
1847509Smrj 		}
1848509Smrj 		if (dma->dp_inuse) {
1849509Smrj 			mutex_exit(&dma->dp_mutex);
1850509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1851509Smrj 			return (DDI_DMA_INUSE);
1852509Smrj 		}
1853509Smrj 		dma->dp_inuse = B_TRUE;
1854509Smrj 		mutex_exit(&dma->dp_mutex);
1855509Smrj 	}
1856509Smrj 
1857509Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1858509Smrj 	if (rootnex_bind_check_parms) {
1859509Smrj 		e = rootnex_valid_bind_parms(dmareq, attr);
1860509Smrj 		if (e != DDI_SUCCESS) {
1861509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1862509Smrj 			rootnex_clean_dmahdl(hp);
1863509Smrj 			return (e);
1864509Smrj 		}
1865509Smrj 	}
1866509Smrj 
1867509Smrj 	/* save away the original bind info */
1868509Smrj 	dma->dp_dma = dmareq->dmar_object;
1869509Smrj 
18707613SVikram.Hegde@Sun.COM #if !defined(__xpv)
18717589SVikram.Hegde@Sun.COM 	if (rootnex_state->r_intel_iommu_enabled) {
18727589SVikram.Hegde@Sun.COM 		e = intel_iommu_map_sgl(handle, dmareq,
18737589SVikram.Hegde@Sun.COM 		    rootnex_state->r_prealloc_cookies);
18747589SVikram.Hegde@Sun.COM 
18757589SVikram.Hegde@Sun.COM 		switch (e) {
18767589SVikram.Hegde@Sun.COM 		case IOMMU_SGL_SUCCESS:
18777589SVikram.Hegde@Sun.COM 			goto rootnex_sgl_end;
18787589SVikram.Hegde@Sun.COM 
18797589SVikram.Hegde@Sun.COM 		case IOMMU_SGL_DISABLE:
18807589SVikram.Hegde@Sun.COM 			goto rootnex_sgl_start;
18817589SVikram.Hegde@Sun.COM 
18827589SVikram.Hegde@Sun.COM 		case IOMMU_SGL_NORESOURCES:
18837589SVikram.Hegde@Sun.COM 			cmn_err(CE_WARN, "iommu map sgl failed for %s",
18847589SVikram.Hegde@Sun.COM 			    ddi_node_name(dma->dp_dip));
18857589SVikram.Hegde@Sun.COM 			rootnex_clean_dmahdl(hp);
18867589SVikram.Hegde@Sun.COM 			return (DDI_DMA_NORESOURCES);
18877589SVikram.Hegde@Sun.COM 
18887589SVikram.Hegde@Sun.COM 		default:
18897589SVikram.Hegde@Sun.COM 			cmn_err(CE_WARN,
18907589SVikram.Hegde@Sun.COM 			    "undefined value returned from"
18917589SVikram.Hegde@Sun.COM 			    " intel_iommu_map_sgl: %d",
18927589SVikram.Hegde@Sun.COM 			    e);
18937589SVikram.Hegde@Sun.COM 			rootnex_clean_dmahdl(hp);
18947589SVikram.Hegde@Sun.COM 			return (DDI_DMA_NORESOURCES);
18957589SVikram.Hegde@Sun.COM 		}
18967589SVikram.Hegde@Sun.COM 	}
18977613SVikram.Hegde@Sun.COM #endif
18987589SVikram.Hegde@Sun.COM 
18997589SVikram.Hegde@Sun.COM rootnex_sgl_start:
1900509Smrj 	/*
1901509Smrj 	 * Figure out a rough estimate of what maximum number of pages this
1902509Smrj 	 * buffer could use (a high estimate of course).
1903509Smrj 	 */
1904509Smrj 	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
1905509Smrj 
1906509Smrj 	/*
1907509Smrj 	 * We'll use the pre-allocated cookies for any bind that will *always*
1908509Smrj 	 * fit (more important to be consistent, we don't want to create
1909509Smrj 	 * additional degenerate cases).
1910509Smrj 	 */
1911509Smrj 	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
1912509Smrj 		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
1913509Smrj 		dma->dp_need_to_free_cookie = B_FALSE;
1914509Smrj 		DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
1915509Smrj 		    uint_t, sinfo->si_max_pages);
1916509Smrj 
1917509Smrj 	/*
1918509Smrj 	 * For anything larger than that, we'll go ahead and allocate the
1919509Smrj 	 * maximum number of pages we expect to see. Hopefuly, we won't be
1920509Smrj 	 * seeing this path in the fast path for high performance devices very
1921509Smrj 	 * frequently.
1922509Smrj 	 *
1923509Smrj 	 * a ddi bind interface that allowed the driver to provide storage to
1924509Smrj 	 * the bind interface would speed this case up.
1925509Smrj 	 */
1926509Smrj 	} else {
1927509Smrj 		/* convert the sleep flags */
1928509Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
1929509Smrj 			kmflag =  KM_SLEEP;
1930509Smrj 		} else {
1931509Smrj 			kmflag =  KM_NOSLEEP;
1932509Smrj 		}
1933509Smrj 
1934509Smrj 		/*
1935509Smrj 		 * Save away how much memory we allocated. If we're doing a
1936509Smrj 		 * nosleep, the alloc could fail...
1937509Smrj 		 */
1938509Smrj 		dma->dp_cookie_size = sinfo->si_max_pages *
1939509Smrj 		    sizeof (ddi_dma_cookie_t);
1940509Smrj 		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
1941509Smrj 		if (dma->dp_cookies == NULL) {
1942509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1943509Smrj 			rootnex_clean_dmahdl(hp);
1944509Smrj 			return (DDI_DMA_NORESOURCES);
1945509Smrj 		}
1946509Smrj 		dma->dp_need_to_free_cookie = B_TRUE;
1947509Smrj 		DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t,
1948509Smrj 		    sinfo->si_max_pages);
1949509Smrj 	}
1950509Smrj 	hp->dmai_cookie = dma->dp_cookies;
1951509Smrj 
1952509Smrj 	/*
1953509Smrj 	 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
1954509Smrj 	 * looking at the contraints in the dma structure. It will then put some
1955509Smrj 	 * additional state about the sgl in the dma struct (i.e. is the sgl
1956509Smrj 	 * clean, or do we need to do some munging; how many pages need to be
1957509Smrj 	 * copied, etc.)
1958509Smrj 	 */
1959509Smrj 	rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies,
1960509Smrj 	    &dma->dp_sglinfo);
19617589SVikram.Hegde@Sun.COM 
19627589SVikram.Hegde@Sun.COM rootnex_sgl_end:
1963509Smrj 	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
1964509Smrj 	/* if we don't need a copy buffer, we don't need to sync */
1965509Smrj 	if (sinfo->si_copybuf_req == 0) {
1966509Smrj 		hp->dmai_rflags |= DMP_NOSYNC;
1967509Smrj 	}
1968509Smrj 
1969509Smrj 	/*
1970509Smrj 	 * if we don't need the copybuf and we don't need to do a partial,  we
1971509Smrj 	 * hit the fast path. All the high performance devices should be trying
1972509Smrj 	 * to hit this path. To hit this path, a device should be able to reach
1973509Smrj 	 * all of memory, shouldn't try to bind more than it can transfer, and
1974509Smrj 	 * the buffer shouldn't require more cookies than the driver/device can
1975509Smrj 	 * handle [sgllen]).
1976509Smrj 	 */
1977509Smrj 	if ((sinfo->si_copybuf_req == 0) &&
1978509Smrj 	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
1979509Smrj 	    (dma->dp_dma.dmao_size < dma->dp_maxxfer)) {
1980509Smrj 		/*
19815591Sstephh 		 * If the driver supports FMA, insert the handle in the FMA DMA
19825591Sstephh 		 * handle cache.
19835591Sstephh 		 */
19845591Sstephh 		if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
19855591Sstephh 			hp->dmai_error.err_cf = rootnex_dma_check;
19865591Sstephh 			(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
19875591Sstephh 		}
19885591Sstephh 
19895591Sstephh 		/*
1990509Smrj 		 * copy out the first cookie and ccountp, set the cookie
1991509Smrj 		 * pointer to the second cookie. The first cookie is passed
1992509Smrj 		 * back on the stack. Additional cookies are accessed via
1993509Smrj 		 * ddi_dma_nextcookie()
1994509Smrj 		 */
1995509Smrj 		*cookiep = dma->dp_cookies[0];
1996509Smrj 		*ccountp = sinfo->si_sgl_size;
1997509Smrj 		hp->dmai_cookie++;
1998509Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
1999509Smrj 		hp->dmai_nwin = 1;
2000509Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2001509Smrj 		DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, uint64_t,
2002509Smrj 		    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
2003509Smrj 		    dma->dp_dma.dmao_size);
2004509Smrj 		return (DDI_DMA_MAPPED);
2005509Smrj 	}
2006509Smrj 
2007509Smrj 	/*
2008509Smrj 	 * go to the slow path, we may need to alloc more memory, create
2009509Smrj 	 * multiple windows, and munge up a sgl to make the device happy.
2010509Smrj 	 */
2011509Smrj 	e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag);
2012509Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
2013509Smrj 		if (dma->dp_need_to_free_cookie) {
2014509Smrj 			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2015509Smrj 		}
2016509Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2017509Smrj 		rootnex_clean_dmahdl(hp); /* must be after free cookie */
2018509Smrj 		return (e);
2019509Smrj 	}
2020509Smrj 
20215591Sstephh 	/*
20225591Sstephh 	 * If the driver supports FMA, insert the handle in the FMA DMA handle
20235591Sstephh 	 * cache.
20245591Sstephh 	 */
20255591Sstephh 	if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
20265591Sstephh 		hp->dmai_error.err_cf = rootnex_dma_check;
20275591Sstephh 		(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
20285591Sstephh 	}
20295591Sstephh 
2030509Smrj 	/* if the first window uses the copy buffer, sync it for the device */
2031509Smrj 	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
2032509Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
2033509Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
2034509Smrj 		    DDI_DMA_SYNC_FORDEV);
2035509Smrj 	}
2036509Smrj 
2037509Smrj 	/*
2038509Smrj 	 * copy out the first cookie and ccountp, set the cookie pointer to the
2039509Smrj 	 * second cookie. Make sure the partial flag is set/cleared correctly.
2040509Smrj 	 * If we have a partial map (i.e. multiple windows), the number of
2041509Smrj 	 * cookies we return is the number of cookies in the first window.
2042509Smrj 	 */
2043509Smrj 	if (e == DDI_DMA_MAPPED) {
2044509Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2045509Smrj 		*ccountp = sinfo->si_sgl_size;
2046509Smrj 	} else {
2047509Smrj 		hp->dmai_rflags |= DDI_DMA_PARTIAL;
2048509Smrj 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
2049509Smrj 		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
2050509Smrj 	}
2051509Smrj 	*cookiep = dma->dp_cookies[0];
2052509Smrj 	hp->dmai_cookie++;
2053509Smrj 
2054509Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2055509Smrj 	DTRACE_PROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
2056509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
2057509Smrj 	    dma->dp_dma.dmao_size);
2058509Smrj 	return (e);
2059509Smrj }
2060509Smrj 
2061509Smrj 
2062509Smrj /*
20637613SVikram.Hegde@Sun.COM  * rootnex_dma_bindhdl()
20647613SVikram.Hegde@Sun.COM  *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
2065509Smrj  */
20667613SVikram.Hegde@Sun.COM static int
20677613SVikram.Hegde@Sun.COM rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
20687613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
20697613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
20707613SVikram.Hegde@Sun.COM {
20717613SVikram.Hegde@Sun.COM #if !defined(__xpv)
20727613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
20737613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq,
20747613SVikram.Hegde@Sun.COM 		    cookiep, ccountp));
20757613SVikram.Hegde@Sun.COM 	}
20767613SVikram.Hegde@Sun.COM #endif
20777613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_bindhdl(dip, rdip, handle, dmareq,
20787613SVikram.Hegde@Sun.COM 	    cookiep, ccountp));
20797613SVikram.Hegde@Sun.COM }
20807613SVikram.Hegde@Sun.COM 
2081509Smrj /*ARGSUSED*/
2082509Smrj static int
20837613SVikram.Hegde@Sun.COM rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2084509Smrj     ddi_dma_handle_t handle)
2085509Smrj {
2086509Smrj 	ddi_dma_impl_t *hp;
2087509Smrj 	rootnex_dma_t *dma;
2088509Smrj 	int e;
2089509Smrj 
2090509Smrj 	hp = (ddi_dma_impl_t *)handle;
2091509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
2092509Smrj 
2093509Smrj 	/* make sure the buffer wasn't free'd before calling unbind */
2094509Smrj 	if (rootnex_unbind_verify_buffer) {
2095509Smrj 		e = rootnex_verify_buffer(dma);
2096509Smrj 		if (e != DDI_SUCCESS) {
2097509Smrj 			ASSERT(0);
2098509Smrj 			return (DDI_FAILURE);
2099509Smrj 		}
2100509Smrj 	}
2101509Smrj 
2102509Smrj 	/* sync the current window before unbinding the buffer */
2103509Smrj 	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
2104509Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
2105509Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
2106509Smrj 		    DDI_DMA_SYNC_FORCPU);
2107509Smrj 	}
2108509Smrj 
2109509Smrj 	/*
21101865Sdilpreet 	 * If the driver supports FMA, remove the handle in the FMA DMA handle
21111865Sdilpreet 	 * cache.
21121865Sdilpreet 	 */
21131865Sdilpreet 	if (hp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
21141865Sdilpreet 		if ((DEVI(rdip)->devi_fmhdl != NULL) &&
21151865Sdilpreet 		    (DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap))) {
21161865Sdilpreet 			(void) ndi_fmc_remove(rdip, DMA_HANDLE, hp);
21171865Sdilpreet 		}
21181865Sdilpreet 	}
21191865Sdilpreet 
21201865Sdilpreet 	/*
2121509Smrj 	 * cleanup and copy buffer or window state. if we didn't use the copy
2122509Smrj 	 * buffer or windows, there won't be much to do :-)
2123509Smrj 	 */
2124509Smrj 	rootnex_teardown_copybuf(dma);
2125509Smrj 	rootnex_teardown_windows(dma);
2126509Smrj 
21277613SVikram.Hegde@Sun.COM #if !defined(__xpv)
2128509Smrj 	/*
21297589SVikram.Hegde@Sun.COM 	 * If intel iommu enabled, clean up the page tables and free the dvma
21307589SVikram.Hegde@Sun.COM 	 */
21317589SVikram.Hegde@Sun.COM 	if (rootnex_state->r_intel_iommu_enabled) {
21327589SVikram.Hegde@Sun.COM 		intel_iommu_unmap_sgl(handle);
21337589SVikram.Hegde@Sun.COM 	}
21347613SVikram.Hegde@Sun.COM #endif
21357589SVikram.Hegde@Sun.COM 
21367589SVikram.Hegde@Sun.COM 	/*
2137509Smrj 	 * If we had to allocate space to for the worse case sgl (it didn't
2138509Smrj 	 * fit into our pre-allocate buffer), free that up now
2139509Smrj 	 */
2140509Smrj 	if (dma->dp_need_to_free_cookie) {
2141509Smrj 		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2142509Smrj 	}
2143509Smrj 
2144509Smrj 	/*
2145509Smrj 	 * clean up the handle so it's ready for the next bind (i.e. if the
2146509Smrj 	 * handle is reused).
2147509Smrj 	 */
2148509Smrj 	rootnex_clean_dmahdl(hp);
2149509Smrj 
2150509Smrj 	if (rootnex_state->r_dvma_call_list_id)
2151509Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
2152509Smrj 
2153509Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2154509Smrj 	DTRACE_PROBE1(rootnex__unbind, uint64_t,
2155509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2156509Smrj 
2157509Smrj 	return (DDI_SUCCESS);
2158509Smrj }
2159509Smrj 
21607613SVikram.Hegde@Sun.COM /*
21617613SVikram.Hegde@Sun.COM  * rootnex_dma_unbindhdl()
21627613SVikram.Hegde@Sun.COM  *    called from ddi_dma_unbind_handle()
21637613SVikram.Hegde@Sun.COM  */
21647613SVikram.Hegde@Sun.COM /*ARGSUSED*/
21657613SVikram.Hegde@Sun.COM static int
21667613SVikram.Hegde@Sun.COM rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
21677613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle)
21687613SVikram.Hegde@Sun.COM {
21697613SVikram.Hegde@Sun.COM #if !defined(__xpv)
21707613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
21717613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_unbindhdl(dip, rdip, handle));
21727613SVikram.Hegde@Sun.COM 	}
21737613SVikram.Hegde@Sun.COM #endif
21747613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_unbindhdl(dip, rdip, handle));
21757613SVikram.Hegde@Sun.COM }
21767613SVikram.Hegde@Sun.COM 
2177*7617SVikram.Hegde@Sun.COM #if !defined(__xpv)
21787613SVikram.Hegde@Sun.COM /*ARGSUSED*/
21797613SVikram.Hegde@Sun.COM static void
21807613SVikram.Hegde@Sun.COM rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
21817613SVikram.Hegde@Sun.COM {
21827613SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
21837613SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
21847613SVikram.Hegde@Sun.COM 
21857613SVikram.Hegde@Sun.COM 	hp->dmai_cookie = &dma->dp_cookies[0];
21867613SVikram.Hegde@Sun.COM 	hp->dmai_cookie++;
21877613SVikram.Hegde@Sun.COM }
21887613SVikram.Hegde@Sun.COM 
21897613SVikram.Hegde@Sun.COM /*ARGSUSED*/
21907613SVikram.Hegde@Sun.COM static int
21917613SVikram.Hegde@Sun.COM rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
21927613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
21937613SVikram.Hegde@Sun.COM {
21947613SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
21957613SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
21967613SVikram.Hegde@Sun.COM 
21977613SVikram.Hegde@Sun.COM 
21987613SVikram.Hegde@Sun.COM 	if (hp->dmai_rflags & DDI_DMA_PARTIAL) {
21997613SVikram.Hegde@Sun.COM 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
22007613SVikram.Hegde@Sun.COM 	} else {
22017613SVikram.Hegde@Sun.COM 		*ccountp = dma->dp_sglinfo.si_sgl_size;
22027613SVikram.Hegde@Sun.COM 	}
22037613SVikram.Hegde@Sun.COM 	*cookiep = dma->dp_cookies[0];
22047613SVikram.Hegde@Sun.COM 
22057613SVikram.Hegde@Sun.COM 	/* reset the cookies */
22067613SVikram.Hegde@Sun.COM 	hp->dmai_cookie = &dma->dp_cookies[0];
22077613SVikram.Hegde@Sun.COM 	hp->dmai_cookie++;
22087613SVikram.Hegde@Sun.COM 
22097613SVikram.Hegde@Sun.COM 	return (DDI_SUCCESS);
22107613SVikram.Hegde@Sun.COM }
2211*7617SVikram.Hegde@Sun.COM #endif
2212509Smrj 
2213509Smrj /*
2214509Smrj  * rootnex_verify_buffer()
2215509Smrj  *   verify buffer wasn't free'd
2216509Smrj  */
2217509Smrj static int
2218509Smrj rootnex_verify_buffer(rootnex_dma_t *dma)
2219509Smrj {
2220509Smrj 	page_t **pplist;
2221509Smrj 	caddr_t vaddr;
2222509Smrj 	uint_t pcnt;
2223509Smrj 	uint_t poff;
2224509Smrj 	page_t *pp;
22251865Sdilpreet 	char b;
2226509Smrj 	int i;
2227509Smrj 
2228509Smrj 	/* Figure out how many pages this buffer occupies */
2229509Smrj 	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
2230509Smrj 		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
2231509Smrj 	} else {
2232509Smrj 		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
2233509Smrj 		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2234509Smrj 	}
2235509Smrj 	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
2236509Smrj 
2237509Smrj 	switch (dma->dp_dma.dmao_type) {
22380Sstevel@tonic-gate 	case DMA_OTYP_PAGES:
2239509Smrj 		/*
2240509Smrj 		 * for a linked list of pp's walk through them to make sure
2241509Smrj 		 * they're locked and not free.
2242509Smrj 		 */
2243509Smrj 		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
2244509Smrj 		for (i = 0; i < pcnt; i++) {
2245509Smrj 			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
2246509Smrj 				return (DDI_FAILURE);
22470Sstevel@tonic-gate 			}
2248509Smrj 			pp = pp->p_next;
22490Sstevel@tonic-gate 		}
22500Sstevel@tonic-gate 		break;
2251509Smrj 
22520Sstevel@tonic-gate 	case DMA_OTYP_VADDR:
22530Sstevel@tonic-gate 	case DMA_OTYP_BUFVADDR:
2254509Smrj 		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
2255509Smrj 		/*
2256509Smrj 		 * for an array of pp's walk through them to make sure they're
2257509Smrj 		 * not free. It's possible that they may not be locked.
2258509Smrj 		 */
2259509Smrj 		if (pplist) {
2260509Smrj 			for (i = 0; i < pcnt; i++) {
2261509Smrj 				if (PP_ISFREE(pplist[i])) {
2262509Smrj 					return (DDI_FAILURE);
2263509Smrj 				}
2264509Smrj 			}
2265509Smrj 
2266509Smrj 		/* For a virtual address, try to peek at each page */
2267509Smrj 		} else {
2268509Smrj 			if (dma->dp_sglinfo.si_asp == &kas) {
2269509Smrj 				for (i = 0; i < pcnt; i++) {
22701865Sdilpreet 					if (ddi_peek8(NULL, vaddr, &b) ==
22711865Sdilpreet 					    DDI_FAILURE)
2272509Smrj 						return (DDI_FAILURE);
22731865Sdilpreet 					vaddr += MMU_PAGESIZE;
2274509Smrj 				}
2275509Smrj 			}
2276509Smrj 		}
2277509Smrj 		break;
2278509Smrj 
2279509Smrj 	default:
2280509Smrj 		ASSERT(0);
2281509Smrj 		break;
2282509Smrj 	}
2283509Smrj 
2284509Smrj 	return (DDI_SUCCESS);
2285509Smrj }
2286509Smrj 
2287509Smrj 
2288509Smrj /*
2289509Smrj  * rootnex_clean_dmahdl()
2290509Smrj  *    Clean the dma handle. This should be called on a handle alloc and an
2291509Smrj  *    unbind handle. Set the handle state to the default settings.
2292509Smrj  */
2293509Smrj static void
2294509Smrj rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
2295509Smrj {
2296509Smrj 	rootnex_dma_t *dma;
2297509Smrj 
2298509Smrj 
2299509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
2300509Smrj 
2301509Smrj 	hp->dmai_nwin = 0;
2302509Smrj 	dma->dp_current_cookie = 0;
2303509Smrj 	dma->dp_copybuf_size = 0;
2304509Smrj 	dma->dp_window = NULL;
2305509Smrj 	dma->dp_cbaddr = NULL;
2306509Smrj 	dma->dp_inuse = B_FALSE;
2307509Smrj 	dma->dp_need_to_free_cookie = B_FALSE;
2308509Smrj 	dma->dp_need_to_free_window = B_FALSE;
2309509Smrj 	dma->dp_partial_required = B_FALSE;
2310509Smrj 	dma->dp_trim_required = B_FALSE;
2311509Smrj 	dma->dp_sglinfo.si_copybuf_req = 0;
2312509Smrj #if !defined(__amd64)
2313509Smrj 	dma->dp_cb_remaping = B_FALSE;
2314509Smrj 	dma->dp_kva = NULL;
2315509Smrj #endif
2316509Smrj 
2317509Smrj 	/* FMA related initialization */
2318509Smrj 	hp->dmai_fault = 0;
2319509Smrj 	hp->dmai_fault_check = NULL;
2320509Smrj 	hp->dmai_fault_notify = NULL;
2321509Smrj 	hp->dmai_error.err_ena = 0;
2322509Smrj 	hp->dmai_error.err_status = DDI_FM_OK;
2323509Smrj 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
2324509Smrj 	hp->dmai_error.err_ontrap = NULL;
2325509Smrj 	hp->dmai_error.err_fep = NULL;
23261865Sdilpreet 	hp->dmai_error.err_cf = NULL;
2327509Smrj }
2328509Smrj 
2329509Smrj 
2330509Smrj /*
2331509Smrj  * rootnex_valid_alloc_parms()
2332509Smrj  *    Called in ddi_dma_alloc_handle path to validate its parameters.
2333509Smrj  */
2334509Smrj static int
2335509Smrj rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
2336509Smrj {
2337509Smrj 	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
2338509Smrj 	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
2339509Smrj 	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
2340509Smrj 	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
2341509Smrj 		return (DDI_DMA_BADATTR);
2342509Smrj 	}
2343509Smrj 
2344509Smrj 	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
2345509Smrj 		return (DDI_DMA_BADATTR);
2346509Smrj 	}
2347509Smrj 
2348509Smrj 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
2349509Smrj 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
2350509Smrj 	    attr->dma_attr_sgllen <= 0) {
2351509Smrj 		return (DDI_DMA_BADATTR);
2352509Smrj 	}
2353509Smrj 
2354509Smrj 	/* We should be able to DMA into every byte offset in a page */
2355509Smrj 	if (maxsegmentsize < MMU_PAGESIZE) {
2356509Smrj 		return (DDI_DMA_BADATTR);
2357509Smrj 	}
2358509Smrj 
2359509Smrj 	return (DDI_SUCCESS);
2360509Smrj }
2361509Smrj 
2362509Smrj 
2363509Smrj /*
2364509Smrj  * rootnex_valid_bind_parms()
2365509Smrj  *    Called in ddi_dma_*_bind_handle path to validate its parameters.
2366509Smrj  */
2367509Smrj /* ARGSUSED */
2368509Smrj static int
2369509Smrj rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
2370509Smrj {
2371509Smrj #if !defined(__amd64)
2372509Smrj 	/*
2373509Smrj 	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
2374509Smrj 	 * we can track the offset for the obsoleted interfaces.
2375509Smrj 	 */
2376509Smrj 	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
2377509Smrj 		return (DDI_DMA_TOOBIG);
2378509Smrj 	}
2379509Smrj #endif
2380509Smrj 
2381509Smrj 	return (DDI_SUCCESS);
2382509Smrj }
2383509Smrj 
2384509Smrj 
2385509Smrj /*
2386509Smrj  * rootnex_get_sgl()
2387509Smrj  *    Called in bind fastpath to get the sgl. Most of this will be replaced
2388509Smrj  *    with a call to the vm layer when vm2.0 comes around...
2389509Smrj  */
2390509Smrj static void
2391509Smrj rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
2392509Smrj     rootnex_sglinfo_t *sglinfo)
2393509Smrj {
2394509Smrj 	ddi_dma_atyp_t buftype;
23955084Sjohnlev 	rootnex_addr_t raddr;
2396509Smrj 	uint64_t last_page;
2397509Smrj 	uint64_t offset;
2398509Smrj 	uint64_t addrhi;
2399509Smrj 	uint64_t addrlo;
2400509Smrj 	uint64_t maxseg;
2401509Smrj 	page_t **pplist;
2402509Smrj 	uint64_t paddr;
2403509Smrj 	uint32_t psize;
2404509Smrj 	uint32_t size;
2405509Smrj 	caddr_t vaddr;
2406509Smrj 	uint_t pcnt;
2407509Smrj 	page_t *pp;
2408509Smrj 	uint_t cnt;
2409509Smrj 
2410509Smrj 
2411509Smrj 	/* shortcuts */
2412509Smrj 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
2413509Smrj 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
2414509Smrj 	maxseg = sglinfo->si_max_cookie_size;
2415509Smrj 	buftype = dmar_object->dmao_type;
2416509Smrj 	addrhi = sglinfo->si_max_addr;
2417509Smrj 	addrlo = sglinfo->si_min_addr;
2418509Smrj 	size = dmar_object->dmao_size;
2419509Smrj 
2420509Smrj 	pcnt = 0;
2421509Smrj 	cnt = 0;
2422509Smrj 
2423509Smrj 	/*
2424509Smrj 	 * if we were passed down a linked list of pages, i.e. pointer to
2425509Smrj 	 * page_t, use this to get our physical address and buf offset.
2426509Smrj 	 */
2427509Smrj 	if (buftype == DMA_OTYP_PAGES) {
2428509Smrj 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
2429509Smrj 		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2430509Smrj 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
2431509Smrj 		    MMU_PAGEOFFSET;
24325084Sjohnlev 		paddr = pfn_to_pa(pp->p_pagenum) + offset;
2433509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2434509Smrj 		pp = pp->p_next;
2435509Smrj 		sglinfo->si_asp = NULL;
2436509Smrj 
2437509Smrj 	/*
2438509Smrj 	 * We weren't passed down a linked list of pages, but if we were passed
2439509Smrj 	 * down an array of pages, use this to get our physical address and buf
2440509Smrj 	 * offset.
2441509Smrj 	 */
2442509Smrj 	} else if (pplist != NULL) {
2443509Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2444509Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
2445509Smrj 
2446509Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2447509Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2448509Smrj 		if (sglinfo->si_asp == NULL) {
2449509Smrj 			sglinfo->si_asp = &kas;
2450509Smrj 		}
2451509Smrj 
2452509Smrj 		ASSERT(!PP_ISFREE(pplist[pcnt]));
24535084Sjohnlev 		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2454509Smrj 		paddr += offset;
2455509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2456509Smrj 		pcnt++;
2457509Smrj 
2458509Smrj 	/*
2459509Smrj 	 * All we have is a virtual address, we'll need to call into the VM
2460509Smrj 	 * to get the physical address.
2461509Smrj 	 */
2462509Smrj 	} else {
2463509Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2464509Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
2465509Smrj 
2466509Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2467509Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2468509Smrj 		if (sglinfo->si_asp == NULL) {
2469509Smrj 			sglinfo->si_asp = &kas;
2470509Smrj 		}
2471509Smrj 
24725084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
2473509Smrj 		paddr += offset;
2474509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2475509Smrj 		vaddr += psize;
2476509Smrj 	}
2477509Smrj 
24785084Sjohnlev #ifdef __xpv
24795084Sjohnlev 	/*
24805084Sjohnlev 	 * If we're dom0, we're using a real device so we need to load
24815084Sjohnlev 	 * the cookies with MFNs instead of PFNs.
24825084Sjohnlev 	 */
24835084Sjohnlev 	raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
24845084Sjohnlev #else
24855084Sjohnlev 	raddr = paddr;
24865084Sjohnlev #endif
24875084Sjohnlev 
2488509Smrj 	/*
2489509Smrj 	 * Setup the first cookie with the physical address of the page and the
2490509Smrj 	 * size of the page (which takes into account the initial offset into
2491509Smrj 	 * the page.
2492509Smrj 	 */
24935084Sjohnlev 	sgl[cnt].dmac_laddress = raddr;
2494509Smrj 	sgl[cnt].dmac_size = psize;
2495509Smrj 	sgl[cnt].dmac_type = 0;
2496509Smrj 
2497509Smrj 	/*
2498509Smrj 	 * Save away the buffer offset into the page. We'll need this later in
2499509Smrj 	 * the copy buffer code to help figure out the page index within the
2500509Smrj 	 * buffer and the offset into the current page.
2501509Smrj 	 */
2502509Smrj 	sglinfo->si_buf_offset = offset;
2503509Smrj 
2504509Smrj 	/*
2505509Smrj 	 * If the DMA engine can't reach the physical address, increase how
2506509Smrj 	 * much copy buffer we need. We always increase by pagesize so we don't
2507509Smrj 	 * have to worry about converting offsets. Set a flag in the cookies
2508509Smrj 	 * dmac_type to indicate that it uses the copy buffer. If this isn't the
2509509Smrj 	 * last cookie, go to the next cookie (since we separate each page which
2510509Smrj 	 * uses the copy buffer in case the copy buffer is not physically
2511509Smrj 	 * contiguous.
2512509Smrj 	 */
25135084Sjohnlev 	if ((raddr < addrlo) || ((raddr + psize) > addrhi)) {
2514509Smrj 		sglinfo->si_copybuf_req += MMU_PAGESIZE;
2515509Smrj 		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2516509Smrj 		if ((cnt + 1) < sglinfo->si_max_pages) {
2517509Smrj 			cnt++;
2518509Smrj 			sgl[cnt].dmac_laddress = 0;
2519509Smrj 			sgl[cnt].dmac_size = 0;
2520509Smrj 			sgl[cnt].dmac_type = 0;
2521509Smrj 		}
2522509Smrj 	}
2523509Smrj 
2524509Smrj 	/*
2525509Smrj 	 * save this page's physical address so we can figure out if the next
2526509Smrj 	 * page is physically contiguous. Keep decrementing size until we are
2527509Smrj 	 * done with the buffer.
2528509Smrj 	 */
25295084Sjohnlev 	last_page = raddr & MMU_PAGEMASK;
2530509Smrj 	size -= psize;
2531509Smrj 
2532509Smrj 	while (size > 0) {
2533509Smrj 		/* Get the size for this page (i.e. partial or full page) */
2534509Smrj 		psize = MIN(size, MMU_PAGESIZE);
2535509Smrj 
2536509Smrj 		if (buftype == DMA_OTYP_PAGES) {
2537509Smrj 			/* get the paddr from the page_t */
2538509Smrj 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
25395084Sjohnlev 			paddr = pfn_to_pa(pp->p_pagenum);
2540509Smrj 			pp = pp->p_next;
2541509Smrj 		} else if (pplist != NULL) {
2542509Smrj 			/* index into the array of page_t's to get the paddr */
2543509Smrj 			ASSERT(!PP_ISFREE(pplist[pcnt]));
25445084Sjohnlev 			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2545509Smrj 			pcnt++;
25460Sstevel@tonic-gate 		} else {
2547509Smrj 			/* call into the VM to get the paddr */
25485084Sjohnlev 			paddr =  pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
2549509Smrj 			    vaddr));
2550509Smrj 			vaddr += psize;
2551509Smrj 		}
2552509Smrj 
25535084Sjohnlev #ifdef __xpv
25545084Sjohnlev 		/*
25555084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
25565084Sjohnlev 		 * the cookies with MFNs instead of PFNs.
25575084Sjohnlev 		 */
25585084Sjohnlev 		raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
25595084Sjohnlev #else
25605084Sjohnlev 		raddr = paddr;
25615084Sjohnlev #endif
25625084Sjohnlev 
2563509Smrj 		/* check to see if this page needs the copy buffer */
25645084Sjohnlev 		if ((raddr < addrlo) || ((raddr + psize) > addrhi)) {
2565509Smrj 			sglinfo->si_copybuf_req += MMU_PAGESIZE;
2566509Smrj 
25670Sstevel@tonic-gate 			/*
2568509Smrj 			 * if there is something in the current cookie, go to
2569509Smrj 			 * the next one. We only want one page in a cookie which
2570509Smrj 			 * uses the copybuf since the copybuf doesn't have to
2571509Smrj 			 * be physically contiguous.
2572509Smrj 			 */
2573509Smrj 			if (sgl[cnt].dmac_size != 0) {
2574509Smrj 				cnt++;
2575509Smrj 			}
25765084Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
2577509Smrj 			sgl[cnt].dmac_size = psize;
2578509Smrj #if defined(__amd64)
2579509Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2580509Smrj #else
2581509Smrj 			/*
2582509Smrj 			 * save the buf offset for 32-bit kernel. used in the
2583509Smrj 			 * obsoleted interfaces.
2584509Smrj 			 */
2585509Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
2586509Smrj 			    (dmar_object->dmao_size - size);
2587509Smrj #endif
2588509Smrj 			/* if this isn't the last cookie, go to the next one */
2589509Smrj 			if ((cnt + 1) < sglinfo->si_max_pages) {
2590509Smrj 				cnt++;
2591509Smrj 				sgl[cnt].dmac_laddress = 0;
2592509Smrj 				sgl[cnt].dmac_size = 0;
2593509Smrj 				sgl[cnt].dmac_type = 0;
2594509Smrj 			}
2595509Smrj 
2596509Smrj 		/*
2597509Smrj 		 * this page didn't need the copy buffer, if it's not physically
2598509Smrj 		 * contiguous, or it would put us over a segment boundary, or it
2599509Smrj 		 * puts us over the max cookie size, or the current sgl doesn't
2600509Smrj 		 * have anything in it.
2601509Smrj 		 */
26025084Sjohnlev 		} else if (((last_page + MMU_PAGESIZE) != raddr) ||
26035084Sjohnlev 		    !(raddr & sglinfo->si_segmask) ||
2604509Smrj 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
2605509Smrj 		    (sgl[cnt].dmac_size == 0)) {
2606509Smrj 			/*
2607509Smrj 			 * if we're not already in a new cookie, go to the next
2608509Smrj 			 * cookie.
2609509Smrj 			 */
2610509Smrj 			if (sgl[cnt].dmac_size != 0) {
2611509Smrj 				cnt++;
2612509Smrj 			}
2613509Smrj 
2614509Smrj 			/* save the cookie information */
26155084Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
2616509Smrj 			sgl[cnt].dmac_size = psize;
2617509Smrj #if defined(__amd64)
2618509Smrj 			sgl[cnt].dmac_type = 0;
2619509Smrj #else
2620509Smrj 			/*
2621509Smrj 			 * save the buf offset for 32-bit kernel. used in the
2622509Smrj 			 * obsoleted interfaces.
2623509Smrj 			 */
2624509Smrj 			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
2625509Smrj #endif
2626509Smrj 
2627509Smrj 		/*
2628509Smrj 		 * this page didn't need the copy buffer, it is physically
2629509Smrj 		 * contiguous with the last page, and it's <= the max cookie
2630509Smrj 		 * size.
2631509Smrj 		 */
2632509Smrj 		} else {
2633509Smrj 			sgl[cnt].dmac_size += psize;
2634509Smrj 
2635509Smrj 			/*
2636509Smrj 			 * if this exactly ==  the maximum cookie size, and
2637509Smrj 			 * it isn't the last cookie, go to the next cookie.
2638509Smrj 			 */
2639509Smrj 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
2640509Smrj 			    ((cnt + 1) < sglinfo->si_max_pages)) {
2641509Smrj 				cnt++;
2642509Smrj 				sgl[cnt].dmac_laddress = 0;
2643509Smrj 				sgl[cnt].dmac_size = 0;
2644509Smrj 				sgl[cnt].dmac_type = 0;
2645509Smrj 			}
2646509Smrj 		}
2647509Smrj 
2648509Smrj 		/*
2649509Smrj 		 * save this page's physical address so we can figure out if the
2650509Smrj 		 * next page is physically contiguous. Keep decrementing size
2651509Smrj 		 * until we are done with the buffer.
2652509Smrj 		 */
26535084Sjohnlev 		last_page = raddr;
2654509Smrj 		size -= psize;
2655509Smrj 	}
2656509Smrj 
2657509Smrj 	/* we're done, save away how many cookies the sgl has */
2658509Smrj 	if (sgl[cnt].dmac_size == 0) {
2659509Smrj 		ASSERT(cnt < sglinfo->si_max_pages);
2660509Smrj 		sglinfo->si_sgl_size = cnt;
2661509Smrj 	} else {
2662509Smrj 		sglinfo->si_sgl_size = cnt + 1;
2663509Smrj 	}
2664509Smrj }
2665509Smrj 
2666509Smrj 
2667509Smrj /*
2668509Smrj  * rootnex_bind_slowpath()
2669509Smrj  *    Call in the bind path if the calling driver can't use the sgl without
2670509Smrj  *    modifying it. We either need to use the copy buffer and/or we will end up
2671509Smrj  *    with a partial bind.
2672509Smrj  */
2673509Smrj static int
2674509Smrj rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
2675509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag)
2676509Smrj {
2677509Smrj 	rootnex_sglinfo_t *sinfo;
2678509Smrj 	rootnex_window_t *window;
2679509Smrj 	ddi_dma_cookie_t *cookie;
2680509Smrj 	size_t copybuf_used;
2681509Smrj 	size_t dmac_size;
2682509Smrj 	boolean_t partial;
2683509Smrj 	off_t cur_offset;
2684509Smrj 	page_t *cur_pp;
2685509Smrj 	major_t mnum;
2686509Smrj 	int e;
2687509Smrj 	int i;
2688509Smrj 
2689509Smrj 
2690509Smrj 	sinfo = &dma->dp_sglinfo;
2691509Smrj 	copybuf_used = 0;
2692509Smrj 	partial = B_FALSE;
2693509Smrj 
2694509Smrj 	/*
2695509Smrj 	 * If we're using the copybuf, set the copybuf state in dma struct.
2696509Smrj 	 * Needs to be first since it sets the copy buffer size.
2697509Smrj 	 */
2698509Smrj 	if (sinfo->si_copybuf_req != 0) {
2699509Smrj 		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
2700509Smrj 		if (e != DDI_SUCCESS) {
2701509Smrj 			return (e);
2702509Smrj 		}
2703509Smrj 	} else {
2704509Smrj 		dma->dp_copybuf_size = 0;
2705509Smrj 	}
2706509Smrj 
2707509Smrj 	/*
2708509Smrj 	 * Figure out if we need to do a partial mapping. If so, figure out
2709509Smrj 	 * if we need to trim the buffers when we munge the sgl.
2710509Smrj 	 */
2711509Smrj 	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
2712509Smrj 	    (dma->dp_dma.dmao_size > dma->dp_maxxfer) ||
2713509Smrj 	    (attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
2714509Smrj 		dma->dp_partial_required = B_TRUE;
2715509Smrj 		if (attr->dma_attr_granular != 1) {
2716509Smrj 			dma->dp_trim_required = B_TRUE;
2717509Smrj 		}
2718509Smrj 	} else {
2719509Smrj 		dma->dp_partial_required = B_FALSE;
2720509Smrj 		dma->dp_trim_required = B_FALSE;
2721509Smrj 	}
2722509Smrj 
2723509Smrj 	/* If we need to do a partial bind, make sure the driver supports it */
2724509Smrj 	if (dma->dp_partial_required &&
2725509Smrj 	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
2726509Smrj 
2727509Smrj 		mnum = ddi_driver_major(dma->dp_dip);
2728509Smrj 		/*
2729509Smrj 		 * patchable which allows us to print one warning per major
2730509Smrj 		 * number.
2731509Smrj 		 */
2732509Smrj 		if ((rootnex_bind_warn) &&
2733509Smrj 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
2734509Smrj 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
2735509Smrj 			cmn_err(CE_WARN, "!%s: coding error detected, the "
2736509Smrj 			    "driver is using ddi_dma_attr(9S) incorrectly. "
2737509Smrj 			    "There is a small risk of data corruption in "
2738509Smrj 			    "particular with large I/Os. The driver should be "
2739509Smrj 			    "replaced with a corrected version for proper "
2740509Smrj 			    "system operation. To disable this warning, add "
2741509Smrj 			    "'set rootnex:rootnex_bind_warn=0' to "
2742509Smrj 			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
2743509Smrj 		}
2744509Smrj 		return (DDI_DMA_TOOBIG);
2745509Smrj 	}
2746509Smrj 
2747509Smrj 	/*
2748509Smrj 	 * we might need multiple windows, setup state to handle them. In this
2749509Smrj 	 * code path, we will have at least one window.
2750509Smrj 	 */
2751509Smrj 	e = rootnex_setup_windows(hp, dma, attr, kmflag);
2752509Smrj 	if (e != DDI_SUCCESS) {
2753509Smrj 		rootnex_teardown_copybuf(dma);
2754509Smrj 		return (e);
2755509Smrj 	}
2756509Smrj 
2757509Smrj 	window = &dma->dp_window[0];
2758509Smrj 	cookie = &dma->dp_cookies[0];
2759509Smrj 	cur_offset = 0;
2760509Smrj 	rootnex_init_win(hp, dma, window, cookie, cur_offset);
2761509Smrj 	if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) {
2762509Smrj 		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
2763509Smrj 	}
2764509Smrj 
2765509Smrj 	/* loop though all the cookies we got back from get_sgl() */
2766509Smrj 	for (i = 0; i < sinfo->si_sgl_size; i++) {
2767509Smrj 		/*
2768509Smrj 		 * If we're using the copy buffer, check this cookie and setup
2769509Smrj 		 * its associated copy buffer state. If this cookie uses the
2770509Smrj 		 * copy buffer, make sure we sync this window during dma_sync.
2771509Smrj 		 */
2772509Smrj 		if (dma->dp_copybuf_size > 0) {
2773509Smrj 			rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie,
2774509Smrj 			    cur_offset, &copybuf_used, &cur_pp);
2775509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2776509Smrj 				window->wd_dosync = B_TRUE;
2777509Smrj 			}
2778509Smrj 		}
2779509Smrj 
2780509Smrj 		/*
2781509Smrj 		 * save away the cookie size, since it could be modified in
2782509Smrj 		 * the windowing code.
2783509Smrj 		 */
2784509Smrj 		dmac_size = cookie->dmac_size;
2785509Smrj 
2786509Smrj 		/* if we went over max copybuf size */
2787509Smrj 		if (dma->dp_copybuf_size &&
2788509Smrj 		    (copybuf_used > dma->dp_copybuf_size)) {
2789509Smrj 			partial = B_TRUE;
2790509Smrj 			e = rootnex_copybuf_window_boundary(hp, dma, &window,
2791509Smrj 			    cookie, cur_offset, &copybuf_used);
2792509Smrj 			if (e != DDI_SUCCESS) {
2793509Smrj 				rootnex_teardown_copybuf(dma);
2794509Smrj 				rootnex_teardown_windows(dma);
2795509Smrj 				return (e);
2796509Smrj 			}
2797509Smrj 
2798509Smrj 			/*
2799509Smrj 			 * if the coookie uses the copy buffer, make sure the
2800509Smrj 			 * new window we just moved to is set to sync.
2801509Smrj 			 */
2802509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2803509Smrj 				window->wd_dosync = B_TRUE;
2804509Smrj 			}
2805509Smrj 			DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *,
2806509Smrj 			    dma->dp_dip);
2807509Smrj 
2808509Smrj 		/* if the cookie cnt == max sgllen, move to the next window */
2809509Smrj 		} else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) {
2810509Smrj 			partial = B_TRUE;
2811509Smrj 			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
2812509Smrj 			e = rootnex_sgllen_window_boundary(hp, dma, &window,
2813509Smrj 			    cookie, attr, cur_offset);
2814509Smrj 			if (e != DDI_SUCCESS) {
2815509Smrj 				rootnex_teardown_copybuf(dma);
2816509Smrj 				rootnex_teardown_windows(dma);
2817509Smrj 				return (e);
2818509Smrj 			}
2819509Smrj 
2820509Smrj 			/*
2821509Smrj 			 * if the coookie uses the copy buffer, make sure the
2822509Smrj 			 * new window we just moved to is set to sync.
2823509Smrj 			 */
2824509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2825509Smrj 				window->wd_dosync = B_TRUE;
2826509Smrj 			}
2827509Smrj 			DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *,
2828509Smrj 			    dma->dp_dip);
2829509Smrj 
2830509Smrj 		/* else if we will be over maxxfer */
2831509Smrj 		} else if ((window->wd_size + dmac_size) >
2832509Smrj 		    dma->dp_maxxfer) {
2833509Smrj 			partial = B_TRUE;
2834509Smrj 			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
2835509Smrj 			    cookie);
2836509Smrj 			if (e != DDI_SUCCESS) {
2837509Smrj 				rootnex_teardown_copybuf(dma);
2838509Smrj 				rootnex_teardown_windows(dma);
2839509Smrj 				return (e);
2840509Smrj 			}
2841509Smrj 
2842509Smrj 			/*
2843509Smrj 			 * if the coookie uses the copy buffer, make sure the
2844509Smrj 			 * new window we just moved to is set to sync.
28450Sstevel@tonic-gate 			 */
2846509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2847509Smrj 				window->wd_dosync = B_TRUE;
2848509Smrj 			}
2849509Smrj 			DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *,
2850509Smrj 			    dma->dp_dip);
2851509Smrj 
2852509Smrj 		/* else this cookie fits in the current window */
2853509Smrj 		} else {
2854509Smrj 			window->wd_cookie_cnt++;
2855509Smrj 			window->wd_size += dmac_size;
2856509Smrj 		}
2857509Smrj 
2858509Smrj 		/* track our offset into the buffer, go to the next cookie */
2859509Smrj 		ASSERT(dmac_size <= dma->dp_dma.dmao_size);
2860509Smrj 		ASSERT(cookie->dmac_size <= dmac_size);
2861509Smrj 		cur_offset += dmac_size;
2862509Smrj 		cookie++;
2863509Smrj 	}
2864509Smrj 
2865509Smrj 	/* if we ended up with a zero sized window in the end, clean it up */
2866509Smrj 	if (window->wd_size == 0) {
2867509Smrj 		hp->dmai_nwin--;
2868509Smrj 		window--;
2869509Smrj 	}
2870509Smrj 
2871509Smrj 	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
2872509Smrj 
2873509Smrj 	if (!partial) {
2874509Smrj 		return (DDI_DMA_MAPPED);
2875509Smrj 	}
2876509Smrj 
2877509Smrj 	ASSERT(dma->dp_partial_required);
2878509Smrj 	return (DDI_DMA_PARTIAL_MAP);
2879509Smrj }
2880509Smrj 
2881509Smrj 
2882509Smrj /*
2883509Smrj  * rootnex_setup_copybuf()
2884509Smrj  *    Called in bind slowpath. Figures out if we're going to use the copy
2885509Smrj  *    buffer, and if we do, sets up the basic state to handle it.
2886509Smrj  */
2887509Smrj static int
2888509Smrj rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
2889509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr)
2890509Smrj {
2891509Smrj 	rootnex_sglinfo_t *sinfo;
2892509Smrj 	ddi_dma_attr_t lattr;
2893509Smrj 	size_t max_copybuf;
2894509Smrj 	int cansleep;
2895509Smrj 	int e;
2896509Smrj #if !defined(__amd64)
2897509Smrj 	int vmflag;
2898509Smrj #endif
2899509Smrj 
2900509Smrj 
2901509Smrj 	sinfo = &dma->dp_sglinfo;
2902509Smrj 
29035251Smrj 	/* read this first so it's consistent through the routine  */
29045251Smrj 	max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;
2905509Smrj 
2906509Smrj 	/* We need to call into the rootnex on ddi_dma_sync() */
2907509Smrj 	hp->dmai_rflags &= ~DMP_NOSYNC;
2908509Smrj 
2909509Smrj 	/* make sure the copybuf size <= the max size */
2910509Smrj 	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
2911509Smrj 	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
2912509Smrj 
2913509Smrj #if !defined(__amd64)
2914509Smrj 	/*
2915509Smrj 	 * if we don't have kva space to copy to/from, allocate the KVA space
2916509Smrj 	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
2917509Smrj 	 * the 64-bit kernel.
2918509Smrj 	 */
2919509Smrj 	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
2920509Smrj 	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
2921509Smrj 
2922509Smrj 		/* convert the sleep flags */
2923509Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2924509Smrj 			vmflag = VM_SLEEP;
2925509Smrj 		} else {
2926509Smrj 			vmflag = VM_NOSLEEP;
2927509Smrj 		}
2928509Smrj 
2929509Smrj 		/* allocate Kernel VA space that we can bcopy to/from */
2930509Smrj 		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
2931509Smrj 		    vmflag);
2932509Smrj 		if (dma->dp_kva == NULL) {
2933509Smrj 			return (DDI_DMA_NORESOURCES);
2934509Smrj 		}
2935509Smrj 	}
2936509Smrj #endif
2937509Smrj 
2938509Smrj 	/* convert the sleep flags */
2939509Smrj 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2940509Smrj 		cansleep = 1;
2941509Smrj 	} else {
2942509Smrj 		cansleep = 0;
2943509Smrj 	}
2944509Smrj 
2945509Smrj 	/*
29467173Smrj 	 * Allocate the actual copy buffer. This needs to fit within the DMA
29477173Smrj 	 * engine limits, so we can't use kmem_alloc... We don't need
29487173Smrj 	 * contiguous memory (sgllen) since we will be forcing windows on
29497173Smrj 	 * sgllen anyway.
2950509Smrj 	 */
2951509Smrj 	lattr = *attr;
2952509Smrj 	lattr.dma_attr_align = MMU_PAGESIZE;
29537173Smrj 	/*
29547173Smrj 	 * this should be < 0 to indicate no limit, but due to a bug in
29557173Smrj 	 * the rootnex, we'll set it to the maximum positive int.
29567173Smrj 	 */
29577173Smrj 	lattr.dma_attr_sgllen = 0x7fffffff;
2958509Smrj 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
2959509Smrj 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
2960509Smrj 	if (e != DDI_SUCCESS) {
2961509Smrj #if !defined(__amd64)
2962509Smrj 		if (dma->dp_kva != NULL) {
2963509Smrj 			vmem_free(heap_arena, dma->dp_kva,
2964509Smrj 			    dma->dp_copybuf_size);
2965509Smrj 		}
2966509Smrj #endif
2967509Smrj 		return (DDI_DMA_NORESOURCES);
2968509Smrj 	}
2969509Smrj 
2970509Smrj 	DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
2971509Smrj 	    size_t, dma->dp_copybuf_size);
2972509Smrj 
2973509Smrj 	return (DDI_SUCCESS);
2974509Smrj }
2975509Smrj 
2976509Smrj 
2977509Smrj /*
2978509Smrj  * rootnex_setup_windows()
2979509Smrj  *    Called in bind slowpath to setup the window state. We always have windows
2980509Smrj  *    in the slowpath. Even if the window count = 1.
2981509Smrj  */
2982509Smrj static int
2983509Smrj rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
2984509Smrj     ddi_dma_attr_t *attr, int kmflag)
2985509Smrj {
2986509Smrj 	rootnex_window_t *windowp;
2987509Smrj 	rootnex_sglinfo_t *sinfo;
2988509Smrj 	size_t copy_state_size;
2989509Smrj 	size_t win_state_size;
2990509Smrj 	size_t state_available;
2991509Smrj 	size_t space_needed;
2992509Smrj 	uint_t copybuf_win;
2993509Smrj 	uint_t maxxfer_win;
2994509Smrj 	size_t space_used;
2995509Smrj 	uint_t sglwin;
2996509Smrj 
2997509Smrj 
2998509Smrj 	sinfo = &dma->dp_sglinfo;
2999509Smrj 
3000509Smrj 	dma->dp_current_win = 0;
3001509Smrj 	hp->dmai_nwin = 0;
3002509Smrj 
3003509Smrj 	/* If we don't need to do a partial, we only have one window */
3004509Smrj 	if (!dma->dp_partial_required) {
3005509Smrj 		dma->dp_max_win = 1;
3006509Smrj 
3007509Smrj 	/*
3008509Smrj 	 * we need multiple windows, need to figure out the worse case number
3009509Smrj 	 * of windows.
3010509Smrj 	 */
3011509Smrj 	} else {
3012509Smrj 		/*
3013509Smrj 		 * if we need windows because we need more copy buffer that
3014509Smrj 		 * we allow, the worse case number of windows we could need
3015509Smrj 		 * here would be (copybuf space required / copybuf space that
3016509Smrj 		 * we have) plus one for remainder, and plus 2 to handle the
3017509Smrj 		 * extra pages on the trim for the first and last pages of the
3018509Smrj 		 * buffer (a page is the minimum window size so under the right
3019509Smrj 		 * attr settings, you could have a window for each page).
3020509Smrj 		 * The last page will only be hit here if the size is not a
3021509Smrj 		 * multiple of the granularity (which theoretically shouldn't
3022509Smrj 		 * be the case but never has been enforced, so we could have
3023509Smrj 		 * broken things without it).
3024509Smrj 		 */
3025509Smrj 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
3026509Smrj 			ASSERT(dma->dp_copybuf_size > 0);
3027509Smrj 			copybuf_win = (sinfo->si_copybuf_req /
3028509Smrj 			    dma->dp_copybuf_size) + 1 + 2;
3029509Smrj 		} else {
3030509Smrj 			copybuf_win = 0;
3031509Smrj 		}
3032509Smrj 
3033509Smrj 		/*
3034509Smrj 		 * if we need windows because we have more cookies than the H/W
3035509Smrj 		 * can handle, the number of windows we would need here would
3036509Smrj 		 * be (cookie count / cookies count H/W supports) plus one for
3037509Smrj 		 * remainder, and plus 2 to handle the extra pages on the trim
3038509Smrj 		 * (see above comment about trim)
3039509Smrj 		 */
3040509Smrj 		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
3041509Smrj 			sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen)
3042509Smrj 			    + 1) + 2;
3043509Smrj 		} else {
3044509Smrj 			sglwin = 0;
3045509Smrj 		}
3046509Smrj 
3047509Smrj 		/*
3048509Smrj 		 * if we need windows because we're binding more memory than the
3049509Smrj 		 * H/W can transfer at once, the number of windows we would need
3050509Smrj 		 * here would be (xfer count / max xfer H/W supports) plus one
3051509Smrj 		 * for remainder, and plus 2 to handle the extra pages on the
3052509Smrj 		 * trim (see above comment about trim)
3053509Smrj 		 */
3054509Smrj 		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
3055509Smrj 			maxxfer_win = (dma->dp_dma.dmao_size /
3056509Smrj 			    dma->dp_maxxfer) + 1 + 2;
3057509Smrj 		} else {
3058509Smrj 			maxxfer_win = 0;
3059509Smrj 		}
3060509Smrj 		dma->dp_max_win =  copybuf_win + sglwin + maxxfer_win;
3061509Smrj 		ASSERT(dma->dp_max_win > 0);
3062509Smrj 	}
3063509Smrj 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
3064509Smrj 
3065509Smrj 	/*
3066509Smrj 	 * Get space for window and potential copy buffer state. Before we
3067509Smrj 	 * go and allocate memory, see if we can get away with using what's
3068509Smrj 	 * left in the pre-allocted state or the dynamically allocated sgl.
3069509Smrj 	 */
3070509Smrj 	space_used = (uintptr_t)(sinfo->si_sgl_size *
3071509Smrj 	    sizeof (ddi_dma_cookie_t));
3072509Smrj 
3073509Smrj 	/* if we dynamically allocated space for the cookies */
3074509Smrj 	if (dma->dp_need_to_free_cookie) {
3075509Smrj 		/* if we have more space in the pre-allocted buffer, use it */
3076509Smrj 		ASSERT(space_used <= dma->dp_cookie_size);
3077509Smrj 		if ((dma->dp_cookie_size - space_used) <=
3078509Smrj 		    rootnex_state->r_prealloc_size) {
3079509Smrj 			state_available = rootnex_state->r_prealloc_size;
3080509Smrj 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
3081509Smrj 
3082509Smrj 		/*
3083509Smrj 		 * else, we have more free space in the dynamically allocated
3084509Smrj 		 * buffer, i.e. the buffer wasn't worse case fragmented so we
3085509Smrj 		 * didn't need a lot of cookies.
3086509Smrj 		 */
3087509Smrj 		} else {
3088509Smrj 			state_available = dma->dp_cookie_size - space_used;
3089509Smrj 			windowp = (rootnex_window_t *)
3090509Smrj 			    &dma->dp_cookies[sinfo->si_sgl_size];
3091509Smrj 		}
3092509Smrj 
3093509Smrj 	/* we used the pre-alloced buffer */
3094509Smrj 	} else {
3095509Smrj 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
3096509Smrj 		state_available = rootnex_state->r_prealloc_size - space_used;
3097509Smrj 		windowp = (rootnex_window_t *)
3098509Smrj 		    &dma->dp_cookies[sinfo->si_sgl_size];
3099509Smrj 	}
3100509Smrj 
3101509Smrj 	/*
3102509Smrj 	 * figure out how much state we need to track the copy buffer. Add an
3103509Smrj 	 * addition 8 bytes for pointer alignemnt later.
3104509Smrj 	 */
3105509Smrj 	if (dma->dp_copybuf_size > 0) {
3106509Smrj 		copy_state_size = sinfo->si_max_pages *
3107509Smrj 		    sizeof (rootnex_pgmap_t);
3108509Smrj 	} else {
3109509Smrj 		copy_state_size = 0;
3110509Smrj 	}
3111509Smrj 	/* add an additional 8 bytes for pointer alignment */
3112509Smrj 	space_needed = win_state_size + copy_state_size + 0x8;
3113509Smrj 
3114509Smrj 	/* if we have enough space already, use it */
3115509Smrj 	if (state_available >= space_needed) {
3116509Smrj 		dma->dp_window = windowp;
3117509Smrj 		dma->dp_need_to_free_window = B_FALSE;
3118509Smrj 
3119509Smrj 	/* not enough space, need to allocate more. */
3120509Smrj 	} else {
3121509Smrj 		dma->dp_window = kmem_alloc(space_needed, kmflag);
3122509Smrj 		if (dma->dp_window == NULL) {
3123509Smrj 			return (DDI_DMA_NORESOURCES);
3124509Smrj 		}
3125509Smrj 		dma->dp_need_to_free_window = B_TRUE;
3126509Smrj 		dma->dp_window_size = space_needed;
3127509Smrj 		DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *,
3128509Smrj 		    dma->dp_dip, size_t, space_needed);
3129509Smrj 	}
3130509Smrj 
3131509Smrj 	/*
3132509Smrj 	 * we allocate copy buffer state and window state at the same time.
3133509Smrj 	 * setup our copy buffer state pointers. Make sure it's aligned.
3134509Smrj 	 */
3135509Smrj 	if (dma->dp_copybuf_size > 0) {
3136509Smrj 		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
3137509Smrj 		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
3138509Smrj 
3139509Smrj #if !defined(__amd64)
3140509Smrj 		/*
3141509Smrj 		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
3142509Smrj 		 * false/NULL. Should be quicker to bzero vs loop and set.
3143509Smrj 		 */
3144509Smrj 		bzero(dma->dp_pgmap, copy_state_size);
3145509Smrj #endif
3146509Smrj 	} else {
3147509Smrj 		dma->dp_pgmap = NULL;
3148509Smrj 	}
3149509Smrj 
3150509Smrj 	return (DDI_SUCCESS);
3151509Smrj }
3152509Smrj 
3153509Smrj 
3154509Smrj /*
3155509Smrj  * rootnex_teardown_copybuf()
3156509Smrj  *    cleans up after rootnex_setup_copybuf()
3157509Smrj  */
3158509Smrj static void
3159509Smrj rootnex_teardown_copybuf(rootnex_dma_t *dma)
3160509Smrj {
3161509Smrj #if !defined(__amd64)
3162509Smrj 	int i;
3163509Smrj 
3164509Smrj 	/*
3165509Smrj 	 * if we allocated kernel heap VMEM space, go through all the pages and
3166509Smrj 	 * map out any of the ones that we're mapped into the kernel heap VMEM
3167509Smrj 	 * arena. Then free the VMEM space.
3168509Smrj 	 */
3169509Smrj 	if (dma->dp_kva != NULL) {
3170509Smrj 		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
3171509Smrj 			if (dma->dp_pgmap[i].pm_mapped) {
3172509Smrj 				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
3173509Smrj 				    MMU_PAGESIZE, HAT_UNLOAD);
3174509Smrj 				dma->dp_pgmap[i].pm_mapped = B_FALSE;
3175509Smrj 			}
3176509Smrj 		}
3177509Smrj 
3178509Smrj 		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
3179509Smrj 	}
3180509Smrj 
3181509Smrj #endif
3182509Smrj 
3183509Smrj 	/* if we allocated a copy buffer, free it */
3184509Smrj 	if (dma->dp_cbaddr != NULL) {
31851900Seota 		i_ddi_mem_free(dma->dp_cbaddr, NULL);
3186509Smrj 	}
3187509Smrj }
3188509Smrj 
3189509Smrj 
3190509Smrj /*
3191509Smrj  * rootnex_teardown_windows()
3192509Smrj  *    cleans up after rootnex_setup_windows()
3193509Smrj  */
3194509Smrj static void
3195509Smrj rootnex_teardown_windows(rootnex_dma_t *dma)
3196509Smrj {
3197509Smrj 	/*
3198509Smrj 	 * if we had to allocate window state on the last bind (because we
3199509Smrj 	 * didn't have enough pre-allocated space in the handle), free it.
3200509Smrj 	 */
3201509Smrj 	if (dma->dp_need_to_free_window) {
3202509Smrj 		kmem_free(dma->dp_window, dma->dp_window_size);
3203509Smrj 	}
3204509Smrj }
3205509Smrj 
3206509Smrj 
3207509Smrj /*
3208509Smrj  * rootnex_init_win()
3209509Smrj  *    Called in bind slow path during creation of a new window. Initializes
3210509Smrj  *    window state to default values.
3211509Smrj  */
3212509Smrj /*ARGSUSED*/
3213509Smrj static void
3214509Smrj rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3215509Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
3216509Smrj {
3217509Smrj 	hp->dmai_nwin++;
3218509Smrj 	window->wd_dosync = B_FALSE;
3219509Smrj 	window->wd_offset = cur_offset;
3220509Smrj 	window->wd_size = 0;
3221509Smrj 	window->wd_first_cookie = cookie;
3222509Smrj 	window->wd_cookie_cnt = 0;
3223509Smrj 	window->wd_trim.tr_trim_first = B_FALSE;
3224509Smrj 	window->wd_trim.tr_trim_last = B_FALSE;
3225509Smrj 	window->wd_trim.tr_first_copybuf_win = B_FALSE;
3226509Smrj 	window->wd_trim.tr_last_copybuf_win = B_FALSE;
3227509Smrj #if !defined(__amd64)
3228509Smrj 	window->wd_remap_copybuf = dma->dp_cb_remaping;
3229509Smrj #endif
3230509Smrj }
3231509Smrj 
3232509Smrj 
3233509Smrj /*
3234509Smrj  * rootnex_setup_cookie()
3235509Smrj  *    Called in the bind slow path when the sgl uses the copy buffer. If any of
3236509Smrj  *    the sgl uses the copy buffer, we need to go through each cookie, figure
3237509Smrj  *    out if it uses the copy buffer, and if it does, save away everything we'll
3238509Smrj  *    need during sync.
3239509Smrj  */
3240509Smrj static void
3241509Smrj rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
3242509Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
3243509Smrj     page_t **cur_pp)
3244509Smrj {
3245509Smrj 	boolean_t copybuf_sz_power_2;
3246509Smrj 	rootnex_sglinfo_t *sinfo;
32475084Sjohnlev 	paddr_t paddr;
3248509Smrj 	uint_t pidx;
3249509Smrj 	uint_t pcnt;
3250509Smrj 	off_t poff;
3251509Smrj #if defined(__amd64)
3252509Smrj 	pfn_t pfn;
3253509Smrj #else
3254509Smrj 	page_t **pplist;
3255509Smrj #endif
3256509Smrj 
3257509Smrj 	sinfo = &dma->dp_sglinfo;
3258509Smrj 
3259509Smrj 	/*
3260509Smrj 	 * Calculate the page index relative to the start of the buffer. The
3261509Smrj 	 * index to the current page for our buffer is the offset into the
3262509Smrj 	 * first page of the buffer plus our current offset into the buffer
3263509Smrj 	 * itself, shifted of course...
3264509Smrj 	 */
3265509Smrj 	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
3266509Smrj 	ASSERT(pidx < sinfo->si_max_pages);
3267509Smrj 
3268509Smrj 	/* if this cookie uses the copy buffer */
3269509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3270509Smrj 		/*
3271509Smrj 		 * NOTE: we know that since this cookie uses the copy buffer, it
3272509Smrj 		 * is <= MMU_PAGESIZE.
3273509Smrj 		 */
3274509Smrj 
3275509Smrj 		/*
3276509Smrj 		 * get the offset into the page. For the 64-bit kernel, get the
3277509Smrj 		 * pfn which we'll use with seg kpm.
3278509Smrj 		 */
32795084Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
3280509Smrj #if defined(__amd64)
32815084Sjohnlev 		/* mfn_to_pfn() is a NOP on i86pc */
32825084Sjohnlev 		pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
32835084Sjohnlev #endif /* __amd64 */
3284509Smrj 
3285509Smrj 		/* figure out if the copybuf size is a power of 2 */
3286509Smrj 		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
3287509Smrj 			copybuf_sz_power_2 = B_FALSE;
3288509Smrj 		} else {
3289509Smrj 			copybuf_sz_power_2 = B_TRUE;
3290509Smrj 		}
3291509Smrj 
3292509Smrj 		/* This page uses the copy buffer */
3293509Smrj 		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
3294509Smrj 
3295509Smrj 		/*
3296509Smrj 		 * save the copy buffer KVA that we'll use with this page.
3297509Smrj 		 * if we still fit within the copybuf, it's a simple add.
3298509Smrj 		 * otherwise, we need to wrap over using & or % accordingly.
3299509Smrj 		 */
3300509Smrj 		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
3301509Smrj 			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
3302509Smrj 			    *copybuf_used;
3303509Smrj 		} else {
3304509Smrj 			if (copybuf_sz_power_2) {
3305509Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3306509Smrj 				    (uintptr_t)dma->dp_cbaddr +
3307509Smrj 				    (*copybuf_used &
3308509Smrj 				    (dma->dp_copybuf_size - 1)));
33090Sstevel@tonic-gate 			} else {
3310509Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3311509Smrj 				    (uintptr_t)dma->dp_cbaddr +
3312509Smrj 				    (*copybuf_used % dma->dp_copybuf_size));
33130Sstevel@tonic-gate 			}
3314509Smrj 		}
3315509Smrj 
3316509Smrj 		/*
3317509Smrj 		 * over write the cookie physical address with the address of
3318509Smrj 		 * the physical address of the copy buffer page that we will
3319509Smrj 		 * use.
3320509Smrj 		 */
33215084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
3322509Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
3323509Smrj 
33245084Sjohnlev #ifdef __xpv
33255084Sjohnlev 		/*
33265084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
33275084Sjohnlev 		 * the cookies with MAs instead of PAs.
33285084Sjohnlev 		 */
33295084Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
33305084Sjohnlev #else
33315084Sjohnlev 		cookie->dmac_laddress = paddr;
33325084Sjohnlev #endif
33335084Sjohnlev 
3334509Smrj 		/* if we have a kernel VA, it's easy, just save that address */
3335509Smrj 		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
3336509Smrj 		    (sinfo->si_asp == &kas)) {
3337509Smrj 			/*
3338509Smrj 			 * save away the page aligned virtual address of the
3339509Smrj 			 * driver buffer. Offsets are handled in the sync code.
3340509Smrj 			 */
3341509Smrj 			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
3342509Smrj 			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
3343509Smrj 			    & MMU_PAGEMASK);
3344509Smrj #if !defined(__amd64)
3345509Smrj 			/*
3346509Smrj 			 * we didn't need to, and will never need to map this
3347509Smrj 			 * page.
3348509Smrj 			 */
3349509Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3350509Smrj #endif
3351509Smrj 
3352509Smrj 		/* we don't have a kernel VA. We need one for the bcopy. */
3353509Smrj 		} else {
3354509Smrj #if defined(__amd64)
3355509Smrj 			/*
3356509Smrj 			 * for the 64-bit kernel, it's easy. We use seg kpm to
3357509Smrj 			 * get a Kernel VA for the corresponding pfn.
3358509Smrj 			 */
3359509Smrj 			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
3360509Smrj #else
3361509Smrj 			/*
3362509Smrj 			 * for the 32-bit kernel, this is a pain. First we'll
3363509Smrj 			 * save away the page_t or user VA for this page. This
3364509Smrj 			 * is needed in rootnex_dma_win() when we switch to a
3365509Smrj 			 * new window which requires us to re-map the copy
3366509Smrj 			 * buffer.
3367509Smrj 			 */
3368509Smrj 			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
3369509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3370509Smrj 				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
3371509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3372509Smrj 			} else if (pplist != NULL) {
3373509Smrj 				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
3374509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3375509Smrj 			} else {
3376509Smrj 				dma->dp_pgmap[pidx].pm_pp = NULL;
3377509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
3378509Smrj 				    (((uintptr_t)
3379509Smrj 				    dmar_object->dmao_obj.virt_obj.v_addr +
3380509Smrj 				    cur_offset) & MMU_PAGEMASK);
3381509Smrj 			}
3382509Smrj 
3383509Smrj 			/*
3384509Smrj 			 * save away the page aligned virtual address which was
3385509Smrj 			 * allocated from the kernel heap arena (taking into
3386509Smrj 			 * account if we need more copy buffer than we alloced
3387509Smrj 			 * and use multiple windows to handle this, i.e. &,%).
3388509Smrj 			 * NOTE: there isn't and physical memory backing up this
3389509Smrj 			 * virtual address space currently.
3390509Smrj 			 */
3391509Smrj 			if ((*copybuf_used + MMU_PAGESIZE) <=
3392509Smrj 			    dma->dp_copybuf_size) {
3393509Smrj 				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3394509Smrj 				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
3395509Smrj 				    MMU_PAGEMASK);
3396509Smrj 			} else {
3397509Smrj 				if (copybuf_sz_power_2) {
3398509Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3399509Smrj 					    (((uintptr_t)dma->dp_kva +
3400509Smrj 					    (*copybuf_used &
3401509Smrj 					    (dma->dp_copybuf_size - 1))) &
3402509Smrj 					    MMU_PAGEMASK);
3403509Smrj 				} else {
3404509Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3405509Smrj 					    (((uintptr_t)dma->dp_kva +
3406509Smrj 					    (*copybuf_used %
3407509Smrj 					    dma->dp_copybuf_size)) &
3408509Smrj 					    MMU_PAGEMASK);
3409509Smrj 				}
3410509Smrj 			}
3411509Smrj 
3412509Smrj 			/*
3413509Smrj 			 * if we haven't used up the available copy buffer yet,
3414509Smrj 			 * map the kva to the physical page.
3415509Smrj 			 */
3416509Smrj 			if (!dma->dp_cb_remaping && ((*copybuf_used +
3417509Smrj 			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
3418509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
3419509Smrj 				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
3420509Smrj 					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
3421509Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
3422509Smrj 				} else {
3423509Smrj 					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
3424509Smrj 					    sinfo->si_asp,
3425509Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
3426509Smrj 				}
3427509Smrj 
3428509Smrj 			/*
3429509Smrj 			 * we've used up the available copy buffer, this page
3430509Smrj 			 * will have to be mapped during rootnex_dma_win() when
3431509Smrj 			 * we switch to a new window which requires a re-map
3432509Smrj 			 * the copy buffer. (32-bit kernel only)
3433509Smrj 			 */
3434509Smrj 			} else {
3435509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3436509Smrj 			}
3437509Smrj #endif
3438509Smrj 			/* go to the next page_t */
3439509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3440509Smrj 				*cur_pp = (*cur_pp)->p_next;
3441509Smrj 			}
34420Sstevel@tonic-gate 		}
3443509Smrj 
3444509Smrj 		/* add to the copy buffer count */
3445509Smrj 		*copybuf_used += MMU_PAGESIZE;
3446509Smrj 
3447509Smrj 	/*
3448509Smrj 	 * This cookie doesn't use the copy buffer. Walk through the pages this
3449509Smrj 	 * cookie occupies to reflect this.
3450509Smrj 	 */
3451509Smrj 	} else {
3452509Smrj 		/*
3453509Smrj 		 * figure out how many pages the cookie occupies. We need to
3454509Smrj 		 * use the original page offset of the buffer and the cookies
3455509Smrj 		 * offset in the buffer to do this.
3456509Smrj 		 */
3457509Smrj 		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
3458509Smrj 		pcnt = mmu_btopr(cookie->dmac_size + poff);
3459509Smrj 
3460509Smrj 		while (pcnt > 0) {
3461509Smrj #if !defined(__amd64)
3462509Smrj 			/*
3463509Smrj 			 * the 32-bit kernel doesn't have seg kpm, so we need
3464509Smrj 			 * to map in the driver buffer (if it didn't come down
3465509Smrj 			 * with a kernel VA) on the fly. Since this page doesn't
3466509Smrj 			 * use the copy buffer, it's not, or will it ever, have
3467509Smrj 			 * to be mapped in.
3468509Smrj 			 */
3469509Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3470509Smrj #endif
3471509Smrj 			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
3472509Smrj 
3473509Smrj 			/*
3474509Smrj 			 * we need to update pidx and cur_pp or we'll loose
3475509Smrj 			 * track of where we are.
3476509Smrj 			 */
3477509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3478509Smrj 				*cur_pp = (*cur_pp)->p_next;
3479509Smrj 			}
3480509Smrj 			pidx++;
3481509Smrj 			pcnt--;
3482509Smrj 		}
3483509Smrj 	}
3484509Smrj }
3485509Smrj 
3486509Smrj 
3487509Smrj /*
3488509Smrj  * rootnex_sgllen_window_boundary()
3489509Smrj  *    Called in the bind slow path when the next cookie causes us to exceed (in
3490509Smrj  *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
3491509Smrj  *    length supported by the DMA H/W.
3492509Smrj  */
3493509Smrj static int
3494509Smrj rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3495509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
3496509Smrj     off_t cur_offset)
3497509Smrj {
3498509Smrj 	off_t new_offset;
3499509Smrj 	size_t trim_sz;
3500509Smrj 	off_t coffset;
3501509Smrj 
3502509Smrj 
3503509Smrj 	/*
3504509Smrj 	 * if we know we'll never have to trim, it's pretty easy. Just move to
3505509Smrj 	 * the next window and init it. We're done.
3506509Smrj 	 */
3507509Smrj 	if (!dma->dp_trim_required) {
3508509Smrj 		(*windowp)++;
3509509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3510509Smrj 		(*windowp)->wd_cookie_cnt++;
3511509Smrj 		(*windowp)->wd_size = cookie->dmac_size;
3512509Smrj 		return (DDI_SUCCESS);
3513509Smrj 	}
3514509Smrj 
3515509Smrj 	/* figure out how much we need to trim from the window */
3516509Smrj 	ASSERT(attr->dma_attr_granular != 0);
3517509Smrj 	if (dma->dp_granularity_power_2) {
3518509Smrj 		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
3519509Smrj 	} else {
3520509Smrj 		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
3521509Smrj 	}
3522509Smrj 
3523509Smrj 	/* The window's a whole multiple of granularity. We're done */
3524509Smrj 	if (trim_sz == 0) {
3525509Smrj 		(*windowp)++;
3526509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3527509Smrj 		(*windowp)->wd_cookie_cnt++;
3528509Smrj 		(*windowp)->wd_size = cookie->dmac_size;
3529509Smrj 		return (DDI_SUCCESS);
3530509Smrj 	}
3531509Smrj 
3532509Smrj 	/*
3533509Smrj 	 * The window's not a whole multiple of granularity, since we know this
3534509Smrj 	 * is due to the sgllen, we need to go back to the last cookie and trim
3535509Smrj 	 * that one, add the left over part of the old cookie into the new
3536509Smrj 	 * window, and then add in the new cookie into the new window.
3537509Smrj 	 */
3538509Smrj 
3539509Smrj 	/*
3540509Smrj 	 * make sure the driver isn't making us do something bad... Trimming and
3541509Smrj 	 * sgllen == 1 don't go together.
3542509Smrj 	 */
3543509Smrj 	if (attr->dma_attr_sgllen == 1) {
3544509Smrj 		return (DDI_DMA_NOMAPPING);
3545509Smrj 	}
3546509Smrj 
3547509Smrj 	/*
3548509Smrj 	 * first, setup the current window to account for the trim. Need to go
3549509Smrj 	 * back to the last cookie for this.
3550509Smrj 	 */
3551509Smrj 	cookie--;
3552509Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3553509Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
35545084Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3555509Smrj 	ASSERT(cookie->dmac_size > trim_sz);
3556509Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3557509Smrj 	(*windowp)->wd_size -= trim_sz;
3558509Smrj 
3559509Smrj 	/* save the buffer offsets for the next window */
3560509Smrj 	coffset = cookie->dmac_size - trim_sz;
3561509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3562509Smrj 
3563509Smrj 	/*
3564509Smrj 	 * set this now in case this is the first window. all other cases are
3565509Smrj 	 * set in dma_win()
3566509Smrj 	 */
3567509Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3568509Smrj 
3569509Smrj 	/*
3570509Smrj 	 * initialize the next window using what's left over in the previous
3571509Smrj 	 * cookie.
3572509Smrj 	 */
3573509Smrj 	(*windowp)++;
3574509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3575509Smrj 	(*windowp)->wd_cookie_cnt++;
3576509Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
35775084Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
3578509Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3579509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3580509Smrj 		(*windowp)->wd_dosync = B_TRUE;
3581509Smrj 	}
3582509Smrj 
3583509Smrj 	/*
3584509Smrj 	 * now go back to the current cookie and add it to the new window. set
3585509Smrj 	 * the new window size to the what was left over from the previous
3586509Smrj 	 * cookie and what's in the current cookie.
3587509Smrj 	 */
3588509Smrj 	cookie++;
3589509Smrj 	(*windowp)->wd_cookie_cnt++;
3590509Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3591509Smrj 
3592509Smrj 	/*
3593509Smrj 	 * trim plus the next cookie could put us over maxxfer (a cookie can be
3594509Smrj 	 * a max size of maxxfer). Handle that case.
3595509Smrj 	 */
3596509Smrj 	if ((*windowp)->wd_size > dma->dp_maxxfer) {
3597509Smrj 		/*
3598509Smrj 		 * maxxfer is already a whole multiple of granularity, and this
3599509Smrj 		 * trim will be <= the previous trim (since a cookie can't be
3600509Smrj 		 * larger than maxxfer). Make things simple here.
3601509Smrj 		 */
3602509Smrj 		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
3603509Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3604509Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
36055084Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3606509Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3607509Smrj 		(*windowp)->wd_size -= trim_sz;
3608509Smrj 		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
3609509Smrj 
3610509Smrj 		/* save the buffer offsets for the next window */
3611509Smrj 		coffset = cookie->dmac_size - trim_sz;
3612509Smrj 		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3613509Smrj 
3614509Smrj 		/* setup the next window */
3615509Smrj 		(*windowp)++;
3616509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3617509Smrj 		(*windowp)->wd_cookie_cnt++;
3618509Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
36195084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
3620509Smrj 		    coffset;
3621509Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
3622509Smrj 	}
3623509Smrj 
3624509Smrj 	return (DDI_SUCCESS);
3625509Smrj }
3626509Smrj 
3627509Smrj 
3628509Smrj /*
3629509Smrj  * rootnex_copybuf_window_boundary()
3630509Smrj  *    Called in bind slowpath when we get to a window boundary because we used
3631509Smrj  *    up all the copy buffer that we have.
3632509Smrj  */
3633509Smrj static int
3634509Smrj rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3635509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
3636509Smrj     size_t *copybuf_used)
3637509Smrj {
3638509Smrj 	rootnex_sglinfo_t *sinfo;
3639509Smrj 	off_t new_offset;
3640509Smrj 	size_t trim_sz;
36415084Sjohnlev 	paddr_t paddr;
3642509Smrj 	off_t coffset;
3643509Smrj 	uint_t pidx;
3644509Smrj 	off_t poff;
3645509Smrj 
3646509Smrj 
3647509Smrj 	sinfo = &dma->dp_sglinfo;
3648509Smrj 
3649509Smrj 	/*
3650509Smrj 	 * the copy buffer should be a whole multiple of page size. We know that
3651509Smrj 	 * this cookie is <= MMU_PAGESIZE.
3652509Smrj 	 */
3653509Smrj 	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
3654509Smrj 
3655509Smrj 	/*
3656509Smrj 	 * from now on, all new windows in this bind need to be re-mapped during
3657509Smrj 	 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf
3658509Smrj 	 * space...
3659509Smrj 	 */
3660509Smrj #if !defined(__amd64)
3661509Smrj 	dma->dp_cb_remaping = B_TRUE;
3662509Smrj #endif
3663509Smrj 
3664509Smrj 	/* reset copybuf used */
3665509Smrj 	*copybuf_used = 0;
3666509Smrj 
3667509Smrj 	/*
3668509Smrj 	 * if we don't have to trim (since granularity is set to 1), go to the
3669509Smrj 	 * next window and add the current cookie to it. We know the current
3670509Smrj 	 * cookie uses the copy buffer since we're in this code path.
3671509Smrj 	 */
3672509Smrj 	if (!dma->dp_trim_required) {
3673509Smrj 		(*windowp)++;
3674509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3675509Smrj 
3676509Smrj 		/* Add this cookie to the new window */
3677509Smrj 		(*windowp)->wd_cookie_cnt++;
3678509Smrj 		(*windowp)->wd_size += cookie->dmac_size;
3679509Smrj 		*copybuf_used += MMU_PAGESIZE;
3680509Smrj 		return (DDI_SUCCESS);
3681509Smrj 	}
3682509Smrj 
3683509Smrj 	/*
3684509Smrj 	 * *** may need to trim, figure it out.
3685509Smrj 	 */
3686509Smrj 
3687509Smrj 	/* figure out how much we need to trim from the window */
3688509Smrj 	if (dma->dp_granularity_power_2) {
3689509Smrj 		trim_sz = (*windowp)->wd_size &
3690509Smrj 		    (hp->dmai_attr.dma_attr_granular - 1);
3691509Smrj 	} else {
3692509Smrj 		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
3693509Smrj 	}
3694509Smrj 
3695509Smrj 	/*
3696509Smrj 	 * if the window's a whole multiple of granularity, go to the next
3697509Smrj 	 * window, init it, then add in the current cookie. We know the current
3698509Smrj 	 * cookie uses the copy buffer since we're in this code path.
3699509Smrj 	 */
3700509Smrj 	if (trim_sz == 0) {
3701509Smrj 		(*windowp)++;
3702509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3703509Smrj 
3704509Smrj 		/* Add this cookie to the new window */
3705509Smrj 		(*windowp)->wd_cookie_cnt++;
3706509Smrj 		(*windowp)->wd_size += cookie->dmac_size;
3707509Smrj 		*copybuf_used += MMU_PAGESIZE;
3708509Smrj 		return (DDI_SUCCESS);
3709509Smrj 	}
3710509Smrj 
3711509Smrj 	/*
3712509Smrj 	 * *** We figured it out, we definitly need to trim
3713509Smrj 	 */
3714509Smrj 
3715509Smrj 	/*
3716509Smrj 	 * make sure the driver isn't making us do something bad...
3717509Smrj 	 * Trimming and sgllen == 1 don't go together.
3718509Smrj 	 */
3719509Smrj 	if (hp->dmai_attr.dma_attr_sgllen == 1) {
3720509Smrj 		return (DDI_DMA_NOMAPPING);
3721509Smrj 	}
3722509Smrj 
3723509Smrj 	/*
3724509Smrj 	 * first, setup the current window to account for the trim. Need to go
3725509Smrj 	 * back to the last cookie for this. Some of the last cookie will be in
3726509Smrj 	 * the current window, and some of the last cookie will be in the new
3727509Smrj 	 * window. All of the current cookie will be in the new window.
3728509Smrj 	 */
3729509Smrj 	cookie--;
3730509Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3731509Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
37325084Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3733509Smrj 	ASSERT(cookie->dmac_size > trim_sz);
3734509Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3735509Smrj 	(*windowp)->wd_size -= trim_sz;
3736509Smrj 
3737509Smrj 	/*
3738509Smrj 	 * we're trimming the last cookie (not the current cookie). So that
3739509Smrj 	 * last cookie may have or may not have been using the copy buffer (
3740509Smrj 	 * we know the cookie passed in uses the copy buffer since we're in
3741509Smrj 	 * this code path).
3742509Smrj 	 *
3743509Smrj 	 * If the last cookie doesn't use the copy buffer, nothing special to
3744509Smrj 	 * do. However, if it does uses the copy buffer, it will be both the
3745509Smrj 	 * last page in the current window and the first page in the next
3746509Smrj 	 * window. Since we are reusing the copy buffer (and KVA space on the
3747509Smrj 	 * 32-bit kernel), this page will use the end of the copy buffer in the
3748509Smrj 	 * current window, and the start of the copy buffer in the next window.
3749509Smrj 	 * Track that info... The cookie physical address was already set to
3750509Smrj 	 * the copy buffer physical address in setup_cookie..
3751509Smrj 	 */
3752509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3753509Smrj 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
3754509Smrj 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
3755509Smrj 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
3756509Smrj 		(*windowp)->wd_trim.tr_last_pidx = pidx;
3757509Smrj 		(*windowp)->wd_trim.tr_last_cbaddr =
3758509Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr;
3759509Smrj #if !defined(__amd64)
3760509Smrj 		(*windowp)->wd_trim.tr_last_kaddr =
3761509Smrj 		    dma->dp_pgmap[pidx].pm_kaddr;
3762509Smrj #endif
3763509Smrj 	}
3764509Smrj 
3765509Smrj 	/* save the buffer offsets for the next window */
3766509Smrj 	coffset = cookie->dmac_size - trim_sz;
3767509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3768509Smrj 
3769509Smrj 	/*
3770509Smrj 	 * set this now in case this is the first window. all other cases are
3771509Smrj 	 * set in dma_win()
3772509Smrj 	 */
3773509Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3774509Smrj 
3775509Smrj 	/*
3776509Smrj 	 * initialize the next window using what's left over in the previous
3777509Smrj 	 * cookie.
3778509Smrj 	 */
3779509Smrj 	(*windowp)++;
3780509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3781509Smrj 	(*windowp)->wd_cookie_cnt++;
3782509Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
37835084Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
3784509Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3785509Smrj 
3786509Smrj 	/*
3787509Smrj 	 * again, we're tracking if the last cookie uses the copy buffer.
3788509Smrj 	 * read the comment above for more info on why we need to track
3789509Smrj 	 * additional state.
3790509Smrj 	 *
3791509Smrj 	 * For the first cookie in the new window, we need reset the physical
3792509Smrj 	 * address to DMA into to the start of the copy buffer plus any
3793509Smrj 	 * initial page offset which may be present.
3794509Smrj 	 */
3795509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3796509Smrj 		(*windowp)->wd_dosync = B_TRUE;
3797509Smrj 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
3798509Smrj 		(*windowp)->wd_trim.tr_first_pidx = pidx;
3799509Smrj 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
3800509Smrj 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
38015084Sjohnlev 
38025084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
38035084Sjohnlev 		    poff;
38045084Sjohnlev #ifdef __xpv
38055084Sjohnlev 		/*
38065084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
38075084Sjohnlev 		 * the cookies with MAs instead of PAs.
38085084Sjohnlev 		 */
38095084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr =
38105084Sjohnlev 		    ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
38115084Sjohnlev #else
38125084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = paddr;
38135084Sjohnlev #endif
38145084Sjohnlev 
3815509Smrj #if !defined(__amd64)
3816509Smrj 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
3817509Smrj #endif
3818509Smrj 		/* account for the cookie copybuf usage in the new window */
3819509Smrj 		*copybuf_used += MMU_PAGESIZE;
3820509Smrj 
3821509Smrj 		/*
3822509Smrj 		 * every piece of code has to have a hack, and here is this
3823509Smrj 		 * ones :-)
3824509Smrj 		 *
3825509Smrj 		 * There is a complex interaction between setup_cookie and the
3826509Smrj 		 * copybuf window boundary. The complexity had to be in either
3827509Smrj 		 * the maxxfer window, or the copybuf window, and I chose the
3828509Smrj 		 * copybuf code.
3829509Smrj 		 *
3830509Smrj 		 * So in this code path, we have taken the last cookie,
3831509Smrj 		 * virtually broken it in half due to the trim, and it happens
3832509Smrj 		 * to use the copybuf which further complicates life. At the
3833509Smrj 		 * same time, we have already setup the current cookie, which
3834509Smrj 		 * is now wrong. More background info: the current cookie uses
3835509Smrj 		 * the copybuf, so it is only a page long max. So we need to
3836509Smrj 		 * fix the current cookies copy buffer address, physical
3837509Smrj 		 * address, and kva for the 32-bit kernel. We due this by
3838509Smrj 		 * bumping them by page size (of course, we can't due this on
3839509Smrj 		 * the physical address since the copy buffer may not be
3840509Smrj 		 * physically contiguous).
3841509Smrj 		 */
3842509Smrj 		cookie++;
3843509Smrj 		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
38445084Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
38455084Sjohnlev 
38465084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
3847509Smrj 		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
38485084Sjohnlev #ifdef __xpv
38495084Sjohnlev 		/*
38505084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
38515084Sjohnlev 		 * the cookies with MAs instead of PAs.
38525084Sjohnlev 		 */
38535084Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
38545084Sjohnlev #else
38555084Sjohnlev 		cookie->dmac_laddress = paddr;
38565084Sjohnlev #endif
38575084Sjohnlev 
3858509Smrj #if !defined(__amd64)
3859509Smrj 		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
3860509Smrj 		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
3861509Smrj #endif
3862509Smrj 	} else {
3863509Smrj 		/* go back to the current cookie */
3864509Smrj 		cookie++;
3865509Smrj 	}
3866509Smrj 
3867509Smrj 	/*
3868509Smrj 	 * add the current cookie to the new window. set the new window size to
3869509Smrj 	 * the what was left over from the previous cookie and what's in the
3870509Smrj 	 * current cookie.
3871509Smrj 	 */
3872509Smrj 	(*windowp)->wd_cookie_cnt++;
3873509Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3874509Smrj 	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
3875509Smrj 
3876509Smrj 	/*
3877509Smrj 	 * we know that the cookie passed in always uses the copy buffer. We
3878509Smrj 	 * wouldn't be here if it didn't.
3879509Smrj 	 */
3880509Smrj 	*copybuf_used += MMU_PAGESIZE;
3881509Smrj 
3882509Smrj 	return (DDI_SUCCESS);
3883509Smrj }
3884509Smrj 
3885509Smrj 
3886509Smrj /*
3887509Smrj  * rootnex_maxxfer_window_boundary()
3888509Smrj  *    Called in bind slowpath when we get to a window boundary because we will
3889509Smrj  *    go over maxxfer.
3890509Smrj  */
3891509Smrj static int
3892509Smrj rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3893509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
3894509Smrj {
3895509Smrj 	size_t dmac_size;
3896509Smrj 	off_t new_offset;
3897509Smrj 	size_t trim_sz;
3898509Smrj 	off_t coffset;
3899509Smrj 
3900509Smrj 
3901509Smrj 	/*
3902509Smrj 	 * calculate how much we have to trim off of the current cookie to equal
3903509Smrj 	 * maxxfer. We don't have to account for granularity here since our
3904509Smrj 	 * maxxfer already takes that into account.
3905509Smrj 	 */
3906509Smrj 	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
3907509Smrj 	ASSERT(trim_sz <= cookie->dmac_size);
3908509Smrj 	ASSERT(trim_sz <= dma->dp_maxxfer);
3909509Smrj 
3910509Smrj 	/* save cookie size since we need it later and we might change it */
3911509Smrj 	dmac_size = cookie->dmac_size;
3912509Smrj 
3913509Smrj 	/*
3914509Smrj 	 * if we're not trimming the entire cookie, setup the current window to
3915509Smrj 	 * account for the trim.
3916509Smrj 	 */
3917509Smrj 	if (trim_sz < cookie->dmac_size) {
3918509Smrj 		(*windowp)->wd_cookie_cnt++;
3919509Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3920509Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
39215084Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3922509Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3923509Smrj 		(*windowp)->wd_size = dma->dp_maxxfer;
3924509Smrj 
3925509Smrj 		/*
3926509Smrj 		 * set the adjusted cookie size now in case this is the first
3927509Smrj 		 * window. All other windows are taken care of in get win
3928509Smrj 		 */
3929509Smrj 		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3930509Smrj 	}
3931509Smrj 
3932509Smrj 	/*
3933509Smrj 	 * coffset is the current offset within the cookie, new_offset is the
3934509Smrj 	 * current offset with the entire buffer.
3935509Smrj 	 */
3936509Smrj 	coffset = dmac_size - trim_sz;
3937509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3938509Smrj 
3939509Smrj 	/* initialize the next window */
3940509Smrj 	(*windowp)++;
3941509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3942509Smrj 	(*windowp)->wd_cookie_cnt++;
3943509Smrj 	(*windowp)->wd_size = trim_sz;
3944509Smrj 	if (trim_sz < dmac_size) {
3945509Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
39465084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
3947509Smrj 		    coffset;
3948509Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
3949509Smrj 	}
3950509Smrj 
3951509Smrj 	return (DDI_SUCCESS);
3952509Smrj }
3953509Smrj 
3954509Smrj 
3955509Smrj /*ARGSUSED*/
3956509Smrj static int
39577613SVikram.Hegde@Sun.COM rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
3958509Smrj     off_t off, size_t len, uint_t cache_flags)
3959509Smrj {
3960509Smrj 	rootnex_sglinfo_t *sinfo;
3961509Smrj 	rootnex_pgmap_t *cbpage;
3962509Smrj 	rootnex_window_t *win;
3963509Smrj 	ddi_dma_impl_t *hp;
3964509Smrj 	rootnex_dma_t *dma;
3965509Smrj 	caddr_t fromaddr;
3966509Smrj 	caddr_t toaddr;
3967509Smrj 	uint_t psize;
3968509Smrj 	off_t offset;
3969509Smrj 	uint_t pidx;
3970509Smrj 	size_t size;
3971509Smrj 	off_t poff;
3972509Smrj 	int e;
3973509Smrj 
3974509Smrj 
3975509Smrj 	hp = (ddi_dma_impl_t *)handle;
3976509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
3977509Smrj 	sinfo = &dma->dp_sglinfo;
3978509Smrj 
3979509Smrj 	/*
3980509Smrj 	 * if we don't have any windows, we don't need to sync. A copybuf
3981509Smrj 	 * will cause us to have at least one window.
3982509Smrj 	 */
3983509Smrj 	if (dma->dp_window == NULL) {
3984509Smrj 		return (DDI_SUCCESS);
3985509Smrj 	}
3986509Smrj 
3987509Smrj 	/* This window may not need to be sync'd */
3988509Smrj 	win = &dma->dp_window[dma->dp_current_win];
3989509Smrj 	if (!win->wd_dosync) {
3990509Smrj 		return (DDI_SUCCESS);
3991509Smrj 	}
3992509Smrj 
3993509Smrj 	/* handle off and len special cases */
3994509Smrj 	if ((off == 0) || (rootnex_sync_ignore_params)) {
3995509Smrj 		offset = win->wd_offset;
3996509Smrj 	} else {
3997509Smrj 		offset = off;
3998509Smrj 	}
3999509Smrj 	if ((len == 0) || (rootnex_sync_ignore_params)) {
4000509Smrj 		size = win->wd_size;
4001509Smrj 	} else {
4002509Smrj 		size = len;
4003509Smrj 	}
4004509Smrj 
4005509Smrj 	/* check the sync args to make sure they make a little sense */
4006509Smrj 	if (rootnex_sync_check_parms) {
4007509Smrj 		e = rootnex_valid_sync_parms(hp, win, offset, size,
4008509Smrj 		    cache_flags);
4009509Smrj 		if (e != DDI_SUCCESS) {
4010509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
4011509Smrj 			return (DDI_FAILURE);
4012509Smrj 		}
4013509Smrj 	}
4014509Smrj 
4015509Smrj 	/*
4016509Smrj 	 * special case the first page to handle the offset into the page. The
4017509Smrj 	 * offset to the current page for our buffer is the offset into the
4018509Smrj 	 * first page of the buffer plus our current offset into the buffer
4019509Smrj 	 * itself, masked of course.
4020509Smrj 	 */
4021509Smrj 	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
4022509Smrj 	psize = MIN((MMU_PAGESIZE - poff), size);
4023509Smrj 
4024509Smrj 	/* go through all the pages that we want to sync */
4025509Smrj 	while (size > 0) {
4026509Smrj 		/*
4027509Smrj 		 * Calculate the page index relative to the start of the buffer.
4028509Smrj 		 * The index to the current page for our buffer is the offset
4029509Smrj 		 * into the first page of the buffer plus our current offset
4030509Smrj 		 * into the buffer itself, shifted of course...
4031509Smrj 		 */
4032509Smrj 		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
4033509Smrj 		ASSERT(pidx < sinfo->si_max_pages);
4034509Smrj 
4035509Smrj 		/*
4036509Smrj 		 * if this page uses the copy buffer, we need to sync it,
4037509Smrj 		 * otherwise, go on to the next page.
4038509Smrj 		 */
4039509Smrj 		cbpage = &dma->dp_pgmap[pidx];
4040509Smrj 		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
4041509Smrj 		    (cbpage->pm_uses_copybuf == B_FALSE));
4042509Smrj 		if (cbpage->pm_uses_copybuf) {
4043509Smrj 			/* cbaddr and kaddr should be page aligned */
4044509Smrj 			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
4045509Smrj 			    MMU_PAGEOFFSET) == 0);
4046509Smrj 			ASSERT(((uintptr_t)cbpage->pm_kaddr &
4047509Smrj 			    MMU_PAGEOFFSET) == 0);
4048509Smrj 
4049509Smrj 			/*
4050509Smrj 			 * if we're copying for the device, we are going to
4051509Smrj 			 * copy from the drivers buffer and to the rootnex
4052509Smrj 			 * allocated copy buffer.
4053509Smrj 			 */
4054509Smrj 			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
4055509Smrj 				fromaddr = cbpage->pm_kaddr + poff;
4056509Smrj 				toaddr = cbpage->pm_cbaddr + poff;
4057509Smrj 				DTRACE_PROBE2(rootnex__sync__dev,
4058509Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
4059509Smrj 
4060509Smrj 			/*
4061509Smrj 			 * if we're copying for the cpu/kernel, we are going to
4062509Smrj 			 * copy from the rootnex allocated copy buffer to the
4063509Smrj 			 * drivers buffer.
4064509Smrj 			 */
4065509Smrj 			} else {
4066509Smrj 				fromaddr = cbpage->pm_cbaddr + poff;
4067509Smrj 				toaddr = cbpage->pm_kaddr + poff;
4068509Smrj 				DTRACE_PROBE2(rootnex__sync__cpu,
4069509Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
4070509Smrj 			}
4071509Smrj 
4072509Smrj 			bcopy(fromaddr, toaddr, psize);
4073509Smrj 		}
4074509Smrj 
4075509Smrj 		/*
4076509Smrj 		 * decrement size until we're done, update our offset into the
4077509Smrj 		 * buffer, and get the next page size.
4078509Smrj 		 */
4079509Smrj 		size -= psize;
4080509Smrj 		offset += psize;
4081509Smrj 		psize = MIN(MMU_PAGESIZE, size);
4082509Smrj 
4083509Smrj 		/* page offset is zero for the rest of this loop */
4084509Smrj 		poff = 0;
4085509Smrj 	}
4086509Smrj 
4087509Smrj 	return (DDI_SUCCESS);
4088509Smrj }
4089509Smrj 
40907613SVikram.Hegde@Sun.COM /*
40917613SVikram.Hegde@Sun.COM  * rootnex_dma_sync()
40927613SVikram.Hegde@Sun.COM  *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
40937613SVikram.Hegde@Sun.COM  *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
40947613SVikram.Hegde@Sun.COM  *    is set, ddi_dma_sync() returns immediately passing back success.
40957613SVikram.Hegde@Sun.COM  */
40967613SVikram.Hegde@Sun.COM /*ARGSUSED*/
40977613SVikram.Hegde@Sun.COM static int
40987613SVikram.Hegde@Sun.COM rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
40997613SVikram.Hegde@Sun.COM     off_t off, size_t len, uint_t cache_flags)
41007613SVikram.Hegde@Sun.COM {
41017613SVikram.Hegde@Sun.COM #if !defined(__xpv)
41027613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
41037613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_sync(dip, rdip, handle, off, len,
41047613SVikram.Hegde@Sun.COM 		    cache_flags));
41057613SVikram.Hegde@Sun.COM 	}
41067613SVikram.Hegde@Sun.COM #endif
41077613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_sync(dip, rdip, handle, off, len,
41087613SVikram.Hegde@Sun.COM 	    cache_flags));
41097613SVikram.Hegde@Sun.COM }
4110509Smrj 
4111509Smrj /*
4112509Smrj  * rootnex_valid_sync_parms()
4113509Smrj  *    checks the parameters passed to sync to verify they are correct.
4114509Smrj  */
4115509Smrj static int
4116509Smrj rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
4117509Smrj     off_t offset, size_t size, uint_t cache_flags)
4118509Smrj {
4119509Smrj 	off_t woffset;
4120509Smrj 
4121509Smrj 
4122509Smrj 	/*
4123509Smrj 	 * the first part of the test to make sure the offset passed in is
4124509Smrj 	 * within the window.
4125509Smrj 	 */
4126509Smrj 	if (offset < win->wd_offset) {
4127509Smrj 		return (DDI_FAILURE);
4128509Smrj 	}
4129509Smrj 
4130509Smrj 	/*
4131509Smrj 	 * second and last part of the test to make sure the offset and length
4132509Smrj 	 * passed in is within the window.
4133509Smrj 	 */
4134509Smrj 	woffset = offset - win->wd_offset;
4135509Smrj 	if ((woffset + size) > win->wd_size) {
4136509Smrj 		return (DDI_FAILURE);
4137509Smrj 	}
4138509Smrj 
4139509Smrj 	/*
4140509Smrj 	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
4141509Smrj 	 * be set too.
4142509Smrj 	 */
4143509Smrj 	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
4144509Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
4145509Smrj 		return (DDI_SUCCESS);
4146509Smrj 	}
4147509Smrj 
4148509Smrj 	/*
4149509Smrj 	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
4150509Smrj 	 * should be set. Also DDI_DMA_READ should be set in the flags.
4151509Smrj 	 */
4152509Smrj 	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
4153509Smrj 	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
4154509Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
4155509Smrj 		return (DDI_SUCCESS);
4156509Smrj 	}
4157509Smrj 
4158509Smrj 	return (DDI_FAILURE);
4159509Smrj }
4160509Smrj 
4161509Smrj 
4162509Smrj /*ARGSUSED*/
4163509Smrj static int
41647613SVikram.Hegde@Sun.COM rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4165509Smrj     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
4166509Smrj     uint_t *ccountp)
4167509Smrj {
4168509Smrj 	rootnex_window_t *window;
4169509Smrj 	rootnex_trim_t *trim;
4170509Smrj 	ddi_dma_impl_t *hp;
4171509Smrj 	rootnex_dma_t *dma;
4172509Smrj #if !defined(__amd64)
4173509Smrj 	rootnex_sglinfo_t *sinfo;
4174509Smrj 	rootnex_pgmap_t *pmap;
4175509Smrj 	uint_t pidx;
4176509Smrj 	uint_t pcnt;
4177509Smrj 	off_t poff;
4178509Smrj 	int i;
4179509Smrj #endif
4180509Smrj 
4181509Smrj 
4182509Smrj 	hp = (ddi_dma_impl_t *)handle;
4183509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
4184509Smrj #if !defined(__amd64)
4185509Smrj 	sinfo = &dma->dp_sglinfo;
4186509Smrj #endif
4187509Smrj 
4188509Smrj 	/* If we try and get a window which doesn't exist, return failure */
4189509Smrj 	if (win >= hp->dmai_nwin) {
4190509Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4191509Smrj 		return (DDI_FAILURE);
4192509Smrj 	}
4193509Smrj 
4194509Smrj 	/*
4195509Smrj 	 * if we don't have any windows, and they're asking for the first
4196509Smrj 	 * window, setup the cookie pointer to the first cookie in the bind.
4197509Smrj 	 * setup our return values, then increment the cookie since we return
4198509Smrj 	 * the first cookie on the stack.
4199509Smrj 	 */
4200509Smrj 	if (dma->dp_window == NULL) {
4201509Smrj 		if (win != 0) {
4202509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4203509Smrj 			return (DDI_FAILURE);
4204509Smrj 		}
4205509Smrj 		hp->dmai_cookie = dma->dp_cookies;
4206509Smrj 		*offp = 0;
4207509Smrj 		*lenp = dma->dp_dma.dmao_size;
4208509Smrj 		*ccountp = dma->dp_sglinfo.si_sgl_size;
4209509Smrj 		*cookiep = hp->dmai_cookie[0];
4210509Smrj 		hp->dmai_cookie++;
4211509Smrj 		return (DDI_SUCCESS);
4212509Smrj 	}
4213509Smrj 
4214509Smrj 	/* sync the old window before moving on to the new one */
4215509Smrj 	window = &dma->dp_window[dma->dp_current_win];
4216509Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
4217509Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
4218509Smrj 		    DDI_DMA_SYNC_FORCPU);
4219509Smrj 	}
4220509Smrj 
4221509Smrj #if !defined(__amd64)
4222509Smrj 	/*
4223509Smrj 	 * before we move to the next window, if we need to re-map, unmap all
4224509Smrj 	 * the pages in this window.
4225509Smrj 	 */
4226509Smrj 	if (dma->dp_cb_remaping) {
4227509Smrj 		/*
4228509Smrj 		 * If we switch to this window again, we'll need to map in
4229509Smrj 		 * on the fly next time.
4230509Smrj 		 */
4231509Smrj 		window->wd_remap_copybuf = B_TRUE;
4232509Smrj 
4233509Smrj 		/*
4234509Smrj 		 * calculate the page index into the buffer where this window
4235509Smrj 		 * starts, and the number of pages this window takes up.
4236509Smrj 		 */
4237509Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4238509Smrj 		    MMU_PAGESHIFT;
4239509Smrj 		poff = (sinfo->si_buf_offset + window->wd_offset) &
4240509Smrj 		    MMU_PAGEOFFSET;
4241509Smrj 		pcnt = mmu_btopr(window->wd_size + poff);
4242509Smrj 		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
4243509Smrj 
4244509Smrj 		/* unmap pages which are currently mapped in this window */
4245509Smrj 		for (i = 0; i < pcnt; i++) {
4246509Smrj 			if (dma->dp_pgmap[pidx].pm_mapped) {
4247509Smrj 				hat_unload(kas.a_hat,
4248509Smrj 				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
4249509Smrj 				    HAT_UNLOAD);
4250509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4251509Smrj 			}
4252509Smrj 			pidx++;
4253509Smrj 		}
4254509Smrj 	}
4255509Smrj #endif
4256509Smrj 
4257509Smrj 	/*
4258509Smrj 	 * Move to the new window.
4259509Smrj 	 * NOTE: current_win must be set for sync to work right
4260509Smrj 	 */
4261509Smrj 	dma->dp_current_win = win;
4262509Smrj 	window = &dma->dp_window[win];
4263509Smrj 
4264509Smrj 	/* if needed, adjust the first and/or last cookies for trim */
4265509Smrj 	trim = &window->wd_trim;
4266509Smrj 	if (trim->tr_trim_first) {
42675084Sjohnlev 		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
4268509Smrj 		window->wd_first_cookie->dmac_size = trim->tr_first_size;
4269509Smrj #if !defined(__amd64)
4270509Smrj 		window->wd_first_cookie->dmac_type =
4271509Smrj 		    (window->wd_first_cookie->dmac_type &
4272509Smrj 		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
4273509Smrj #endif
4274509Smrj 		if (trim->tr_first_copybuf_win) {
4275509Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
4276509Smrj 			    trim->tr_first_cbaddr;
4277509Smrj #if !defined(__amd64)
4278509Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
4279509Smrj 			    trim->tr_first_kaddr;
4280509Smrj #endif
4281509Smrj 		}
4282509Smrj 	}
4283509Smrj 	if (trim->tr_trim_last) {
42845084Sjohnlev 		trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
4285509Smrj 		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
4286509Smrj 		if (trim->tr_last_copybuf_win) {
4287509Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
4288509Smrj 			    trim->tr_last_cbaddr;
4289509Smrj #if !defined(__amd64)
4290509Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
4291509Smrj 			    trim->tr_last_kaddr;
4292509Smrj #endif
4293509Smrj 		}
4294509Smrj 	}
4295509Smrj 
4296509Smrj 	/*
4297509Smrj 	 * setup the cookie pointer to the first cookie in the window. setup
4298509Smrj 	 * our return values, then increment the cookie since we return the
4299509Smrj 	 * first cookie on the stack.
4300509Smrj 	 */
4301509Smrj 	hp->dmai_cookie = window->wd_first_cookie;
4302509Smrj 	*offp = window->wd_offset;
4303509Smrj 	*lenp = window->wd_size;
4304509Smrj 	*ccountp = window->wd_cookie_cnt;
4305509Smrj 	*cookiep = hp->dmai_cookie[0];
4306509Smrj 	hp->dmai_cookie++;
4307509Smrj 
4308509Smrj #if !defined(__amd64)
4309509Smrj 	/* re-map copybuf if required for this window */
4310509Smrj 	if (dma->dp_cb_remaping) {
4311509Smrj 		/*
4312509Smrj 		 * calculate the page index into the buffer where this
4313509Smrj 		 * window starts.
4314509Smrj 		 */
4315509Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4316509Smrj 		    MMU_PAGESHIFT;
4317509Smrj 		ASSERT(pidx < sinfo->si_max_pages);
4318509Smrj 
4319509Smrj 		/*
4320509Smrj 		 * the first page can get unmapped if it's shared with the
4321509Smrj 		 * previous window. Even if the rest of this window is already
4322509Smrj 		 * mapped in, we need to still check this one.
4323509Smrj 		 */
4324509Smrj 		pmap = &dma->dp_pgmap[pidx];
4325509Smrj 		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
4326509Smrj 			if (pmap->pm_pp != NULL) {
4327509Smrj 				pmap->pm_mapped = B_TRUE;
4328509Smrj 				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
4329509Smrj 			} else if (pmap->pm_vaddr != NULL) {
4330509Smrj 				pmap->pm_mapped = B_TRUE;
4331509Smrj 				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
4332509Smrj 				    pmap->pm_kaddr);
4333509Smrj 			}
4334509Smrj 		}
4335509Smrj 		pidx++;
4336509Smrj 
4337509Smrj 		/* map in the rest of the pages if required */
4338509Smrj 		if (window->wd_remap_copybuf) {
4339509Smrj 			window->wd_remap_copybuf = B_FALSE;
4340509Smrj 
4341509Smrj 			/* figure out many pages this window takes up */
4342509Smrj 			poff = (sinfo->si_buf_offset + window->wd_offset) &
4343509Smrj 			    MMU_PAGEOFFSET;
4344509Smrj 			pcnt = mmu_btopr(window->wd_size + poff);
4345509Smrj 			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
4346509Smrj 
4347509Smrj 			/* map pages which require it */
4348509Smrj 			for (i = 1; i < pcnt; i++) {
4349509Smrj 				pmap = &dma->dp_pgmap[pidx];
4350509Smrj 				if (pmap->pm_uses_copybuf) {
4351509Smrj 					ASSERT(pmap->pm_mapped == B_FALSE);
4352509Smrj 					if (pmap->pm_pp != NULL) {
4353509Smrj 						pmap->pm_mapped = B_TRUE;
4354509Smrj 						i86_pp_map(pmap->pm_pp,
4355509Smrj 						    pmap->pm_kaddr);
4356509Smrj 					} else if (pmap->pm_vaddr != NULL) {
4357509Smrj 						pmap->pm_mapped = B_TRUE;
4358509Smrj 						i86_va_map(pmap->pm_vaddr,
4359509Smrj 						    sinfo->si_asp,
4360509Smrj 						    pmap->pm_kaddr);
4361509Smrj 					}
4362509Smrj 				}
4363509Smrj 				pidx++;
4364509Smrj 			}
4365509Smrj 		}
4366509Smrj 	}
4367509Smrj #endif
4368509Smrj 
4369509Smrj 	/* if the new window uses the copy buffer, sync it for the device */
4370509Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
4371509Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
4372509Smrj 		    DDI_DMA_SYNC_FORDEV);
4373509Smrj 	}
4374509Smrj 
4375509Smrj 	return (DDI_SUCCESS);
4376509Smrj }
4377509Smrj 
43787613SVikram.Hegde@Sun.COM /*
43797613SVikram.Hegde@Sun.COM  * rootnex_dma_win()
43807613SVikram.Hegde@Sun.COM  *    called from ddi_dma_getwin()
43817613SVikram.Hegde@Sun.COM  */
43827613SVikram.Hegde@Sun.COM /*ARGSUSED*/
43837613SVikram.Hegde@Sun.COM static int
43847613SVikram.Hegde@Sun.COM rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
43857613SVikram.Hegde@Sun.COM     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
43867613SVikram.Hegde@Sun.COM     uint_t *ccountp)
43877613SVikram.Hegde@Sun.COM {
43887613SVikram.Hegde@Sun.COM #if !defined(__xpv)
43897613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
43907613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp,
43917613SVikram.Hegde@Sun.COM 		    cookiep, ccountp));
43927613SVikram.Hegde@Sun.COM 	}
43937613SVikram.Hegde@Sun.COM #endif
43947613SVikram.Hegde@Sun.COM 
43957613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp,
43967613SVikram.Hegde@Sun.COM 	    cookiep, ccountp));
43977613SVikram.Hegde@Sun.COM }
4398509Smrj 
4399509Smrj /*
4400509Smrj  * ************************
4401509Smrj  *  obsoleted dma routines
4402509Smrj  * ************************
4403509Smrj  */
4404509Smrj 
4405509Smrj /* ARGSUSED */
4406509Smrj static int
44077613SVikram.Hegde@Sun.COM rootnex_coredma_map(dev_info_t *dip, dev_info_t *rdip,
44087613SVikram.Hegde@Sun.COM     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
4409509Smrj {
4410509Smrj #if defined(__amd64)
4411509Smrj 	/*
4412509Smrj 	 * this interface is not supported in 64-bit x86 kernel. See comment in
4413509Smrj 	 * rootnex_dma_mctl()
4414509Smrj 	 */
4415509Smrj 	return (DDI_DMA_NORESOURCES);
4416509Smrj 
4417509Smrj #else /* 32-bit x86 kernel */
4418509Smrj 	ddi_dma_handle_t *lhandlep;
4419509Smrj 	ddi_dma_handle_t lhandle;
4420509Smrj 	ddi_dma_cookie_t cookie;
4421509Smrj 	ddi_dma_attr_t dma_attr;
4422509Smrj 	ddi_dma_lim_t *dma_lim;
4423509Smrj 	uint_t ccnt;
4424509Smrj 	int e;
4425509Smrj 
4426509Smrj 
4427509Smrj 	/*
4428509Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
4429509Smrj 	 * we'll use local state. Otherwise, use the handle pointer passed in.
4430509Smrj 	 */
4431509Smrj 	if (handlep == NULL) {
4432509Smrj 		lhandlep = &lhandle;
4433509Smrj 	} else {
4434509Smrj 		lhandlep = handlep;
4435509Smrj 	}
4436509Smrj 
4437509Smrj 	/* convert the limit structure to a dma_attr one */
4438509Smrj 	dma_lim = dmareq->dmar_limits;
4439509Smrj 	dma_attr.dma_attr_version = DMA_ATTR_V0;
4440509Smrj 	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
4441509Smrj 	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
4442509Smrj 	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
4443509Smrj 	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
4444509Smrj 	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
4445509Smrj 	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
4446509Smrj 	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
4447509Smrj 	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
4448509Smrj 	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
4449509Smrj 	dma_attr.dma_attr_align = MMU_PAGESIZE;
4450509Smrj 	dma_attr.dma_attr_flags = 0;
4451509Smrj 
4452509Smrj 	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
4453509Smrj 	    dmareq->dmar_arg, lhandlep);
4454509Smrj 	if (e != DDI_SUCCESS) {
4455509Smrj 		return (e);
4456509Smrj 	}
4457509Smrj 
4458509Smrj 	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
4459509Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
4460509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4461509Smrj 		return (e);
4462509Smrj 	}
4463509Smrj 
4464509Smrj 	/*
4465509Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
4466509Smrj 	 * free up the local state and return the result.
4467509Smrj 	 */
4468509Smrj 	if (handlep == NULL) {
4469509Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
4470509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4471509Smrj 		if (e == DDI_DMA_MAPPED) {
4472509Smrj 			return (DDI_DMA_MAPOK);
44730Sstevel@tonic-gate 		} else {
4474509Smrj 			return (DDI_DMA_NOMAPPING);
4475509Smrj 		}
4476509Smrj 	}
4477509Smrj 
4478509Smrj 	return (e);
4479509Smrj #endif /* defined(__amd64) */
4480509Smrj }
4481509Smrj 
44827613SVikram.Hegde@Sun.COM /*
44837613SVikram.Hegde@Sun.COM  * rootnex_dma_map()
44847613SVikram.Hegde@Sun.COM  *    called from ddi_dma_setup()
44857613SVikram.Hegde@Sun.COM  */
44867613SVikram.Hegde@Sun.COM /* ARGSUSED */
44877613SVikram.Hegde@Sun.COM static int
44887613SVikram.Hegde@Sun.COM rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
44897613SVikram.Hegde@Sun.COM     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
44907613SVikram.Hegde@Sun.COM {
44917613SVikram.Hegde@Sun.COM #if !defined(__xpv)
44927613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
44937613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_map(dip, rdip, dmareq, handlep));
44947613SVikram.Hegde@Sun.COM 	}
44957613SVikram.Hegde@Sun.COM #endif
44967613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_map(dip, rdip, dmareq, handlep));
44977613SVikram.Hegde@Sun.COM }
4498509Smrj 
4499509Smrj /*
4500509Smrj  * rootnex_dma_mctl()
4501509Smrj  *
4502509Smrj  */
4503509Smrj /* ARGSUSED */
4504509Smrj static int
45057613SVikram.Hegde@Sun.COM rootnex_coredma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4506509Smrj     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
4507509Smrj     uint_t cache_flags)
4508509Smrj {
4509509Smrj #if defined(__amd64)
4510509Smrj 	/*
4511509Smrj 	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC we're changed to have a
4512509Smrj 	 * common implementation in genunix, so they no longer have x86
4513509Smrj 	 * specific functionality which called into dma_ctl.
4514509Smrj 	 *
4515509Smrj 	 * The rest of the obsoleted interfaces were never supported in the
4516509Smrj 	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
4517509Smrj 	 * was not ported to the x86 64-bit kernel do to serious x86 rootnex
4518509Smrj 	 * implementation issues.
4519509Smrj 	 *
4520509Smrj 	 * If you can't use DDI_DMA_SEGTOC; DDI_DMA_NEXTSEG, DDI_DMA_FREE, and
4521509Smrj 	 * DDI_DMA_NEXTWIN are useless since you can get to the cookie, so we
4522509Smrj 	 * reflect that now too...
4523509Smrj 	 *
4524509Smrj 	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
4525509Smrj 	 * not going to put this functionality into the 64-bit x86 kernel now.
4526509Smrj 	 * It wasn't ported to the 64-bit kernel for s10, no reason to change
4527509Smrj 	 * that in a future release.
4528509Smrj 	 */
4529509Smrj 	return (DDI_FAILURE);
4530509Smrj 
4531509Smrj #else /* 32-bit x86 kernel */
4532509Smrj 	ddi_dma_cookie_t lcookie;
4533509Smrj 	ddi_dma_cookie_t *cookie;
4534509Smrj 	rootnex_window_t *window;
4535509Smrj 	ddi_dma_impl_t *hp;
4536509Smrj 	rootnex_dma_t *dma;
4537509Smrj 	uint_t nwin;
4538509Smrj 	uint_t ccnt;
4539509Smrj 	size_t len;
4540509Smrj 	off_t off;
4541509Smrj 	int e;
4542509Smrj 
4543509Smrj 
4544509Smrj 	/*
4545509Smrj 	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
4546509Smrj 	 * hacky since were optimizing for the current interfaces and so we can
4547509Smrj 	 * cleanup the mess in genunix. Hopefully we will remove the this
4548509Smrj 	 * obsoleted routines someday soon.
4549509Smrj 	 */
4550509Smrj 
4551509Smrj 	switch (request) {
4552509Smrj 
4553509Smrj 	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
4554509Smrj 		hp = (ddi_dma_impl_t *)handle;
4555509Smrj 		cookie = (ddi_dma_cookie_t *)objpp;
4556509Smrj 
4557509Smrj 		/*
4558509Smrj 		 * convert segment to cookie. We don't distinguish between the
4559509Smrj 		 * two :-)
4560509Smrj 		 */
4561509Smrj 		*cookie = *hp->dmai_cookie;
4562509Smrj 		*lenp = cookie->dmac_size;
4563509Smrj 		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
4564509Smrj 		return (DDI_SUCCESS);
4565509Smrj 
4566509Smrj 	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
4567509Smrj 		hp = (ddi_dma_impl_t *)handle;
4568509Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
4569509Smrj 
4570509Smrj 		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
4571509Smrj 			return (DDI_DMA_STALE);
45720Sstevel@tonic-gate 		}
4573509Smrj 
4574509Smrj 		/* handle the case where we don't have any windows */
4575509Smrj 		if (dma->dp_window == NULL) {
4576509Smrj 			/*
4577509Smrj 			 * if seg == NULL, and we don't have any windows,
4578509Smrj 			 * return the first cookie in the sgl.
4579509Smrj 			 */
4580509Smrj 			if (*lenp == NULL) {
4581509Smrj 				dma->dp_current_cookie = 0;
4582509Smrj 				hp->dmai_cookie = dma->dp_cookies;
4583509Smrj 				*objpp = (caddr_t)handle;
4584509Smrj 				return (DDI_SUCCESS);
4585509Smrj 
4586509Smrj 			/* if we have more cookies, go to the next cookie */
4587509Smrj 			} else {
4588509Smrj 				if ((dma->dp_current_cookie + 1) >=
4589509Smrj 				    dma->dp_sglinfo.si_sgl_size) {
4590509Smrj 					return (DDI_DMA_DONE);
4591509Smrj 				}
4592509Smrj 				dma->dp_current_cookie++;
4593509Smrj 				hp->dmai_cookie++;
4594509Smrj 				return (DDI_SUCCESS);
4595509Smrj 			}
4596509Smrj 		}
4597509Smrj 
4598509Smrj 		/* We have one or more windows */
4599509Smrj 		window = &dma->dp_window[dma->dp_current_win];
4600509Smrj 
4601509Smrj 		/*
4602509Smrj 		 * if seg == NULL, return the first cookie in the current
4603509Smrj 		 * window
4604509Smrj 		 */
4605509Smrj 		if (*lenp == NULL) {
4606509Smrj 			dma->dp_current_cookie = 0;
4607683Smrj 			hp->dmai_cookie = window->wd_first_cookie;
4608509Smrj 
4609509Smrj 		/*
4610509Smrj 		 * go to the next cookie in the window then see if we done with
4611509Smrj 		 * this window.
4612509Smrj 		 */
4613509Smrj 		} else {
4614509Smrj 			if ((dma->dp_current_cookie + 1) >=
4615509Smrj 			    window->wd_cookie_cnt) {
4616509Smrj 				return (DDI_DMA_DONE);
4617509Smrj 			}
4618509Smrj 			dma->dp_current_cookie++;
4619509Smrj 			hp->dmai_cookie++;
4620509Smrj 		}
4621509Smrj 		*objpp = (caddr_t)handle;
4622509Smrj 		return (DDI_SUCCESS);
4623509Smrj 
4624509Smrj 	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
4625509Smrj 		hp = (ddi_dma_impl_t *)handle;
4626509Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
4627509Smrj 
4628509Smrj 		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
4629509Smrj 			return (DDI_DMA_STALE);
4630509Smrj 		}
4631509Smrj 
4632509Smrj 		/* if win == NULL, return the first window in the bind */
4633509Smrj 		if (*offp == NULL) {
4634509Smrj 			nwin = 0;
4635509Smrj 
4636509Smrj 		/*
4637509Smrj 		 * else, go to the next window then see if we're done with all
4638509Smrj 		 * the windows.
4639509Smrj 		 */
4640509Smrj 		} else {
4641509Smrj 			nwin = dma->dp_current_win + 1;
4642509Smrj 			if (nwin >= hp->dmai_nwin) {
4643509Smrj 				return (DDI_DMA_DONE);
4644509Smrj 			}
4645509Smrj 		}
4646509Smrj 
4647509Smrj 		/* switch to the next window */
4648509Smrj 		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
4649509Smrj 		    &lcookie, &ccnt);
4650509Smrj 		ASSERT(e == DDI_SUCCESS);
4651509Smrj 		if (e != DDI_SUCCESS) {
4652509Smrj 			return (DDI_DMA_STALE);
4653509Smrj 		}
4654509Smrj 
4655509Smrj 		/* reset the cookie back to the first cookie in the window */
4656509Smrj 		if (dma->dp_window != NULL) {
4657509Smrj 			window = &dma->dp_window[dma->dp_current_win];
4658509Smrj 			hp->dmai_cookie = window->wd_first_cookie;
4659509Smrj 		} else {
4660509Smrj 			hp->dmai_cookie = dma->dp_cookies;
4661509Smrj 		}
4662509Smrj 
4663509Smrj 		*objpp = (caddr_t)handle;
4664509Smrj 		return (DDI_SUCCESS);
4665509Smrj 
4666509Smrj 	case DDI_DMA_FREE: /* ddi_dma_free() */
4667509Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
4668509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, handle);
4669509Smrj 		if (rootnex_state->r_dvma_call_list_id) {
4670509Smrj 			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
4671509Smrj 		}
4672509Smrj 		return (DDI_SUCCESS);
4673509Smrj 
4674509Smrj 	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
4675509Smrj 	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
4676509Smrj 		/* should never get here, handled in genunix */
4677509Smrj 		ASSERT(0);
4678509Smrj 		return (DDI_FAILURE);
4679509Smrj 
4680509Smrj 	case DDI_DMA_KVADDR:
4681509Smrj 	case DDI_DMA_GETERR:
4682509Smrj 	case DDI_DMA_COFF:
4683509Smrj 		return (DDI_FAILURE);
46840Sstevel@tonic-gate 	}
4685509Smrj 
4686509Smrj 	return (DDI_FAILURE);
4687509Smrj #endif /* defined(__amd64) */
46880Sstevel@tonic-gate }
46891414Scindi 
46907613SVikram.Hegde@Sun.COM /*
46917613SVikram.Hegde@Sun.COM  * rootnex_dma_mctl()
46927613SVikram.Hegde@Sun.COM  *
46937613SVikram.Hegde@Sun.COM  */
46947613SVikram.Hegde@Sun.COM /* ARGSUSED */
46957613SVikram.Hegde@Sun.COM static int
46967613SVikram.Hegde@Sun.COM rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
46977613SVikram.Hegde@Sun.COM     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
46987613SVikram.Hegde@Sun.COM     uint_t cache_flags)
46997613SVikram.Hegde@Sun.COM {
47007613SVikram.Hegde@Sun.COM #if !defined(__xpv)
47017613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
47027613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_mctl(dip, rdip, handle, request, offp,
47037613SVikram.Hegde@Sun.COM 		    lenp, objpp, cache_flags));
47047613SVikram.Hegde@Sun.COM 	}
47057613SVikram.Hegde@Sun.COM #endif
47067613SVikram.Hegde@Sun.COM 
47077613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_mctl(dip, rdip, handle, request, offp,
47087613SVikram.Hegde@Sun.COM 	    lenp, objpp, cache_flags));
47097613SVikram.Hegde@Sun.COM }
47101865Sdilpreet 
47111865Sdilpreet /*
47121865Sdilpreet  * *********
47131865Sdilpreet  *  FMA Code
47141865Sdilpreet  * *********
47151865Sdilpreet  */
47161865Sdilpreet 
47171865Sdilpreet /*
47181865Sdilpreet  * rootnex_fm_init()
47191865Sdilpreet  *    FMA init busop
47201865Sdilpreet  */
47211865Sdilpreet /* ARGSUSED */
47221865Sdilpreet static int
47231865Sdilpreet rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
47241865Sdilpreet     ddi_iblock_cookie_t *ibc)
47251865Sdilpreet {
47261865Sdilpreet 	*ibc = rootnex_state->r_err_ibc;
47271865Sdilpreet 
47281865Sdilpreet 	return (ddi_system_fmcap);
47291865Sdilpreet }
47301865Sdilpreet 
47311865Sdilpreet /*
47321865Sdilpreet  * rootnex_dma_check()
47331865Sdilpreet  *    Function called after a dma fault occurred to find out whether the
47341865Sdilpreet  *    fault address is associated with a driver that is able to handle faults
47351865Sdilpreet  *    and recover from faults.
47361865Sdilpreet  */
47371865Sdilpreet /* ARGSUSED */
47381414Scindi static int
47391865Sdilpreet rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
47401865Sdilpreet     const void *not_used)
47411414Scindi {
47421865Sdilpreet 	rootnex_window_t *window;
47431865Sdilpreet 	uint64_t start_addr;
47441865Sdilpreet 	uint64_t fault_addr;
47451865Sdilpreet 	ddi_dma_impl_t *hp;
47461865Sdilpreet 	rootnex_dma_t *dma;
47471865Sdilpreet 	uint64_t end_addr;
47481865Sdilpreet 	size_t csize;
47491865Sdilpreet 	int i;
47501865Sdilpreet 	int j;
47511865Sdilpreet 
47521865Sdilpreet 
47531865Sdilpreet 	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
47541865Sdilpreet 	hp = (ddi_dma_impl_t *)handle;
47551865Sdilpreet 	ASSERT(hp);
47561865Sdilpreet 
47571865Sdilpreet 	dma = (rootnex_dma_t *)hp->dmai_private;
47581865Sdilpreet 
47591865Sdilpreet 	/* Get the address that we need to search for */
47601865Sdilpreet 	fault_addr = *(uint64_t *)addr;
47611865Sdilpreet 
47621865Sdilpreet 	/*
47631865Sdilpreet 	 * if we don't have any windows, we can just walk through all the
47641865Sdilpreet 	 * cookies.
47651865Sdilpreet 	 */
47661865Sdilpreet 	if (dma->dp_window == NULL) {
47671865Sdilpreet 		/* for each cookie */
47681865Sdilpreet 		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
47691865Sdilpreet 			/*
47701865Sdilpreet 			 * if the faulted address is within the physical address
47711865Sdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
47721865Sdilpreet 			 */
47731865Sdilpreet 			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
47741865Sdilpreet 			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
47751865Sdilpreet 			    dma->dp_cookies[i].dmac_size))) {
47761865Sdilpreet 				return (DDI_FM_NONFATAL);
47771865Sdilpreet 			}
47781865Sdilpreet 		}
47791865Sdilpreet 
47801865Sdilpreet 		/* fault_addr not within this DMA handle */
47811865Sdilpreet 		return (DDI_FM_UNKNOWN);
47821865Sdilpreet 	}
47831865Sdilpreet 
47841865Sdilpreet 	/* we have mutiple windows, walk through each window */
47851865Sdilpreet 	for (i = 0; i < hp->dmai_nwin; i++) {
47861865Sdilpreet 		window = &dma->dp_window[i];
47871865Sdilpreet 
47881865Sdilpreet 		/* Go through all the cookies in the window */
47891865Sdilpreet 		for (j = 0; j < window->wd_cookie_cnt; j++) {
47901865Sdilpreet 
47911865Sdilpreet 			start_addr = window->wd_first_cookie[j].dmac_laddress;
47921865Sdilpreet 			csize = window->wd_first_cookie[j].dmac_size;
47931865Sdilpreet 
47941865Sdilpreet 			/*
47951865Sdilpreet 			 * if we are trimming the first cookie in the window,
47961865Sdilpreet 			 * and this is the first cookie, adjust the start
47971865Sdilpreet 			 * address and size of the cookie to account for the
47981865Sdilpreet 			 * trim.
47991865Sdilpreet 			 */
48001865Sdilpreet 			if (window->wd_trim.tr_trim_first && (j == 0)) {
48011865Sdilpreet 				start_addr = window->wd_trim.tr_first_paddr;
48021865Sdilpreet 				csize = window->wd_trim.tr_first_size;
48031865Sdilpreet 			}
48041865Sdilpreet 
48051865Sdilpreet 			/*
48061865Sdilpreet 			 * if we are trimming the last cookie in the window,
48071865Sdilpreet 			 * and this is the last cookie, adjust the start
48081865Sdilpreet 			 * address and size of the cookie to account for the
48091865Sdilpreet 			 * trim.
48101865Sdilpreet 			 */
48111865Sdilpreet 			if (window->wd_trim.tr_trim_last &&
48121865Sdilpreet 			    (j == (window->wd_cookie_cnt - 1))) {
48131865Sdilpreet 				start_addr = window->wd_trim.tr_last_paddr;
48141865Sdilpreet 				csize = window->wd_trim.tr_last_size;
48151865Sdilpreet 			}
48161865Sdilpreet 
48171865Sdilpreet 			end_addr = start_addr + csize;
48181865Sdilpreet 
48191865Sdilpreet 			/*
48201865Sdilpreet 			 * if the faulted address is within the physical address
48211865Sdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
48221865Sdilpreet 			 */
48231865Sdilpreet 			if ((fault_addr >= start_addr) &&
48241865Sdilpreet 			    (fault_addr <= end_addr)) {
48251865Sdilpreet 				return (DDI_FM_NONFATAL);
48261865Sdilpreet 			}
48271865Sdilpreet 		}
48281865Sdilpreet 	}
48291865Sdilpreet 
48301865Sdilpreet 	/* fault_addr not within this DMA handle */
48311865Sdilpreet 	return (DDI_FM_UNKNOWN);
48321414Scindi }
4833