xref: /onnv-gate/usr/src/uts/i86pc/io/rootnex.c (revision 7613:e49de7ec7617)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51865Sdilpreet  * Common Development and Distribution License (the "License").
61865Sdilpreet  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
227173Smrj  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate /*
27509Smrj  * x86 root nexus driver
280Sstevel@tonic-gate  */
290Sstevel@tonic-gate 
300Sstevel@tonic-gate #include <sys/sysmacros.h>
310Sstevel@tonic-gate #include <sys/conf.h>
320Sstevel@tonic-gate #include <sys/autoconf.h>
330Sstevel@tonic-gate #include <sys/sysmacros.h>
340Sstevel@tonic-gate #include <sys/debug.h>
350Sstevel@tonic-gate #include <sys/psw.h>
360Sstevel@tonic-gate #include <sys/ddidmareq.h>
370Sstevel@tonic-gate #include <sys/promif.h>
380Sstevel@tonic-gate #include <sys/devops.h>
390Sstevel@tonic-gate #include <sys/kmem.h>
400Sstevel@tonic-gate #include <sys/cmn_err.h>
410Sstevel@tonic-gate #include <vm/seg.h>
420Sstevel@tonic-gate #include <vm/seg_kmem.h>
430Sstevel@tonic-gate #include <vm/seg_dev.h>
440Sstevel@tonic-gate #include <sys/vmem.h>
450Sstevel@tonic-gate #include <sys/mman.h>
460Sstevel@tonic-gate #include <vm/hat.h>
470Sstevel@tonic-gate #include <vm/as.h>
480Sstevel@tonic-gate #include <vm/page.h>
490Sstevel@tonic-gate #include <sys/avintr.h>
500Sstevel@tonic-gate #include <sys/errno.h>
510Sstevel@tonic-gate #include <sys/modctl.h>
520Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
530Sstevel@tonic-gate #include <sys/sunddi.h>
540Sstevel@tonic-gate #include <sys/sunndi.h>
55916Sschwartz #include <sys/mach_intr.h>
560Sstevel@tonic-gate #include <sys/psm.h>
570Sstevel@tonic-gate #include <sys/ontrap.h>
58509Smrj #include <sys/atomic.h>
59509Smrj #include <sys/sdt.h>
60509Smrj #include <sys/rootnex.h>
61509Smrj #include <vm/hat_i86.h>
621865Sdilpreet #include <sys/ddifm.h>
635251Smrj #include <sys/ddi_isa.h>
64509Smrj 
655084Sjohnlev #ifdef __xpv
665084Sjohnlev #include <sys/bootinfo.h>
675084Sjohnlev #include <sys/hypervisor.h>
685084Sjohnlev #include <sys/bootconf.h>
695084Sjohnlev #include <vm/kboot_mmu.h>
70*7613SVikram.Hegde@Sun.COM #else
717589SVikram.Hegde@Sun.COM #include <sys/intel_iommu.h>
72*7613SVikram.Hegde@Sun.COM #endif
73*7613SVikram.Hegde@Sun.COM 
747589SVikram.Hegde@Sun.COM 
75509Smrj /*
76509Smrj  * enable/disable extra checking of function parameters. Useful for debugging
77509Smrj  * drivers.
78509Smrj  */
79509Smrj #ifdef	DEBUG
80509Smrj int rootnex_alloc_check_parms = 1;
81509Smrj int rootnex_bind_check_parms = 1;
82509Smrj int rootnex_bind_check_inuse = 1;
83509Smrj int rootnex_unbind_verify_buffer = 0;
84509Smrj int rootnex_sync_check_parms = 1;
85509Smrj #else
86509Smrj int rootnex_alloc_check_parms = 0;
87509Smrj int rootnex_bind_check_parms = 0;
88509Smrj int rootnex_bind_check_inuse = 0;
89509Smrj int rootnex_unbind_verify_buffer = 0;
90509Smrj int rootnex_sync_check_parms = 0;
91509Smrj #endif
92509Smrj 
931414Scindi /* Master Abort and Target Abort panic flag */
941414Scindi int rootnex_fm_ma_ta_panic_flag = 0;
951414Scindi 
96509Smrj /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
970Sstevel@tonic-gate int rootnex_bind_fail = 1;
980Sstevel@tonic-gate int rootnex_bind_warn = 1;
990Sstevel@tonic-gate uint8_t *rootnex_warn_list;
1000Sstevel@tonic-gate /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
1010Sstevel@tonic-gate #define	ROOTNEX_BIND_WARNING	(0x1 << 0)
1020Sstevel@tonic-gate 
1030Sstevel@tonic-gate /*
104509Smrj  * revert back to old broken behavior of always sync'ing entire copy buffer.
105509Smrj  * This is useful if be have a buggy driver which doesn't correctly pass in
106509Smrj  * the offset and size into ddi_dma_sync().
1070Sstevel@tonic-gate  */
108509Smrj int rootnex_sync_ignore_params = 0;
1090Sstevel@tonic-gate 
1100Sstevel@tonic-gate /*
111509Smrj  * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
112509Smrj  * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
113509Smrj  * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
114509Smrj  * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
115509Smrj  * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
116509Smrj  * (< 8K). We will still need to allocate the copy buffer during bind though
117509Smrj  * (if we need one). These can only be modified in /etc/system before rootnex
118509Smrj  * attach.
1190Sstevel@tonic-gate  */
120509Smrj #if defined(__amd64)
121509Smrj int rootnex_prealloc_cookies = 65;
122509Smrj int rootnex_prealloc_windows = 4;
123509Smrj int rootnex_prealloc_copybuf = 2;
124509Smrj #else
125509Smrj int rootnex_prealloc_cookies = 33;
126509Smrj int rootnex_prealloc_windows = 4;
127509Smrj int rootnex_prealloc_copybuf = 2;
128509Smrj #endif
129509Smrj 
130509Smrj /* driver global state */
131509Smrj static rootnex_state_t *rootnex_state;
132509Smrj 
133509Smrj /* shortcut to rootnex counters */
134509Smrj static uint64_t *rootnex_cnt;
1350Sstevel@tonic-gate 
1360Sstevel@tonic-gate /*
137509Smrj  * XXX - does x86 even need these or are they left over from the SPARC days?
1380Sstevel@tonic-gate  */
139509Smrj /* statically defined integer/boolean properties for the root node */
140509Smrj static rootnex_intprop_t rootnex_intprp[] = {
141509Smrj 	{ "PAGESIZE",			PAGESIZE },
142509Smrj 	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
143509Smrj 	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
144509Smrj 	{ DDI_RELATIVE_ADDRESSING,	1 },
145509Smrj };
146509Smrj #define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
147509Smrj 
1485084Sjohnlev #ifdef __xpv
1495084Sjohnlev typedef maddr_t rootnex_addr_t;
1505084Sjohnlev #define	ROOTNEX_PADDR_TO_RBASE(xinfo, pa)	\
1515084Sjohnlev 	(DOMAIN_IS_INITDOMAIN(xinfo) ? pa_to_ma(pa) : (pa))
1525084Sjohnlev #else
1535084Sjohnlev typedef paddr_t rootnex_addr_t;
1545084Sjohnlev #endif
1555084Sjohnlev 
156*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
157*7613SVikram.Hegde@Sun.COM char _depends_on[] = "mach/pcplusmp misc/iommulib";
158*7613SVikram.Hegde@Sun.COM #endif
159509Smrj 
160509Smrj static struct cb_ops rootnex_cb_ops = {
161509Smrj 	nodev,		/* open */
162509Smrj 	nodev,		/* close */
163509Smrj 	nodev,		/* strategy */
164509Smrj 	nodev,		/* print */
165509Smrj 	nodev,		/* dump */
166509Smrj 	nodev,		/* read */
167509Smrj 	nodev,		/* write */
168509Smrj 	nodev,		/* ioctl */
169509Smrj 	nodev,		/* devmap */
170509Smrj 	nodev,		/* mmap */
171509Smrj 	nodev,		/* segmap */
172509Smrj 	nochpoll,	/* chpoll */
173509Smrj 	ddi_prop_op,	/* cb_prop_op */
174509Smrj 	NULL,		/* struct streamtab */
175509Smrj 	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
176509Smrj 	CB_REV,		/* Rev */
177509Smrj 	nodev,		/* cb_aread */
178509Smrj 	nodev		/* cb_awrite */
179509Smrj };
180509Smrj 
181509Smrj static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
1820Sstevel@tonic-gate     off_t offset, off_t len, caddr_t *vaddrp);
183509Smrj static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
1840Sstevel@tonic-gate     struct hat *hat, struct seg *seg, caddr_t addr,
1850Sstevel@tonic-gate     struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
186509Smrj static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
1870Sstevel@tonic-gate     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
188509Smrj static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
189509Smrj     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
190509Smrj     ddi_dma_handle_t *handlep);
191509Smrj static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
192509Smrj     ddi_dma_handle_t handle);
193509Smrj static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
194509Smrj     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
195509Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
196509Smrj static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
197509Smrj     ddi_dma_handle_t handle);
198509Smrj static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
199509Smrj     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
200509Smrj static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
201509Smrj     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
202509Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
203509Smrj static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
2040Sstevel@tonic-gate     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
2050Sstevel@tonic-gate     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
206509Smrj static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
207509Smrj     ddi_ctl_enum_t ctlop, void *arg, void *result);
2081865Sdilpreet static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
2091865Sdilpreet     ddi_iblock_cookie_t *ibc);
210509Smrj static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
211509Smrj     ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
212509Smrj 
213*7613SVikram.Hegde@Sun.COM static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
214*7613SVikram.Hegde@Sun.COM     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
215*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t *handlep);
216*7613SVikram.Hegde@Sun.COM static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
217*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
218*7613SVikram.Hegde@Sun.COM static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
219*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
220*7613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
221*7613SVikram.Hegde@Sun.COM static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
222*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
223*7613SVikram.Hegde@Sun.COM static void rootnex_coredma_reset_cookies(dev_info_t *dip,
224*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
225*7613SVikram.Hegde@Sun.COM static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
226*7613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
227*7613SVikram.Hegde@Sun.COM static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip,
228*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
229*7613SVikram.Hegde@Sun.COM static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip,
230*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
231*7613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
232*7613SVikram.Hegde@Sun.COM static int rootnex_coredma_map(dev_info_t *dip, dev_info_t *rdip,
233*7613SVikram.Hegde@Sun.COM     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
234*7613SVikram.Hegde@Sun.COM static int rootnex_coredma_mctl(dev_info_t *dip, dev_info_t *rdip,
235*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, enum ddi_dma_ctlops request, off_t *offp,
236*7613SVikram.Hegde@Sun.COM     size_t *lenp, caddr_t *objpp, uint_t cache_flags);
2370Sstevel@tonic-gate 
2380Sstevel@tonic-gate static struct bus_ops rootnex_bus_ops = {
2390Sstevel@tonic-gate 	BUSO_REV,
2400Sstevel@tonic-gate 	rootnex_map,
2410Sstevel@tonic-gate 	NULL,
2420Sstevel@tonic-gate 	NULL,
2430Sstevel@tonic-gate 	NULL,
2440Sstevel@tonic-gate 	rootnex_map_fault,
2450Sstevel@tonic-gate 	rootnex_dma_map,
2460Sstevel@tonic-gate 	rootnex_dma_allochdl,
2470Sstevel@tonic-gate 	rootnex_dma_freehdl,
2480Sstevel@tonic-gate 	rootnex_dma_bindhdl,
2490Sstevel@tonic-gate 	rootnex_dma_unbindhdl,
250509Smrj 	rootnex_dma_sync,
2510Sstevel@tonic-gate 	rootnex_dma_win,
2520Sstevel@tonic-gate 	rootnex_dma_mctl,
2530Sstevel@tonic-gate 	rootnex_ctlops,
2540Sstevel@tonic-gate 	ddi_bus_prop_op,
2550Sstevel@tonic-gate 	i_ddi_rootnex_get_eventcookie,
2560Sstevel@tonic-gate 	i_ddi_rootnex_add_eventcall,
2570Sstevel@tonic-gate 	i_ddi_rootnex_remove_eventcall,
2580Sstevel@tonic-gate 	i_ddi_rootnex_post_event,
2590Sstevel@tonic-gate 	0,			/* bus_intr_ctl */
2600Sstevel@tonic-gate 	0,			/* bus_config */
2610Sstevel@tonic-gate 	0,			/* bus_unconfig */
2621865Sdilpreet 	rootnex_fm_init,	/* bus_fm_init */
2630Sstevel@tonic-gate 	NULL,			/* bus_fm_fini */
2640Sstevel@tonic-gate 	NULL,			/* bus_fm_access_enter */
2650Sstevel@tonic-gate 	NULL,			/* bus_fm_access_exit */
2660Sstevel@tonic-gate 	NULL,			/* bus_powr */
2670Sstevel@tonic-gate 	rootnex_intr_ops	/* bus_intr_op */
2680Sstevel@tonic-gate };
2690Sstevel@tonic-gate 
270509Smrj static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
271509Smrj static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
2720Sstevel@tonic-gate 
2730Sstevel@tonic-gate static struct dev_ops rootnex_ops = {
2740Sstevel@tonic-gate 	DEVO_REV,
275509Smrj 	0,
276509Smrj 	ddi_no_info,
277509Smrj 	nulldev,
2780Sstevel@tonic-gate 	nulldev,
2790Sstevel@tonic-gate 	rootnex_attach,
280509Smrj 	rootnex_detach,
281509Smrj 	nulldev,
282509Smrj 	&rootnex_cb_ops,
2830Sstevel@tonic-gate 	&rootnex_bus_ops
2840Sstevel@tonic-gate };
2850Sstevel@tonic-gate 
286509Smrj static struct modldrv rootnex_modldrv = {
287509Smrj 	&mod_driverops,
2887542SRichard.Bean@Sun.COM 	"i86pc root nexus",
289509Smrj 	&rootnex_ops
290509Smrj };
291509Smrj 
292509Smrj static struct modlinkage rootnex_modlinkage = {
293509Smrj 	MODREV_1,
294509Smrj 	(void *)&rootnex_modldrv,
295509Smrj 	NULL
296509Smrj };
297509Smrj 
298*7613SVikram.Hegde@Sun.COM static iommulib_nexops_t iommulib_nexops = {
299*7613SVikram.Hegde@Sun.COM 	IOMMU_NEXOPS_VERSION,
300*7613SVikram.Hegde@Sun.COM 	"Rootnex IOMMU ops Vers 1.1",
301*7613SVikram.Hegde@Sun.COM 	NULL,
302*7613SVikram.Hegde@Sun.COM 	rootnex_coredma_allochdl,
303*7613SVikram.Hegde@Sun.COM 	rootnex_coredma_freehdl,
304*7613SVikram.Hegde@Sun.COM 	rootnex_coredma_bindhdl,
305*7613SVikram.Hegde@Sun.COM 	rootnex_coredma_unbindhdl,
306*7613SVikram.Hegde@Sun.COM 	rootnex_coredma_reset_cookies,
307*7613SVikram.Hegde@Sun.COM 	rootnex_coredma_get_cookies,
308*7613SVikram.Hegde@Sun.COM 	rootnex_coredma_sync,
309*7613SVikram.Hegde@Sun.COM 	rootnex_coredma_win,
310*7613SVikram.Hegde@Sun.COM 	rootnex_coredma_map,
311*7613SVikram.Hegde@Sun.COM 	rootnex_coredma_mctl
312*7613SVikram.Hegde@Sun.COM };
313509Smrj 
314509Smrj /*
315509Smrj  *  extern hacks
316509Smrj  */
317509Smrj extern struct seg_ops segdev_ops;
318509Smrj extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
319509Smrj #ifdef	DDI_MAP_DEBUG
320509Smrj extern int ddi_map_debug_flag;
321509Smrj #define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
322509Smrj #endif
323509Smrj extern void i86_pp_map(page_t *pp, caddr_t kaddr);
324509Smrj extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
325509Smrj extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
326509Smrj     psm_intr_op_t, int *);
327509Smrj extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
328509Smrj extern void impl_ddi_sunbus_removechild(dev_info_t *dip);
3295251Smrj 
330509Smrj /*
331509Smrj  * Use device arena to use for device control register mappings.
332509Smrj  * Various kernel memory walkers (debugger, dtrace) need to know
333509Smrj  * to avoid this address range to prevent undesired device activity.
334509Smrj  */
335509Smrj extern void *device_arena_alloc(size_t size, int vm_flag);
336509Smrj extern void device_arena_free(void * vaddr, size_t size);
337509Smrj 
338509Smrj 
3390Sstevel@tonic-gate /*
340509Smrj  *  Internal functions
3410Sstevel@tonic-gate  */
342509Smrj static int rootnex_dma_init();
343509Smrj static void rootnex_add_props(dev_info_t *);
344509Smrj static int rootnex_ctl_reportdev(dev_info_t *dip);
345509Smrj static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
346509Smrj static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
347509Smrj static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
348509Smrj static int rootnex_map_handle(ddi_map_req_t *mp);
349509Smrj static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
350509Smrj static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
351509Smrj static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
352509Smrj     ddi_dma_attr_t *attr);
353509Smrj static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
354509Smrj     rootnex_sglinfo_t *sglinfo);
355509Smrj static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
356509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag);
357509Smrj static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
358509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr);
359509Smrj static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
360509Smrj static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
361509Smrj     ddi_dma_attr_t *attr, int kmflag);
362509Smrj static void rootnex_teardown_windows(rootnex_dma_t *dma);
363509Smrj static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
364509Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
365509Smrj static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
366509Smrj     rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
367509Smrj     size_t *copybuf_used, page_t **cur_pp);
368509Smrj static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
369509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
370509Smrj     ddi_dma_attr_t *attr, off_t cur_offset);
371509Smrj static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
372509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp,
373509Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
374509Smrj static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
375509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
376509Smrj static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
377509Smrj     off_t offset, size_t size, uint_t cache_flags);
378509Smrj static int rootnex_verify_buffer(rootnex_dma_t *dma);
3791865Sdilpreet static int rootnex_dma_check(dev_info_t *dip, const void *handle,
3801865Sdilpreet     const void *comp_addr, const void *not_used);
381509Smrj 
382509Smrj /*
383509Smrj  * _init()
384509Smrj  *
385509Smrj  */
3860Sstevel@tonic-gate int
3870Sstevel@tonic-gate _init(void)
3880Sstevel@tonic-gate {
389509Smrj 
390509Smrj 	rootnex_state = NULL;
391509Smrj 	return (mod_install(&rootnex_modlinkage));
3920Sstevel@tonic-gate }
3930Sstevel@tonic-gate 
394509Smrj 
395509Smrj /*
396509Smrj  * _info()
397509Smrj  *
398509Smrj  */
399509Smrj int
400509Smrj _info(struct modinfo *modinfop)
401509Smrj {
402509Smrj 	return (mod_info(&rootnex_modlinkage, modinfop));
403509Smrj }
404509Smrj 
405509Smrj 
406509Smrj /*
407509Smrj  * _fini()
408509Smrj  *
409509Smrj  */
4100Sstevel@tonic-gate int
4110Sstevel@tonic-gate _fini(void)
4120Sstevel@tonic-gate {
4130Sstevel@tonic-gate 	return (EBUSY);
4140Sstevel@tonic-gate }
4150Sstevel@tonic-gate 
4160Sstevel@tonic-gate 
4170Sstevel@tonic-gate /*
418509Smrj  * rootnex_attach()
4190Sstevel@tonic-gate  *
4200Sstevel@tonic-gate  */
421509Smrj static int
422509Smrj rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
423509Smrj {
4241414Scindi 	int fmcap;
425509Smrj 	int e;
426509Smrj 
427509Smrj 	switch (cmd) {
428509Smrj 	case DDI_ATTACH:
429509Smrj 		break;
430509Smrj 	case DDI_RESUME:
431509Smrj 		return (DDI_SUCCESS);
432509Smrj 	default:
433509Smrj 		return (DDI_FAILURE);
434509Smrj 	}
435509Smrj 
436509Smrj 	/*
437509Smrj 	 * We should only have one instance of rootnex. Save it away since we
438509Smrj 	 * don't have an easy way to get it back later.
439509Smrj 	 */
440509Smrj 	ASSERT(rootnex_state == NULL);
441509Smrj 	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);
442509Smrj 
443509Smrj 	rootnex_state->r_dip = dip;
4441414Scindi 	rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
445509Smrj 	rootnex_state->r_reserved_msg_printed = B_FALSE;
446509Smrj 	rootnex_cnt = &rootnex_state->r_counters[0];
4477589SVikram.Hegde@Sun.COM 	rootnex_state->r_intel_iommu_enabled = B_FALSE;
448509Smrj 
4491414Scindi 	/*
4501414Scindi 	 * Set minimum fm capability level for i86pc platforms and then
4511414Scindi 	 * initialize error handling. Since we're the rootnex, we don't
4521414Scindi 	 * care what's returned in the fmcap field.
4531414Scindi 	 */
4541865Sdilpreet 	ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
4551865Sdilpreet 	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
4561414Scindi 	fmcap = ddi_system_fmcap;
4571414Scindi 	ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);
4581414Scindi 
459509Smrj 	/* initialize DMA related state */
460509Smrj 	e = rootnex_dma_init();
461509Smrj 	if (e != DDI_SUCCESS) {
462509Smrj 		kmem_free(rootnex_state, sizeof (rootnex_state_t));
463509Smrj 		return (DDI_FAILURE);
464509Smrj 	}
465509Smrj 
466509Smrj 	/* Add static root node properties */
467509Smrj 	rootnex_add_props(dip);
468509Smrj 
469509Smrj 	/* since we can't call ddi_report_dev() */
470509Smrj 	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));
471509Smrj 
472509Smrj 	/* Initialize rootnex event handle */
473509Smrj 	i_ddi_rootnex_init_events(dip);
474509Smrj 
475*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
4767589SVikram.Hegde@Sun.COM #if defined(__amd64)
4777589SVikram.Hegde@Sun.COM 	/* probe intel iommu */
4787589SVikram.Hegde@Sun.COM 	intel_iommu_probe_and_parse();
4797589SVikram.Hegde@Sun.COM 
4807589SVikram.Hegde@Sun.COM 	/* attach the iommu nodes */
4817589SVikram.Hegde@Sun.COM 	if (intel_iommu_support) {
4827589SVikram.Hegde@Sun.COM 		if (intel_iommu_attach_dmar_nodes() == DDI_SUCCESS) {
4837589SVikram.Hegde@Sun.COM 			rootnex_state->r_intel_iommu_enabled = B_TRUE;
4847589SVikram.Hegde@Sun.COM 		} else {
4857589SVikram.Hegde@Sun.COM 			intel_iommu_release_dmar_info();
4867589SVikram.Hegde@Sun.COM 		}
4877589SVikram.Hegde@Sun.COM 	}
4887589SVikram.Hegde@Sun.COM #endif
4897589SVikram.Hegde@Sun.COM 
490*7613SVikram.Hegde@Sun.COM 	e = iommulib_nexus_register(dip, &iommulib_nexops,
491*7613SVikram.Hegde@Sun.COM 	    &rootnex_state->r_iommulib_handle);
492*7613SVikram.Hegde@Sun.COM 
493*7613SVikram.Hegde@Sun.COM 	ASSERT(e == DDI_SUCCESS);
494*7613SVikram.Hegde@Sun.COM #endif
495*7613SVikram.Hegde@Sun.COM 
496509Smrj 	return (DDI_SUCCESS);
497509Smrj }
498509Smrj 
499509Smrj 
500509Smrj /*
501509Smrj  * rootnex_detach()
502509Smrj  *
503509Smrj  */
5040Sstevel@tonic-gate /*ARGSUSED*/
5050Sstevel@tonic-gate static int
506509Smrj rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
507509Smrj {
508509Smrj 	switch (cmd) {
509509Smrj 	case DDI_SUSPEND:
510509Smrj 		break;
511509Smrj 	default:
512509Smrj 		return (DDI_FAILURE);
513509Smrj 	}
514509Smrj 
515509Smrj 	return (DDI_SUCCESS);
516509Smrj }
517509Smrj 
518509Smrj 
519509Smrj /*
520509Smrj  * rootnex_dma_init()
521509Smrj  *
522509Smrj  */
523509Smrj /*ARGSUSED*/
524509Smrj static int
525509Smrj rootnex_dma_init()
5260Sstevel@tonic-gate {
527509Smrj 	size_t bufsize;
528509Smrj 
529509Smrj 
530509Smrj 	/*
531509Smrj 	 * size of our cookie/window/copybuf state needed in dma bind that we
532509Smrj 	 * pre-alloc in dma_alloc_handle
533509Smrj 	 */
534509Smrj 	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
535509Smrj 	rootnex_state->r_prealloc_size =
536509Smrj 	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
537509Smrj 	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
538509Smrj 	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));
539509Smrj 
540509Smrj 	/*
541509Smrj 	 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
542509Smrj 	 * allocate 16 extra bytes for struct pointer alignment
543509Smrj 	 * (p->dmai_private & dma->dp_prealloc_buffer)
544509Smrj 	 */
545509Smrj 	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
546509Smrj 	    rootnex_state->r_prealloc_size + 0x10;
547509Smrj 	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
548509Smrj 	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
549509Smrj 	if (rootnex_state->r_dmahdl_cache == NULL) {
550509Smrj 		return (DDI_FAILURE);
551509Smrj 	}
5520Sstevel@tonic-gate 
5530Sstevel@tonic-gate 	/*
5540Sstevel@tonic-gate 	 * allocate array to track which major numbers we have printed warnings
5550Sstevel@tonic-gate 	 * for.
5560Sstevel@tonic-gate 	 */
5570Sstevel@tonic-gate 	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
5580Sstevel@tonic-gate 	    KM_SLEEP);
5590Sstevel@tonic-gate 
5600Sstevel@tonic-gate 	return (DDI_SUCCESS);
5610Sstevel@tonic-gate }
5620Sstevel@tonic-gate 
5630Sstevel@tonic-gate 
5640Sstevel@tonic-gate /*
565509Smrj  * rootnex_add_props()
566509Smrj  *
5670Sstevel@tonic-gate  */
5680Sstevel@tonic-gate static void
569509Smrj rootnex_add_props(dev_info_t *dip)
5700Sstevel@tonic-gate {
571509Smrj 	rootnex_intprop_t *rpp;
5720Sstevel@tonic-gate 	int i;
573509Smrj 
574509Smrj 	/* Add static integer/boolean properties to the root node */
575509Smrj 	rpp = rootnex_intprp;
576509Smrj 	for (i = 0; i < NROOT_INTPROPS; i++) {
577509Smrj 		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
578509Smrj 		    rpp[i].prop_name, rpp[i].prop_value);
5790Sstevel@tonic-gate 	}
5800Sstevel@tonic-gate }
5810Sstevel@tonic-gate 
582509Smrj 
583509Smrj 
584509Smrj /*
585509Smrj  * *************************
586509Smrj  *  ctlops related routines
587509Smrj  * *************************
588509Smrj  */
589509Smrj 
5900Sstevel@tonic-gate /*
591509Smrj  * rootnex_ctlops()
592509Smrj  *
5930Sstevel@tonic-gate  */
594693Sgovinda /*ARGSUSED*/
595509Smrj static int
596509Smrj rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
597509Smrj     void *arg, void *result)
598509Smrj {
599509Smrj 	int n, *ptr;
600509Smrj 	struct ddi_parent_private_data *pdp;
601509Smrj 
602509Smrj 	switch (ctlop) {
603509Smrj 	case DDI_CTLOPS_DMAPMAPC:
604509Smrj 		/*
605509Smrj 		 * Return 'partial' to indicate that dma mapping
606509Smrj 		 * has to be done in the main MMU.
607509Smrj 		 */
608509Smrj 		return (DDI_DMA_PARTIAL);
609509Smrj 
610509Smrj 	case DDI_CTLOPS_BTOP:
611509Smrj 		/*
612509Smrj 		 * Convert byte count input to physical page units.
613509Smrj 		 * (byte counts that are not a page-size multiple
614509Smrj 		 * are rounded down)
615509Smrj 		 */
616509Smrj 		*(ulong_t *)result = btop(*(ulong_t *)arg);
617509Smrj 		return (DDI_SUCCESS);
618509Smrj 
619509Smrj 	case DDI_CTLOPS_PTOB:
620509Smrj 		/*
621509Smrj 		 * Convert size in physical pages to bytes
622509Smrj 		 */
623509Smrj 		*(ulong_t *)result = ptob(*(ulong_t *)arg);
624509Smrj 		return (DDI_SUCCESS);
625509Smrj 
626509Smrj 	case DDI_CTLOPS_BTOPR:
627509Smrj 		/*
628509Smrj 		 * Convert byte count input to physical page units
629509Smrj 		 * (byte counts that are not a page-size multiple
630509Smrj 		 * are rounded up)
631509Smrj 		 */
632509Smrj 		*(ulong_t *)result = btopr(*(ulong_t *)arg);
633509Smrj 		return (DDI_SUCCESS);
634509Smrj 
635509Smrj 	case DDI_CTLOPS_INITCHILD:
636509Smrj 		return (impl_ddi_sunbus_initchild(arg));
637509Smrj 
638509Smrj 	case DDI_CTLOPS_UNINITCHILD:
639509Smrj 		impl_ddi_sunbus_removechild(arg);
640509Smrj 		return (DDI_SUCCESS);
641509Smrj 
642509Smrj 	case DDI_CTLOPS_REPORTDEV:
643509Smrj 		return (rootnex_ctl_reportdev(rdip));
644509Smrj 
645509Smrj 	case DDI_CTLOPS_IOMIN:
646509Smrj 		/*
647509Smrj 		 * Nothing to do here but reflect back..
648509Smrj 		 */
649509Smrj 		return (DDI_SUCCESS);
650509Smrj 
651509Smrj 	case DDI_CTLOPS_REGSIZE:
652509Smrj 	case DDI_CTLOPS_NREGS:
653509Smrj 		break;
654509Smrj 
655509Smrj 	case DDI_CTLOPS_SIDDEV:
656509Smrj 		if (ndi_dev_is_prom_node(rdip))
657509Smrj 			return (DDI_SUCCESS);
658509Smrj 		if (ndi_dev_is_persistent_node(rdip))
659509Smrj 			return (DDI_SUCCESS);
660509Smrj 		return (DDI_FAILURE);
661509Smrj 
662509Smrj 	case DDI_CTLOPS_POWER:
663509Smrj 		return ((*pm_platform_power)((power_req_t *)arg));
664509Smrj 
665693Sgovinda 	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
666509Smrj 	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
667509Smrj 	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
668509Smrj 	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
669693Sgovinda 	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
670693Sgovinda 	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
671509Smrj 		if (!rootnex_state->r_reserved_msg_printed) {
672509Smrj 			rootnex_state->r_reserved_msg_printed = B_TRUE;
673509Smrj 			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
674509Smrj 			    "1 or more reserved/obsolete operations.");
675509Smrj 		}
676509Smrj 		return (DDI_FAILURE);
677509Smrj 
678509Smrj 	default:
679509Smrj 		return (DDI_FAILURE);
680509Smrj 	}
681509Smrj 	/*
682509Smrj 	 * The rest are for "hardware" properties
683509Smrj 	 */
684509Smrj 	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
685509Smrj 		return (DDI_FAILURE);
686509Smrj 
687509Smrj 	if (ctlop == DDI_CTLOPS_NREGS) {
688509Smrj 		ptr = (int *)result;
689509Smrj 		*ptr = pdp->par_nreg;
690509Smrj 	} else {
691509Smrj 		off_t *size = (off_t *)result;
692509Smrj 
693509Smrj 		ptr = (int *)arg;
694509Smrj 		n = *ptr;
695509Smrj 		if (n >= pdp->par_nreg) {
696509Smrj 			return (DDI_FAILURE);
697509Smrj 		}
698509Smrj 		*size = (off_t)pdp->par_reg[n].regspec_size;
699509Smrj 	}
700509Smrj 	return (DDI_SUCCESS);
701509Smrj }
7020Sstevel@tonic-gate 
7030Sstevel@tonic-gate 
7040Sstevel@tonic-gate /*
705509Smrj  * rootnex_ctl_reportdev()
706509Smrj  *
7070Sstevel@tonic-gate  */
7080Sstevel@tonic-gate static int
709509Smrj rootnex_ctl_reportdev(dev_info_t *dev)
7100Sstevel@tonic-gate {
711509Smrj 	int i, n, len, f_len = 0;
712509Smrj 	char *buf;
713509Smrj 
714509Smrj 	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
715509Smrj 	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
716509Smrj 	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
717509Smrj 	len = strlen(buf);
718509Smrj 
719509Smrj 	for (i = 0; i < sparc_pd_getnreg(dev); i++) {
720509Smrj 
721509Smrj 		struct regspec *rp = sparc_pd_getreg(dev, i);
722509Smrj 
723509Smrj 		if (i == 0)
724509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
725509Smrj 			    ": ");
726509Smrj 		else
727509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
728509Smrj 			    " and ");
729509Smrj 		len = strlen(buf);
730509Smrj 
731509Smrj 		switch (rp->regspec_bustype) {
732509Smrj 
733509Smrj 		case BTEISA:
734509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
735509Smrj 			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
7360Sstevel@tonic-gate 			break;
737509Smrj 
738509Smrj 		case BTISA:
739509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
740509Smrj 			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
7410Sstevel@tonic-gate 			break;
742509Smrj 
743509Smrj 		default:
744509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
745509Smrj 			    "space %x offset %x",
746509Smrj 			    rp->regspec_bustype, rp->regspec_addr);
7470Sstevel@tonic-gate 			break;
7480Sstevel@tonic-gate 		}
749509Smrj 		len = strlen(buf);
7500Sstevel@tonic-gate 	}
751509Smrj 	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
752509Smrj 		int pri;
753509Smrj 
754509Smrj 		if (i != 0) {
755509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
756509Smrj 			    ",");
757509Smrj 			len = strlen(buf);
758509Smrj 		}
759509Smrj 		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
760509Smrj 		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
761509Smrj 		    " sparc ipl %d", pri);
762509Smrj 		len = strlen(buf);
7630Sstevel@tonic-gate 	}
764509Smrj #ifdef DEBUG
765509Smrj 	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
766509Smrj 		cmn_err(CE_NOTE, "next message is truncated: "
767509Smrj 		    "printed length 1024, real length %d", f_len);
768509Smrj 	}
769509Smrj #endif /* DEBUG */
770509Smrj 	cmn_err(CE_CONT, "?%s\n", buf);
771509Smrj 	kmem_free(buf, REPORTDEV_BUFSIZE);
7720Sstevel@tonic-gate 	return (DDI_SUCCESS);
7730Sstevel@tonic-gate }
7740Sstevel@tonic-gate 
775509Smrj 
776509Smrj /*
777509Smrj  * ******************
778509Smrj  *  map related code
779509Smrj  * ******************
780509Smrj  */
781509Smrj 
782509Smrj /*
783509Smrj  * rootnex_map()
784509Smrj  *
785509Smrj  */
7860Sstevel@tonic-gate static int
787509Smrj rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
788509Smrj     off_t len, caddr_t *vaddrp)
7890Sstevel@tonic-gate {
7900Sstevel@tonic-gate 	struct regspec *rp, tmp_reg;
7910Sstevel@tonic-gate 	ddi_map_req_t mr = *mp;		/* Get private copy of request */
7920Sstevel@tonic-gate 	int error;
7930Sstevel@tonic-gate 
7940Sstevel@tonic-gate 	mp = &mr;
7950Sstevel@tonic-gate 
7960Sstevel@tonic-gate 	switch (mp->map_op)  {
7970Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
7980Sstevel@tonic-gate 	case DDI_MO_UNMAP:
7990Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
8000Sstevel@tonic-gate 		break;
8010Sstevel@tonic-gate 	default:
8020Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8030Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
8040Sstevel@tonic-gate 		    mp->map_op);
8050Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8060Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
8070Sstevel@tonic-gate 	}
8080Sstevel@tonic-gate 
8090Sstevel@tonic-gate 	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
8100Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8110Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
8120Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8130Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
8140Sstevel@tonic-gate 	}
8150Sstevel@tonic-gate 
8160Sstevel@tonic-gate 	/*
8170Sstevel@tonic-gate 	 * First, if given an rnumber, convert it to a regspec...
8180Sstevel@tonic-gate 	 * (Presumably, this is on behalf of a child of the root node?)
8190Sstevel@tonic-gate 	 */
8200Sstevel@tonic-gate 
8210Sstevel@tonic-gate 	if (mp->map_type == DDI_MT_RNUMBER)  {
8220Sstevel@tonic-gate 
8230Sstevel@tonic-gate 		int rnumber = mp->map_obj.rnumber;
8240Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8250Sstevel@tonic-gate 		static char *out_of_range =
8260Sstevel@tonic-gate 		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
8270Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8280Sstevel@tonic-gate 
8290Sstevel@tonic-gate 		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
8300Sstevel@tonic-gate 		if (rp == NULL)  {
8310Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8320Sstevel@tonic-gate 			cmn_err(CE_WARN, out_of_range, rnumber,
8330Sstevel@tonic-gate 			    ddi_get_name(rdip));
8340Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8350Sstevel@tonic-gate 			return (DDI_ME_RNUMBER_RANGE);
8360Sstevel@tonic-gate 		}
8370Sstevel@tonic-gate 
8380Sstevel@tonic-gate 		/*
8390Sstevel@tonic-gate 		 * Convert the given ddi_map_req_t from rnumber to regspec...
8400Sstevel@tonic-gate 		 */
8410Sstevel@tonic-gate 
8420Sstevel@tonic-gate 		mp->map_type = DDI_MT_REGSPEC;
8430Sstevel@tonic-gate 		mp->map_obj.rp = rp;
8440Sstevel@tonic-gate 	}
8450Sstevel@tonic-gate 
8460Sstevel@tonic-gate 	/*
8470Sstevel@tonic-gate 	 * Adjust offset and length correspnding to called values...
8480Sstevel@tonic-gate 	 * XXX: A non-zero length means override the one in the regspec
8490Sstevel@tonic-gate 	 * XXX: (regardless of what's in the parent's range?)
8500Sstevel@tonic-gate 	 */
8510Sstevel@tonic-gate 
8520Sstevel@tonic-gate 	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
8530Sstevel@tonic-gate 	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */
8540Sstevel@tonic-gate 
8550Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8565084Sjohnlev 	cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d "
8575084Sjohnlev 	    "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
8585084Sjohnlev 	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, offset,
8595084Sjohnlev 	    len, mp->map_handlep);
8600Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8610Sstevel@tonic-gate 
8620Sstevel@tonic-gate 	/*
8630Sstevel@tonic-gate 	 * I/O or memory mapping:
8640Sstevel@tonic-gate 	 *
8650Sstevel@tonic-gate 	 *	<bustype=0, addr=x, len=x>: memory
8660Sstevel@tonic-gate 	 *	<bustype=1, addr=x, len=x>: i/o
8670Sstevel@tonic-gate 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
8680Sstevel@tonic-gate 	 */
8690Sstevel@tonic-gate 
8700Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
8710Sstevel@tonic-gate 		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
8720Sstevel@tonic-gate 		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
8730Sstevel@tonic-gate 		    ddi_get_name(rdip), rp->regspec_bustype,
8740Sstevel@tonic-gate 		    rp->regspec_addr, rp->regspec_size);
8750Sstevel@tonic-gate 		return (DDI_ME_INVAL);
8760Sstevel@tonic-gate 	}
8770Sstevel@tonic-gate 
8780Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
8790Sstevel@tonic-gate 		/*
8800Sstevel@tonic-gate 		 * compatibility i/o mapping
8810Sstevel@tonic-gate 		 */
8820Sstevel@tonic-gate 		rp->regspec_bustype += (uint_t)offset;
8830Sstevel@tonic-gate 	} else {
8840Sstevel@tonic-gate 		/*
8850Sstevel@tonic-gate 		 * Normal memory or i/o mapping
8860Sstevel@tonic-gate 		 */
8870Sstevel@tonic-gate 		rp->regspec_addr += (uint_t)offset;
8880Sstevel@tonic-gate 	}
8890Sstevel@tonic-gate 
8900Sstevel@tonic-gate 	if (len != 0)
8910Sstevel@tonic-gate 		rp->regspec_size = (uint_t)len;
8920Sstevel@tonic-gate 
8930Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8945084Sjohnlev 	cmn_err(CE_CONT, "             <%s,%s> <0x%x, 0x%x, 0x%d> offset %d "
8955084Sjohnlev 	    "len %d handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
8965084Sjohnlev 	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
8975084Sjohnlev 	    offset, len, mp->map_handlep);
8980Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8990Sstevel@tonic-gate 
9000Sstevel@tonic-gate 	/*
9010Sstevel@tonic-gate 	 * Apply any parent ranges at this level, if applicable.
9020Sstevel@tonic-gate 	 * (This is where nexus specific regspec translation takes place.
9030Sstevel@tonic-gate 	 * Use of this function is implicit agreement that translation is
9040Sstevel@tonic-gate 	 * provided via ddi_apply_range.)
9050Sstevel@tonic-gate 	 */
9060Sstevel@tonic-gate 
9070Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9080Sstevel@tonic-gate 	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
9090Sstevel@tonic-gate 	    ddi_get_name(dip), ddi_get_name(rdip));
9100Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9110Sstevel@tonic-gate 
9120Sstevel@tonic-gate 	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
9130Sstevel@tonic-gate 		return (error);
9140Sstevel@tonic-gate 
9150Sstevel@tonic-gate 	switch (mp->map_op)  {
9160Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
9170Sstevel@tonic-gate 
9180Sstevel@tonic-gate 		/*
9190Sstevel@tonic-gate 		 * Set up the locked down kernel mapping to the regspec...
9200Sstevel@tonic-gate 		 */
9210Sstevel@tonic-gate 
9220Sstevel@tonic-gate 		return (rootnex_map_regspec(mp, vaddrp));
9230Sstevel@tonic-gate 
9240Sstevel@tonic-gate 	case DDI_MO_UNMAP:
9250Sstevel@tonic-gate 
9260Sstevel@tonic-gate 		/*
9270Sstevel@tonic-gate 		 * Release mapping...
9280Sstevel@tonic-gate 		 */
9290Sstevel@tonic-gate 
9300Sstevel@tonic-gate 		return (rootnex_unmap_regspec(mp, vaddrp));
9310Sstevel@tonic-gate 
9320Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
9330Sstevel@tonic-gate 
9340Sstevel@tonic-gate 		return (rootnex_map_handle(mp));
9350Sstevel@tonic-gate 
9360Sstevel@tonic-gate 	default:
9370Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
9380Sstevel@tonic-gate 	}
9390Sstevel@tonic-gate }
9400Sstevel@tonic-gate 
9410Sstevel@tonic-gate 
9420Sstevel@tonic-gate /*
943509Smrj  * rootnex_map_fault()
9440Sstevel@tonic-gate  *
9450Sstevel@tonic-gate  *	fault in mappings for requestors
9460Sstevel@tonic-gate  */
9470Sstevel@tonic-gate /*ARGSUSED*/
9480Sstevel@tonic-gate static int
949509Smrj rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
950509Smrj     struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
951509Smrj     uint_t lock)
9520Sstevel@tonic-gate {
9530Sstevel@tonic-gate 
9540Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9550Sstevel@tonic-gate 	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
9560Sstevel@tonic-gate 	ddi_map_debug(" Seg <%s>\n",
9570Sstevel@tonic-gate 	    seg->s_ops == &segdev_ops ? "segdev" :
9580Sstevel@tonic-gate 	    seg == &kvseg ? "segkmem" : "NONE!");
9590Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9600Sstevel@tonic-gate 
9610Sstevel@tonic-gate 	/*
9620Sstevel@tonic-gate 	 * This is all terribly broken, but it is a start
9630Sstevel@tonic-gate 	 *
9640Sstevel@tonic-gate 	 * XXX	Note that this test means that segdev_ops
9650Sstevel@tonic-gate 	 *	must be exported from seg_dev.c.
9660Sstevel@tonic-gate 	 * XXX	What about devices with their own segment drivers?
9670Sstevel@tonic-gate 	 */
9680Sstevel@tonic-gate 	if (seg->s_ops == &segdev_ops) {
9695084Sjohnlev 		struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
9700Sstevel@tonic-gate 
9710Sstevel@tonic-gate 		if (hat == NULL) {
9720Sstevel@tonic-gate 			/*
9730Sstevel@tonic-gate 			 * This is one plausible interpretation of
9740Sstevel@tonic-gate 			 * a null hat i.e. use the first hat on the
9750Sstevel@tonic-gate 			 * address space hat list which by convention is
9760Sstevel@tonic-gate 			 * the hat of the system MMU.  At alternative
9770Sstevel@tonic-gate 			 * would be to panic .. this might well be better ..
9780Sstevel@tonic-gate 			 */
9790Sstevel@tonic-gate 			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
9800Sstevel@tonic-gate 			hat = seg->s_as->a_hat;
9810Sstevel@tonic-gate 			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
9820Sstevel@tonic-gate 		}
9830Sstevel@tonic-gate 		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
9840Sstevel@tonic-gate 		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
9850Sstevel@tonic-gate 	} else if (seg == &kvseg && dp == NULL) {
9860Sstevel@tonic-gate 		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
9870Sstevel@tonic-gate 		    HAT_LOAD_LOCK);
9880Sstevel@tonic-gate 	} else
9890Sstevel@tonic-gate 		return (DDI_FAILURE);
9900Sstevel@tonic-gate 	return (DDI_SUCCESS);
9910Sstevel@tonic-gate }
9920Sstevel@tonic-gate 
9930Sstevel@tonic-gate 
9940Sstevel@tonic-gate /*
995509Smrj  * rootnex_map_regspec()
996509Smrj  *     we don't support mapping of I/O cards above 4Gb
9970Sstevel@tonic-gate  */
998509Smrj static int
999509Smrj rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1000509Smrj {
10015084Sjohnlev 	rootnex_addr_t rbase;
1002509Smrj 	void *cvaddr;
1003509Smrj 	uint_t npages, pgoffset;
1004509Smrj 	struct regspec *rp;
1005509Smrj 	ddi_acc_hdl_t *hp;
1006509Smrj 	ddi_acc_impl_t *ap;
1007509Smrj 	uint_t	hat_acc_flags;
10085084Sjohnlev 	paddr_t pbase;
1009509Smrj 
1010509Smrj 	rp = mp->map_obj.rp;
1011509Smrj 	hp = mp->map_handlep;
1012509Smrj 
1013509Smrj #ifdef	DDI_MAP_DEBUG
1014509Smrj 	ddi_map_debug(
1015509Smrj 	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1016509Smrj 	    rp->regspec_bustype, rp->regspec_addr,
1017509Smrj 	    rp->regspec_size, mp->map_handlep);
1018509Smrj #endif	/* DDI_MAP_DEBUG */
1019509Smrj 
1020509Smrj 	/*
1021509Smrj 	 * I/O or memory mapping
1022509Smrj 	 *
1023509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1024509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1025509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1026509Smrj 	 */
1027509Smrj 
1028509Smrj 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
1029509Smrj 		cmn_err(CE_WARN, "rootnex: invalid register spec"
1030509Smrj 		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
1031509Smrj 		    rp->regspec_addr, rp->regspec_size);
1032509Smrj 		return (DDI_FAILURE);
1033509Smrj 	}
1034509Smrj 
1035509Smrj 	if (rp->regspec_bustype != 0) {
1036509Smrj 		/*
1037509Smrj 		 * I/O space - needs a handle.
1038509Smrj 		 */
1039509Smrj 		if (hp == NULL) {
1040509Smrj 			return (DDI_FAILURE);
1041509Smrj 		}
1042509Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1043509Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
1044509Smrj 		impl_acc_hdl_init(hp);
1045509Smrj 
1046509Smrj 		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1047509Smrj #ifdef  DDI_MAP_DEBUG
10485084Sjohnlev 			ddi_map_debug("rootnex_map_regspec: mmap() "
10495084Sjohnlev 			    "to I/O space is not supported.\n");
1050509Smrj #endif  /* DDI_MAP_DEBUG */
1051509Smrj 			return (DDI_ME_INVAL);
1052509Smrj 		} else {
1053509Smrj 			/*
1054509Smrj 			 * 1275-compliant vs. compatibility i/o mapping
1055509Smrj 			 */
1056509Smrj 			*vaddrp =
1057509Smrj 			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
10585084Sjohnlev 			    ((caddr_t)(uintptr_t)rp->regspec_bustype) :
10595084Sjohnlev 			    ((caddr_t)(uintptr_t)rp->regspec_addr);
10605084Sjohnlev #ifdef __xpv
10615084Sjohnlev 			if (DOMAIN_IS_INITDOMAIN(xen_info)) {
10625084Sjohnlev 				hp->ah_pfn = xen_assign_pfn(
10635084Sjohnlev 				    mmu_btop((ulong_t)rp->regspec_addr &
10645084Sjohnlev 				    MMU_PAGEMASK));
10655084Sjohnlev 			} else {
10665084Sjohnlev 				hp->ah_pfn = mmu_btop(
10675084Sjohnlev 				    (ulong_t)rp->regspec_addr & MMU_PAGEMASK);
10685084Sjohnlev 			}
10695084Sjohnlev #else
10701865Sdilpreet 			hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr &
10715084Sjohnlev 			    MMU_PAGEMASK);
10725084Sjohnlev #endif
10731865Sdilpreet 			hp->ah_pnum = mmu_btopr(rp->regspec_size +
10741865Sdilpreet 			    (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET);
1075509Smrj 		}
1076509Smrj 
1077509Smrj #ifdef	DDI_MAP_DEBUG
1078509Smrj 		ddi_map_debug(
1079509Smrj 	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1080509Smrj 		    rp->regspec_size, *vaddrp);
1081509Smrj #endif	/* DDI_MAP_DEBUG */
1082509Smrj 		return (DDI_SUCCESS);
1083509Smrj 	}
1084509Smrj 
1085509Smrj 	/*
1086509Smrj 	 * Memory space
1087509Smrj 	 */
1088509Smrj 
1089509Smrj 	if (hp != NULL) {
1090509Smrj 		/*
1091509Smrj 		 * hat layer ignores
1092509Smrj 		 * hp->ah_acc.devacc_attr_endian_flags.
1093509Smrj 		 */
1094509Smrj 		switch (hp->ah_acc.devacc_attr_dataorder) {
1095509Smrj 		case DDI_STRICTORDER_ACC:
1096509Smrj 			hat_acc_flags = HAT_STRICTORDER;
1097509Smrj 			break;
1098509Smrj 		case DDI_UNORDERED_OK_ACC:
1099509Smrj 			hat_acc_flags = HAT_UNORDERED_OK;
1100509Smrj 			break;
1101509Smrj 		case DDI_MERGING_OK_ACC:
1102509Smrj 			hat_acc_flags = HAT_MERGING_OK;
1103509Smrj 			break;
1104509Smrj 		case DDI_LOADCACHING_OK_ACC:
1105509Smrj 			hat_acc_flags = HAT_LOADCACHING_OK;
1106509Smrj 			break;
1107509Smrj 		case DDI_STORECACHING_OK_ACC:
1108509Smrj 			hat_acc_flags = HAT_STORECACHING_OK;
1109509Smrj 			break;
1110509Smrj 		}
1111509Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1112509Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
1113509Smrj 		impl_acc_hdl_init(hp);
1114509Smrj 		hp->ah_hat_flags = hat_acc_flags;
1115509Smrj 	} else {
1116509Smrj 		hat_acc_flags = HAT_STRICTORDER;
1117509Smrj 	}
1118509Smrj 
11195084Sjohnlev 	rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK);
11205084Sjohnlev #ifdef __xpv
11215084Sjohnlev 	/*
11225084Sjohnlev 	 * If we're dom0, we're using a real device so we need to translate
11235084Sjohnlev 	 * the MA to a PA.
11245084Sjohnlev 	 */
11255084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
11265084Sjohnlev 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase)));
11275084Sjohnlev 	} else {
11285084Sjohnlev 		pbase = rbase;
11295084Sjohnlev 	}
11305084Sjohnlev #else
11315084Sjohnlev 	pbase = rbase;
11325084Sjohnlev #endif
11335084Sjohnlev 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1134509Smrj 
1135509Smrj 	if (rp->regspec_size == 0) {
1136509Smrj #ifdef  DDI_MAP_DEBUG
1137509Smrj 		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1138509Smrj #endif  /* DDI_MAP_DEBUG */
1139509Smrj 		return (DDI_ME_INVAL);
1140509Smrj 	}
1141509Smrj 
1142509Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
11435084Sjohnlev 		/* extra cast to make gcc happy */
11445084Sjohnlev 		*vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase));
1145509Smrj 	} else {
1146509Smrj 		npages = mmu_btopr(rp->regspec_size + pgoffset);
1147509Smrj 
1148509Smrj #ifdef	DDI_MAP_DEBUG
11495084Sjohnlev 		ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
11505084Sjohnlev 		    "physical %llx", npages, pbase);
1151509Smrj #endif	/* DDI_MAP_DEBUG */
1152509Smrj 
1153509Smrj 		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
1154509Smrj 		if (cvaddr == NULL)
1155509Smrj 			return (DDI_ME_NORESOURCES);
1156509Smrj 
1157509Smrj 		/*
1158509Smrj 		 * Now map in the pages we've allocated...
1159509Smrj 		 */
11605084Sjohnlev 		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages),
11615084Sjohnlev 		    mmu_btop(pbase), mp->map_prot | hat_acc_flags,
11625084Sjohnlev 		    HAT_LOAD_LOCK);
1163509Smrj 		*vaddrp = (caddr_t)cvaddr + pgoffset;
11641865Sdilpreet 
11651865Sdilpreet 		/* save away pfn and npages for FMA */
11661865Sdilpreet 		hp = mp->map_handlep;
11671865Sdilpreet 		if (hp) {
11685084Sjohnlev 			hp->ah_pfn = mmu_btop(pbase);
11691865Sdilpreet 			hp->ah_pnum = npages;
11701865Sdilpreet 		}
1171509Smrj 	}
1172509Smrj 
1173509Smrj #ifdef	DDI_MAP_DEBUG
1174509Smrj 	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
1175509Smrj #endif	/* DDI_MAP_DEBUG */
1176509Smrj 	return (DDI_SUCCESS);
1177509Smrj }
1178509Smrj 
11790Sstevel@tonic-gate 
11800Sstevel@tonic-gate /*
1181509Smrj  * rootnex_unmap_regspec()
1182509Smrj  *
1183509Smrj  */
1184509Smrj static int
1185509Smrj rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1186509Smrj {
1187509Smrj 	caddr_t addr = (caddr_t)*vaddrp;
1188509Smrj 	uint_t npages, pgoffset;
1189509Smrj 	struct regspec *rp;
1190509Smrj 
1191509Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
1192509Smrj 		return (0);
1193509Smrj 
1194509Smrj 	rp = mp->map_obj.rp;
1195509Smrj 
1196509Smrj 	if (rp->regspec_size == 0) {
1197509Smrj #ifdef  DDI_MAP_DEBUG
1198509Smrj 		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1199509Smrj #endif  /* DDI_MAP_DEBUG */
1200509Smrj 		return (DDI_ME_INVAL);
1201509Smrj 	}
1202509Smrj 
1203509Smrj 	/*
1204509Smrj 	 * I/O or memory mapping:
1205509Smrj 	 *
1206509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1207509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1208509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1209509Smrj 	 */
1210509Smrj 	if (rp->regspec_bustype != 0) {
1211509Smrj 		/*
1212509Smrj 		 * This is I/O space, which requires no particular
1213509Smrj 		 * processing on unmap since it isn't mapped in the
1214509Smrj 		 * first place.
1215509Smrj 		 */
1216509Smrj 		return (DDI_SUCCESS);
1217509Smrj 	}
1218509Smrj 
1219509Smrj 	/*
1220509Smrj 	 * Memory space
1221509Smrj 	 */
1222509Smrj 	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
1223509Smrj 	npages = mmu_btopr(rp->regspec_size + pgoffset);
1224509Smrj 	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
1225509Smrj 	device_arena_free(addr - pgoffset, ptob(npages));
1226509Smrj 
1227509Smrj 	/*
1228509Smrj 	 * Destroy the pointer - the mapping has logically gone
1229509Smrj 	 */
1230509Smrj 	*vaddrp = NULL;
1231509Smrj 
1232509Smrj 	return (DDI_SUCCESS);
1233509Smrj }
1234509Smrj 
1235509Smrj 
1236509Smrj /*
1237509Smrj  * rootnex_map_handle()
1238509Smrj  *
12390Sstevel@tonic-gate  */
1240509Smrj static int
1241509Smrj rootnex_map_handle(ddi_map_req_t *mp)
1242509Smrj {
12435084Sjohnlev 	rootnex_addr_t rbase;
1244509Smrj 	ddi_acc_hdl_t *hp;
1245509Smrj 	uint_t pgoffset;
1246509Smrj 	struct regspec *rp;
12475084Sjohnlev 	paddr_t pbase;
1248509Smrj 
1249509Smrj 	rp = mp->map_obj.rp;
1250509Smrj 
1251509Smrj #ifdef	DDI_MAP_DEBUG
1252509Smrj 	ddi_map_debug(
1253509Smrj 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1254509Smrj 	    rp->regspec_bustype, rp->regspec_addr,
1255509Smrj 	    rp->regspec_size, mp->map_handlep);
1256509Smrj #endif	/* DDI_MAP_DEBUG */
1257509Smrj 
1258509Smrj 	/*
1259509Smrj 	 * I/O or memory mapping:
1260509Smrj 	 *
1261509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1262509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1263509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1264509Smrj 	 */
1265509Smrj 	if (rp->regspec_bustype != 0) {
1266509Smrj 		/*
1267509Smrj 		 * This refers to I/O space, and we don't support "mapping"
1268509Smrj 		 * I/O space to a user.
1269509Smrj 		 */
1270509Smrj 		return (DDI_FAILURE);
1271509Smrj 	}
1272509Smrj 
1273509Smrj 	/*
1274509Smrj 	 * Set up the hat_flags for the mapping.
1275509Smrj 	 */
1276509Smrj 	hp = mp->map_handlep;
1277509Smrj 
1278509Smrj 	switch (hp->ah_acc.devacc_attr_endian_flags) {
1279509Smrj 	case DDI_NEVERSWAP_ACC:
1280509Smrj 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
1281509Smrj 		break;
1282509Smrj 	case DDI_STRUCTURE_LE_ACC:
1283509Smrj 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
1284509Smrj 		break;
1285509Smrj 	case DDI_STRUCTURE_BE_ACC:
1286509Smrj 		return (DDI_FAILURE);
1287509Smrj 	default:
1288509Smrj 		return (DDI_REGS_ACC_CONFLICT);
1289509Smrj 	}
1290509Smrj 
1291509Smrj 	switch (hp->ah_acc.devacc_attr_dataorder) {
1292509Smrj 	case DDI_STRICTORDER_ACC:
1293509Smrj 		break;
1294509Smrj 	case DDI_UNORDERED_OK_ACC:
1295509Smrj 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
1296509Smrj 		break;
1297509Smrj 	case DDI_MERGING_OK_ACC:
1298509Smrj 		hp->ah_hat_flags |= HAT_MERGING_OK;
1299509Smrj 		break;
1300509Smrj 	case DDI_LOADCACHING_OK_ACC:
1301509Smrj 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
1302509Smrj 		break;
1303509Smrj 	case DDI_STORECACHING_OK_ACC:
1304509Smrj 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
1305509Smrj 		break;
1306509Smrj 	default:
1307509Smrj 		return (DDI_FAILURE);
1308509Smrj 	}
1309509Smrj 
13105084Sjohnlev 	rbase = (rootnex_addr_t)rp->regspec_addr &
13115084Sjohnlev 	    (~(rootnex_addr_t)MMU_PAGEOFFSET);
13125084Sjohnlev 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1313509Smrj 
1314509Smrj 	if (rp->regspec_size == 0)
1315509Smrj 		return (DDI_ME_INVAL);
1316509Smrj 
13175084Sjohnlev #ifdef __xpv
13185084Sjohnlev 	/*
13195084Sjohnlev 	 * If we're dom0, we're using a real device so we need to translate
13205084Sjohnlev 	 * the MA to a PA.
13215084Sjohnlev 	 */
13225084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
13235084Sjohnlev 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) |
13245084Sjohnlev 		    (rbase & MMU_PAGEOFFSET);
13255084Sjohnlev 	} else {
13265084Sjohnlev 		pbase = rbase;
13275084Sjohnlev 	}
13285084Sjohnlev #else
13295084Sjohnlev 	pbase = rbase;
13305084Sjohnlev #endif
13315084Sjohnlev 
13325084Sjohnlev 	hp->ah_pfn = mmu_btop(pbase);
1333509Smrj 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
1334509Smrj 
1335509Smrj 	return (DDI_SUCCESS);
1336509Smrj }
13370Sstevel@tonic-gate 
13380Sstevel@tonic-gate 
13390Sstevel@tonic-gate 
13400Sstevel@tonic-gate /*
1341509Smrj  * ************************
1342509Smrj  *  interrupt related code
1343509Smrj  * ************************
13440Sstevel@tonic-gate  */
13450Sstevel@tonic-gate 
13460Sstevel@tonic-gate /*
1347509Smrj  * rootnex_intr_ops()
13480Sstevel@tonic-gate  *	bus_intr_op() function for interrupt support
13490Sstevel@tonic-gate  */
13500Sstevel@tonic-gate /* ARGSUSED */
13510Sstevel@tonic-gate static int
13520Sstevel@tonic-gate rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
13530Sstevel@tonic-gate     ddi_intr_handle_impl_t *hdlp, void *result)
13540Sstevel@tonic-gate {
13550Sstevel@tonic-gate 	struct intrspec			*ispec;
13560Sstevel@tonic-gate 	struct ddi_parent_private_data	*pdp;
13570Sstevel@tonic-gate 
13580Sstevel@tonic-gate 	DDI_INTR_NEXDBG((CE_CONT,
13590Sstevel@tonic-gate 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
13600Sstevel@tonic-gate 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
13610Sstevel@tonic-gate 
13620Sstevel@tonic-gate 	/* Process the interrupt operation */
13630Sstevel@tonic-gate 	switch (intr_op) {
13640Sstevel@tonic-gate 	case DDI_INTROP_GETCAP:
13650Sstevel@tonic-gate 		/* First check with pcplusmp */
13660Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13670Sstevel@tonic-gate 			return (DDI_FAILURE);
13680Sstevel@tonic-gate 
13690Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
13700Sstevel@tonic-gate 			*(int *)result = 0;
13710Sstevel@tonic-gate 			return (DDI_FAILURE);
13720Sstevel@tonic-gate 		}
13730Sstevel@tonic-gate 		break;
13740Sstevel@tonic-gate 	case DDI_INTROP_SETCAP:
13750Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13760Sstevel@tonic-gate 			return (DDI_FAILURE);
13770Sstevel@tonic-gate 
13780Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
13790Sstevel@tonic-gate 			return (DDI_FAILURE);
13800Sstevel@tonic-gate 		break;
13810Sstevel@tonic-gate 	case DDI_INTROP_ALLOC:
13820Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13830Sstevel@tonic-gate 			return (DDI_FAILURE);
13840Sstevel@tonic-gate 		hdlp->ih_pri = ispec->intrspec_pri;
13850Sstevel@tonic-gate 		*(int *)result = hdlp->ih_scratch1;
13860Sstevel@tonic-gate 		break;
13870Sstevel@tonic-gate 	case DDI_INTROP_FREE:
13880Sstevel@tonic-gate 		pdp = ddi_get_parent_data(rdip);
13890Sstevel@tonic-gate 		/*
13900Sstevel@tonic-gate 		 * Special case for 'pcic' driver' only.
13910Sstevel@tonic-gate 		 * If an intrspec was created for it, clean it up here
13920Sstevel@tonic-gate 		 * See detailed comments on this in the function
13930Sstevel@tonic-gate 		 * rootnex_get_ispec().
13940Sstevel@tonic-gate 		 */
13950Sstevel@tonic-gate 		if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
13960Sstevel@tonic-gate 			kmem_free(pdp->par_intr, sizeof (struct intrspec) *
13970Sstevel@tonic-gate 			    pdp->par_nintr);
13980Sstevel@tonic-gate 			/*
13990Sstevel@tonic-gate 			 * Set it to zero; so that
14000Sstevel@tonic-gate 			 * DDI framework doesn't free it again
14010Sstevel@tonic-gate 			 */
14020Sstevel@tonic-gate 			pdp->par_intr = NULL;
14030Sstevel@tonic-gate 			pdp->par_nintr = 0;
14040Sstevel@tonic-gate 		}
14050Sstevel@tonic-gate 		break;
14060Sstevel@tonic-gate 	case DDI_INTROP_GETPRI:
14070Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14080Sstevel@tonic-gate 			return (DDI_FAILURE);
14090Sstevel@tonic-gate 		*(int *)result = ispec->intrspec_pri;
14100Sstevel@tonic-gate 		break;
14110Sstevel@tonic-gate 	case DDI_INTROP_SETPRI:
14120Sstevel@tonic-gate 		/* Validate the interrupt priority passed to us */
14130Sstevel@tonic-gate 		if (*(int *)result > LOCK_LEVEL)
14140Sstevel@tonic-gate 			return (DDI_FAILURE);
14150Sstevel@tonic-gate 
14160Sstevel@tonic-gate 		/* Ensure that PSM is all initialized and ispec is ok */
14170Sstevel@tonic-gate 		if ((psm_intr_ops == NULL) ||
14180Sstevel@tonic-gate 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
14190Sstevel@tonic-gate 			return (DDI_FAILURE);
14200Sstevel@tonic-gate 
14210Sstevel@tonic-gate 		/* Change the priority */
14220Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
14230Sstevel@tonic-gate 		    PSM_FAILURE)
14240Sstevel@tonic-gate 			return (DDI_FAILURE);
14250Sstevel@tonic-gate 
14260Sstevel@tonic-gate 		/* update the ispec with the new priority */
14270Sstevel@tonic-gate 		ispec->intrspec_pri =  *(int *)result;
14280Sstevel@tonic-gate 		break;
14290Sstevel@tonic-gate 	case DDI_INTROP_ADDISR:
14300Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14310Sstevel@tonic-gate 			return (DDI_FAILURE);
14320Sstevel@tonic-gate 		ispec->intrspec_func = hdlp->ih_cb_func;
14330Sstevel@tonic-gate 		break;
14340Sstevel@tonic-gate 	case DDI_INTROP_REMISR:
14350Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14360Sstevel@tonic-gate 			return (DDI_FAILURE);
14370Sstevel@tonic-gate 		ispec->intrspec_func = (uint_t (*)()) 0;
14380Sstevel@tonic-gate 		break;
14390Sstevel@tonic-gate 	case DDI_INTROP_ENABLE:
14400Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14410Sstevel@tonic-gate 			return (DDI_FAILURE);
14420Sstevel@tonic-gate 
14430Sstevel@tonic-gate 		/* Call psmi to translate irq with the dip */
14440Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14450Sstevel@tonic-gate 			return (DDI_FAILURE);
14460Sstevel@tonic-gate 
1447916Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
14480Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
14490Sstevel@tonic-gate 		    (int *)&hdlp->ih_vector);
14500Sstevel@tonic-gate 
14510Sstevel@tonic-gate 		/* Add the interrupt handler */
14520Sstevel@tonic-gate 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
14530Sstevel@tonic-gate 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
1454916Sschwartz 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
14550Sstevel@tonic-gate 			return (DDI_FAILURE);
14560Sstevel@tonic-gate 		break;
14570Sstevel@tonic-gate 	case DDI_INTROP_DISABLE:
14580Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14590Sstevel@tonic-gate 			return (DDI_FAILURE);
14600Sstevel@tonic-gate 
14610Sstevel@tonic-gate 		/* Call psm_ops() to translate irq with the dip */
14620Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14630Sstevel@tonic-gate 			return (DDI_FAILURE);
14640Sstevel@tonic-gate 
1465916Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
14660Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp,
14670Sstevel@tonic-gate 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
14680Sstevel@tonic-gate 
14690Sstevel@tonic-gate 		/* Remove the interrupt handler */
14700Sstevel@tonic-gate 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
14710Sstevel@tonic-gate 		    hdlp->ih_cb_func, hdlp->ih_vector);
14720Sstevel@tonic-gate 		break;
14730Sstevel@tonic-gate 	case DDI_INTROP_SETMASK:
14740Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14750Sstevel@tonic-gate 			return (DDI_FAILURE);
14760Sstevel@tonic-gate 
14770Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
14780Sstevel@tonic-gate 			return (DDI_FAILURE);
14790Sstevel@tonic-gate 		break;
14800Sstevel@tonic-gate 	case DDI_INTROP_CLRMASK:
14810Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14820Sstevel@tonic-gate 			return (DDI_FAILURE);
14830Sstevel@tonic-gate 
14840Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
14850Sstevel@tonic-gate 			return (DDI_FAILURE);
14860Sstevel@tonic-gate 		break;
14870Sstevel@tonic-gate 	case DDI_INTROP_GETPENDING:
14880Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14890Sstevel@tonic-gate 			return (DDI_FAILURE);
14900Sstevel@tonic-gate 
14910Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
14920Sstevel@tonic-gate 		    result)) {
14930Sstevel@tonic-gate 			*(int *)result = 0;
14940Sstevel@tonic-gate 			return (DDI_FAILURE);
14950Sstevel@tonic-gate 		}
14960Sstevel@tonic-gate 		break;
14972580Sanish 	case DDI_INTROP_NAVAIL:
14980Sstevel@tonic-gate 	case DDI_INTROP_NINTRS:
14992580Sanish 		*(int *)result = i_ddi_get_intx_nintrs(rdip);
15002580Sanish 		if (*(int *)result == 0) {
15010Sstevel@tonic-gate 			/*
15020Sstevel@tonic-gate 			 * Special case for 'pcic' driver' only. This driver
15030Sstevel@tonic-gate 			 * driver is a child of 'isa' and 'rootnex' drivers.
15040Sstevel@tonic-gate 			 *
15050Sstevel@tonic-gate 			 * See detailed comments on this in the function
15060Sstevel@tonic-gate 			 * rootnex_get_ispec().
15070Sstevel@tonic-gate 			 *
15080Sstevel@tonic-gate 			 * Children of 'pcic' send 'NINITR' request all the
15090Sstevel@tonic-gate 			 * way to rootnex driver. But, the 'pdp->par_nintr'
15100Sstevel@tonic-gate 			 * field may not initialized. So, we fake it here
15110Sstevel@tonic-gate 			 * to return 1 (a la what PCMCIA nexus does).
15120Sstevel@tonic-gate 			 */
15130Sstevel@tonic-gate 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
15140Sstevel@tonic-gate 				*(int *)result = 1;
15152580Sanish 			else
15162580Sanish 				return (DDI_FAILURE);
15170Sstevel@tonic-gate 		}
15180Sstevel@tonic-gate 		break;
15190Sstevel@tonic-gate 	case DDI_INTROP_SUPPORTED_TYPES:
15202580Sanish 		*(int *)result = DDI_INTR_TYPE_FIXED;	/* Always ... */
15210Sstevel@tonic-gate 		break;
15220Sstevel@tonic-gate 	default:
15230Sstevel@tonic-gate 		return (DDI_FAILURE);
15240Sstevel@tonic-gate 	}
15250Sstevel@tonic-gate 
15260Sstevel@tonic-gate 	return (DDI_SUCCESS);
15270Sstevel@tonic-gate }
15280Sstevel@tonic-gate 
15290Sstevel@tonic-gate 
15300Sstevel@tonic-gate /*
1531509Smrj  * rootnex_get_ispec()
1532509Smrj  *	convert an interrupt number to an interrupt specification.
1533509Smrj  *	The interrupt number determines which interrupt spec will be
1534509Smrj  *	returned if more than one exists.
1535509Smrj  *
1536509Smrj  *	Look into the parent private data area of the 'rdip' to find out
1537509Smrj  *	the interrupt specification.  First check to make sure there is
1538509Smrj  *	one that matchs "inumber" and then return a pointer to it.
1539509Smrj  *
1540509Smrj  *	Return NULL if one could not be found.
1541509Smrj  *
1542509Smrj  *	NOTE: This is needed for rootnex_intr_ops()
1543509Smrj  */
1544509Smrj static struct intrspec *
1545509Smrj rootnex_get_ispec(dev_info_t *rdip, int inum)
1546509Smrj {
1547509Smrj 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
1548509Smrj 
1549509Smrj 	/*
1550509Smrj 	 * Special case handling for drivers that provide their own
1551509Smrj 	 * intrspec structures instead of relying on the DDI framework.
1552509Smrj 	 *
1553509Smrj 	 * A broken hardware driver in ON could potentially provide its
1554509Smrj 	 * own intrspec structure, instead of relying on the hardware.
1555509Smrj 	 * If these drivers are children of 'rootnex' then we need to
1556509Smrj 	 * continue to provide backward compatibility to them here.
1557509Smrj 	 *
1558509Smrj 	 * Following check is a special case for 'pcic' driver which
1559509Smrj 	 * was found to have broken hardwre andby provides its own intrspec.
1560509Smrj 	 *
1561509Smrj 	 * Verbatim comments from this driver are shown here:
1562509Smrj 	 * "Don't use the ddi_add_intr since we don't have a
1563509Smrj 	 * default intrspec in all cases."
1564509Smrj 	 *
1565509Smrj 	 * Since an 'ispec' may not be always created for it,
1566509Smrj 	 * check for that and create one if so.
1567509Smrj 	 *
1568509Smrj 	 * NOTE: Currently 'pcic' is the only driver found to do this.
1569509Smrj 	 */
1570509Smrj 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1571509Smrj 		pdp->par_nintr = 1;
1572509Smrj 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
1573509Smrj 		    pdp->par_nintr, KM_SLEEP);
1574509Smrj 	}
1575509Smrj 
1576509Smrj 	/* Validate the interrupt number */
1577509Smrj 	if (inum >= pdp->par_nintr)
1578509Smrj 		return (NULL);
1579509Smrj 
1580509Smrj 	/* Get the interrupt structure pointer and return that */
1581509Smrj 	return ((struct intrspec *)&pdp->par_intr[inum]);
1582509Smrj }
1583509Smrj 
1584509Smrj 
1585509Smrj /*
1586509Smrj  * ******************
1587509Smrj  *  dma related code
1588509Smrj  * ******************
1589509Smrj  */
1590509Smrj 
1591509Smrj /*ARGSUSED*/
1592509Smrj static int
1593*7613SVikram.Hegde@Sun.COM rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
1594*7613SVikram.Hegde@Sun.COM     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
1595*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t *handlep)
1596509Smrj {
1597509Smrj 	uint64_t maxsegmentsize_ll;
1598509Smrj 	uint_t maxsegmentsize;
1599509Smrj 	ddi_dma_impl_t *hp;
1600509Smrj 	rootnex_dma_t *dma;
1601509Smrj 	uint64_t count_max;
1602509Smrj 	uint64_t seg;
1603509Smrj 	int kmflag;
1604509Smrj 	int e;
1605509Smrj 
1606509Smrj 
1607509Smrj 	/* convert our sleep flags */
1608509Smrj 	if (waitfp == DDI_DMA_SLEEP) {
1609509Smrj 		kmflag = KM_SLEEP;
1610509Smrj 	} else {
1611509Smrj 		kmflag = KM_NOSLEEP;
1612509Smrj 	}
1613509Smrj 
1614509Smrj 	/*
1615509Smrj 	 * We try to do only one memory allocation here. We'll do a little
1616509Smrj 	 * pointer manipulation later. If the bind ends up taking more than
1617509Smrj 	 * our prealloc's space, we'll have to allocate more memory in the
1618509Smrj 	 * bind operation. Not great, but much better than before and the
1619509Smrj 	 * best we can do with the current bind interfaces.
1620509Smrj 	 */
1621509Smrj 	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
1622509Smrj 	if (hp == NULL) {
1623509Smrj 		if (waitfp != DDI_DMA_DONTWAIT) {
1624509Smrj 			ddi_set_callback(waitfp, arg,
1625509Smrj 			    &rootnex_state->r_dvma_call_list_id);
1626509Smrj 		}
1627509Smrj 		return (DDI_DMA_NORESOURCES);
1628509Smrj 	}
1629509Smrj 
1630509Smrj 	/* Do our pointer manipulation now, align the structures */
1631509Smrj 	hp->dmai_private = (void *)(((uintptr_t)hp +
1632509Smrj 	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
1633509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1634509Smrj 	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
1635509Smrj 	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);
1636509Smrj 
1637509Smrj 	/* setup the handle */
1638509Smrj 	rootnex_clean_dmahdl(hp);
1639509Smrj 	dma->dp_dip = rdip;
1640509Smrj 	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
1641509Smrj 	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
1642509Smrj 	hp->dmai_minxfer = attr->dma_attr_minxfer;
1643509Smrj 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
1644509Smrj 	hp->dmai_rdip = rdip;
1645509Smrj 	hp->dmai_attr = *attr;
1646509Smrj 
1647509Smrj 	/* we don't need to worry about the SPL since we do a tryenter */
1648509Smrj 	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
1649509Smrj 
1650509Smrj 	/*
1651509Smrj 	 * Figure out our maximum segment size. If the segment size is greater
1652509Smrj 	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1653509Smrj 	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1654509Smrj 	 * dma_attr_count_max are size-1 type values.
1655509Smrj 	 *
1656509Smrj 	 * Maximum segment size is the largest physically contiguous chunk of
1657509Smrj 	 * memory that we can return from a bind (i.e. the maximum size of a
1658509Smrj 	 * single cookie).
1659509Smrj 	 */
1660509Smrj 
1661509Smrj 	/* handle the rollover cases */
1662509Smrj 	seg = attr->dma_attr_seg + 1;
1663509Smrj 	if (seg < attr->dma_attr_seg) {
1664509Smrj 		seg = attr->dma_attr_seg;
1665509Smrj 	}
1666509Smrj 	count_max = attr->dma_attr_count_max + 1;
1667509Smrj 	if (count_max < attr->dma_attr_count_max) {
1668509Smrj 		count_max = attr->dma_attr_count_max;
1669509Smrj 	}
1670509Smrj 
1671509Smrj 	/*
1672509Smrj 	 * granularity may or may not be a power of two. If it isn't, we can't
1673509Smrj 	 * use a simple mask.
1674509Smrj 	 */
1675509Smrj 	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
1676509Smrj 		dma->dp_granularity_power_2 = B_FALSE;
1677509Smrj 	} else {
1678509Smrj 		dma->dp_granularity_power_2 = B_TRUE;
1679509Smrj 	}
1680509Smrj 
1681509Smrj 	/*
1682509Smrj 	 * maxxfer should be a whole multiple of granularity. If we're going to
1683509Smrj 	 * break up a window because we're greater than maxxfer, we might as
1684509Smrj 	 * well make sure it's maxxfer is a whole multiple so we don't have to
1685509Smrj 	 * worry about triming the window later on for this case.
1686509Smrj 	 */
1687509Smrj 	if (attr->dma_attr_granular > 1) {
1688509Smrj 		if (dma->dp_granularity_power_2) {
1689509Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1690509Smrj 			    (attr->dma_attr_maxxfer &
1691509Smrj 			    (attr->dma_attr_granular - 1));
1692509Smrj 		} else {
1693509Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1694509Smrj 			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
1695509Smrj 		}
1696509Smrj 	} else {
1697509Smrj 		dma->dp_maxxfer = attr->dma_attr_maxxfer;
1698509Smrj 	}
1699509Smrj 
1700509Smrj 	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
1701509Smrj 	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
1702509Smrj 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
1703509Smrj 		maxsegmentsize = 0xFFFFFFFF;
1704509Smrj 	} else {
1705509Smrj 		maxsegmentsize = maxsegmentsize_ll;
1706509Smrj 	}
1707509Smrj 	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
1708509Smrj 	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
1709509Smrj 
1710509Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1711509Smrj 	if (rootnex_alloc_check_parms) {
1712509Smrj 		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
1713509Smrj 		if (e != DDI_SUCCESS) {
1714509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
1715509Smrj 			(void) rootnex_dma_freehdl(dip, rdip,
1716509Smrj 			    (ddi_dma_handle_t)hp);
1717509Smrj 			return (e);
1718509Smrj 		}
1719509Smrj 	}
1720509Smrj 
1721509Smrj 	*handlep = (ddi_dma_handle_t)hp;
1722509Smrj 
1723509Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1724509Smrj 	DTRACE_PROBE1(rootnex__alloc__handle, uint64_t,
1725509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1726509Smrj 
1727509Smrj 	return (DDI_SUCCESS);
1728509Smrj }
1729509Smrj 
1730509Smrj 
1731509Smrj /*
1732*7613SVikram.Hegde@Sun.COM  * rootnex_dma_allochdl()
1733*7613SVikram.Hegde@Sun.COM  *    called from ddi_dma_alloc_handle().
1734509Smrj  */
1735*7613SVikram.Hegde@Sun.COM static int
1736*7613SVikram.Hegde@Sun.COM rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1737*7613SVikram.Hegde@Sun.COM     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1738*7613SVikram.Hegde@Sun.COM {
1739*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
1740*7613SVikram.Hegde@Sun.COM 	uint_t error = ENOTSUP;
1741*7613SVikram.Hegde@Sun.COM 	int retval;
1742*7613SVikram.Hegde@Sun.COM 
1743*7613SVikram.Hegde@Sun.COM 	retval = iommulib_nex_open(rdip, &error);
1744*7613SVikram.Hegde@Sun.COM 
1745*7613SVikram.Hegde@Sun.COM 	if (retval != DDI_SUCCESS && error == ENOTSUP) {
1746*7613SVikram.Hegde@Sun.COM 		/* No IOMMU */
1747*7613SVikram.Hegde@Sun.COM 		return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
1748*7613SVikram.Hegde@Sun.COM 		    handlep));
1749*7613SVikram.Hegde@Sun.COM 	} else if (retval != DDI_SUCCESS) {
1750*7613SVikram.Hegde@Sun.COM 		return (DDI_FAILURE);
1751*7613SVikram.Hegde@Sun.COM 	}
1752*7613SVikram.Hegde@Sun.COM 
1753*7613SVikram.Hegde@Sun.COM 	ASSERT(IOMMU_USED(rdip));
1754*7613SVikram.Hegde@Sun.COM 
1755*7613SVikram.Hegde@Sun.COM 	/* has an IOMMU */
1756*7613SVikram.Hegde@Sun.COM 	return (iommulib_nexdma_allochdl(dip, rdip, attr,
1757*7613SVikram.Hegde@Sun.COM 	    waitfp, arg, handlep));
1758*7613SVikram.Hegde@Sun.COM #else
1759*7613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
1760*7613SVikram.Hegde@Sun.COM 	    handlep));
1761*7613SVikram.Hegde@Sun.COM #endif
1762*7613SVikram.Hegde@Sun.COM }
1763*7613SVikram.Hegde@Sun.COM 
1764509Smrj /*ARGSUSED*/
1765509Smrj static int
1766*7613SVikram.Hegde@Sun.COM rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
1767*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle)
1768509Smrj {
1769509Smrj 	ddi_dma_impl_t *hp;
1770509Smrj 	rootnex_dma_t *dma;
1771509Smrj 
1772509Smrj 
1773509Smrj 	hp = (ddi_dma_impl_t *)handle;
1774509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1775509Smrj 
1776509Smrj 	/* unbind should have been called first */
1777509Smrj 	ASSERT(!dma->dp_inuse);
1778509Smrj 
1779509Smrj 	mutex_destroy(&dma->dp_mutex);
1780509Smrj 	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
1781509Smrj 
1782509Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1783509Smrj 	DTRACE_PROBE1(rootnex__free__handle, uint64_t,
1784509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1785509Smrj 
1786509Smrj 	if (rootnex_state->r_dvma_call_list_id)
1787509Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
1788509Smrj 
1789509Smrj 	return (DDI_SUCCESS);
1790509Smrj }
1791509Smrj 
1792509Smrj /*
1793*7613SVikram.Hegde@Sun.COM  * rootnex_dma_freehdl()
1794*7613SVikram.Hegde@Sun.COM  *    called from ddi_dma_free_handle().
1795509Smrj  */
1796*7613SVikram.Hegde@Sun.COM static int
1797*7613SVikram.Hegde@Sun.COM rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
1798*7613SVikram.Hegde@Sun.COM {
1799*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
1800*7613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
1801*7613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_freehdl(dip, rdip, handle));
1802*7613SVikram.Hegde@Sun.COM 	}
1803*7613SVikram.Hegde@Sun.COM #endif
1804*7613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_freehdl(dip, rdip, handle));
1805*7613SVikram.Hegde@Sun.COM }
1806*7613SVikram.Hegde@Sun.COM 
1807*7613SVikram.Hegde@Sun.COM 
1808509Smrj /*ARGSUSED*/
1809509Smrj static int
1810*7613SVikram.Hegde@Sun.COM rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
1811*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
1812*7613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
18130Sstevel@tonic-gate {
1814509Smrj 	rootnex_sglinfo_t *sinfo;
1815509Smrj 	ddi_dma_attr_t *attr;
1816509Smrj 	ddi_dma_impl_t *hp;
1817509Smrj 	rootnex_dma_t *dma;
1818509Smrj 	int kmflag;
1819509Smrj 	int e;
1820509Smrj 
1821509Smrj 
1822509Smrj 	hp = (ddi_dma_impl_t *)handle;
1823509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1824509Smrj 	sinfo = &dma->dp_sglinfo;
1825509Smrj 	attr = &hp->dmai_attr;
1826509Smrj 
1827509Smrj 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1828509Smrj 
1829509Smrj 	/*
1830509Smrj 	 * This is useful for debugging a driver. Not as useful in a production
1831509Smrj 	 * system. The only time this will fail is if you have a driver bug.
1832509Smrj 	 */
1833509Smrj 	if (rootnex_bind_check_inuse) {
1834509Smrj 		/*
1835509Smrj 		 * No one else should ever have this lock unless someone else
1836509Smrj 		 * is trying to use this handle. So contention on the lock
1837509Smrj 		 * is the same as inuse being set.
1838509Smrj 		 */
1839509Smrj 		e = mutex_tryenter(&dma->dp_mutex);
1840509Smrj 		if (e == 0) {
1841509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1842509Smrj 			return (DDI_DMA_INUSE);
1843509Smrj 		}
1844509Smrj 		if (dma->dp_inuse) {
1845509Smrj 			mutex_exit(&dma->dp_mutex);
1846509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1847509Smrj 			return (DDI_DMA_INUSE);
1848509Smrj 		}
1849509Smrj 		dma->dp_inuse = B_TRUE;
1850509Smrj 		mutex_exit(&dma->dp_mutex);
1851509Smrj 	}
1852509Smrj 
1853509Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1854509Smrj 	if (rootnex_bind_check_parms) {
1855509Smrj 		e = rootnex_valid_bind_parms(dmareq, attr);
1856509Smrj 		if (e != DDI_SUCCESS) {
1857509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1858509Smrj 			rootnex_clean_dmahdl(hp);
1859509Smrj 			return (e);
1860509Smrj 		}
1861509Smrj 	}
1862509Smrj 
1863509Smrj 	/* save away the original bind info */
1864509Smrj 	dma->dp_dma = dmareq->dmar_object;
1865509Smrj 
1866*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
18677589SVikram.Hegde@Sun.COM 	if (rootnex_state->r_intel_iommu_enabled) {
18687589SVikram.Hegde@Sun.COM 		e = intel_iommu_map_sgl(handle, dmareq,
18697589SVikram.Hegde@Sun.COM 		    rootnex_state->r_prealloc_cookies);
18707589SVikram.Hegde@Sun.COM 
18717589SVikram.Hegde@Sun.COM 		switch (e) {
18727589SVikram.Hegde@Sun.COM 		case IOMMU_SGL_SUCCESS:
18737589SVikram.Hegde@Sun.COM 			goto rootnex_sgl_end;
18747589SVikram.Hegde@Sun.COM 
18757589SVikram.Hegde@Sun.COM 		case IOMMU_SGL_DISABLE:
18767589SVikram.Hegde@Sun.COM 			goto rootnex_sgl_start;
18777589SVikram.Hegde@Sun.COM 
18787589SVikram.Hegde@Sun.COM 		case IOMMU_SGL_NORESOURCES:
18797589SVikram.Hegde@Sun.COM 			cmn_err(CE_WARN, "iommu map sgl failed for %s",
18807589SVikram.Hegde@Sun.COM 			    ddi_node_name(dma->dp_dip));
18817589SVikram.Hegde@Sun.COM 			rootnex_clean_dmahdl(hp);
18827589SVikram.Hegde@Sun.COM 			return (DDI_DMA_NORESOURCES);
18837589SVikram.Hegde@Sun.COM 
18847589SVikram.Hegde@Sun.COM 		default:
18857589SVikram.Hegde@Sun.COM 			cmn_err(CE_WARN,
18867589SVikram.Hegde@Sun.COM 			    "undefined value returned from"
18877589SVikram.Hegde@Sun.COM 			    " intel_iommu_map_sgl: %d",
18887589SVikram.Hegde@Sun.COM 			    e);
18897589SVikram.Hegde@Sun.COM 			rootnex_clean_dmahdl(hp);
18907589SVikram.Hegde@Sun.COM 			return (DDI_DMA_NORESOURCES);
18917589SVikram.Hegde@Sun.COM 		}
18927589SVikram.Hegde@Sun.COM 	}
1893*7613SVikram.Hegde@Sun.COM #endif
18947589SVikram.Hegde@Sun.COM 
18957589SVikram.Hegde@Sun.COM rootnex_sgl_start:
1896509Smrj 	/*
1897509Smrj 	 * Figure out a rough estimate of what maximum number of pages this
1898509Smrj 	 * buffer could use (a high estimate of course).
1899509Smrj 	 */
1900509Smrj 	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
1901509Smrj 
1902509Smrj 	/*
1903509Smrj 	 * We'll use the pre-allocated cookies for any bind that will *always*
1904509Smrj 	 * fit (more important to be consistent, we don't want to create
1905509Smrj 	 * additional degenerate cases).
1906509Smrj 	 */
1907509Smrj 	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
1908509Smrj 		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
1909509Smrj 		dma->dp_need_to_free_cookie = B_FALSE;
1910509Smrj 		DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
1911509Smrj 		    uint_t, sinfo->si_max_pages);
1912509Smrj 
1913509Smrj 	/*
1914509Smrj 	 * For anything larger than that, we'll go ahead and allocate the
1915509Smrj 	 * maximum number of pages we expect to see. Hopefuly, we won't be
1916509Smrj 	 * seeing this path in the fast path for high performance devices very
1917509Smrj 	 * frequently.
1918509Smrj 	 *
1919509Smrj 	 * a ddi bind interface that allowed the driver to provide storage to
1920509Smrj 	 * the bind interface would speed this case up.
1921509Smrj 	 */
1922509Smrj 	} else {
1923509Smrj 		/* convert the sleep flags */
1924509Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
1925509Smrj 			kmflag =  KM_SLEEP;
1926509Smrj 		} else {
1927509Smrj 			kmflag =  KM_NOSLEEP;
1928509Smrj 		}
1929509Smrj 
1930509Smrj 		/*
1931509Smrj 		 * Save away how much memory we allocated. If we're doing a
1932509Smrj 		 * nosleep, the alloc could fail...
1933509Smrj 		 */
1934509Smrj 		dma->dp_cookie_size = sinfo->si_max_pages *
1935509Smrj 		    sizeof (ddi_dma_cookie_t);
1936509Smrj 		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
1937509Smrj 		if (dma->dp_cookies == NULL) {
1938509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1939509Smrj 			rootnex_clean_dmahdl(hp);
1940509Smrj 			return (DDI_DMA_NORESOURCES);
1941509Smrj 		}
1942509Smrj 		dma->dp_need_to_free_cookie = B_TRUE;
1943509Smrj 		DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t,
1944509Smrj 		    sinfo->si_max_pages);
1945509Smrj 	}
1946509Smrj 	hp->dmai_cookie = dma->dp_cookies;
1947509Smrj 
1948509Smrj 	/*
1949509Smrj 	 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
1950509Smrj 	 * looking at the contraints in the dma structure. It will then put some
1951509Smrj 	 * additional state about the sgl in the dma struct (i.e. is the sgl
1952509Smrj 	 * clean, or do we need to do some munging; how many pages need to be
1953509Smrj 	 * copied, etc.)
1954509Smrj 	 */
1955509Smrj 	rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies,
1956509Smrj 	    &dma->dp_sglinfo);
19577589SVikram.Hegde@Sun.COM 
19587589SVikram.Hegde@Sun.COM rootnex_sgl_end:
1959509Smrj 	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
1960509Smrj 	/* if we don't need a copy buffer, we don't need to sync */
1961509Smrj 	if (sinfo->si_copybuf_req == 0) {
1962509Smrj 		hp->dmai_rflags |= DMP_NOSYNC;
1963509Smrj 	}
1964509Smrj 
1965509Smrj 	/*
1966509Smrj 	 * if we don't need the copybuf and we don't need to do a partial,  we
1967509Smrj 	 * hit the fast path. All the high performance devices should be trying
1968509Smrj 	 * to hit this path. To hit this path, a device should be able to reach
1969509Smrj 	 * all of memory, shouldn't try to bind more than it can transfer, and
1970509Smrj 	 * the buffer shouldn't require more cookies than the driver/device can
1971509Smrj 	 * handle [sgllen]).
1972509Smrj 	 */
1973509Smrj 	if ((sinfo->si_copybuf_req == 0) &&
1974509Smrj 	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
1975509Smrj 	    (dma->dp_dma.dmao_size < dma->dp_maxxfer)) {
1976509Smrj 		/*
19775591Sstephh 		 * If the driver supports FMA, insert the handle in the FMA DMA
19785591Sstephh 		 * handle cache.
19795591Sstephh 		 */
19805591Sstephh 		if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
19815591Sstephh 			hp->dmai_error.err_cf = rootnex_dma_check;
19825591Sstephh 			(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
19835591Sstephh 		}
19845591Sstephh 
19855591Sstephh 		/*
1986509Smrj 		 * copy out the first cookie and ccountp, set the cookie
1987509Smrj 		 * pointer to the second cookie. The first cookie is passed
1988509Smrj 		 * back on the stack. Additional cookies are accessed via
1989509Smrj 		 * ddi_dma_nextcookie()
1990509Smrj 		 */
1991509Smrj 		*cookiep = dma->dp_cookies[0];
1992509Smrj 		*ccountp = sinfo->si_sgl_size;
1993509Smrj 		hp->dmai_cookie++;
1994509Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
1995509Smrj 		hp->dmai_nwin = 1;
1996509Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
1997509Smrj 		DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, uint64_t,
1998509Smrj 		    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
1999509Smrj 		    dma->dp_dma.dmao_size);
2000509Smrj 		return (DDI_DMA_MAPPED);
2001509Smrj 	}
2002509Smrj 
2003509Smrj 	/*
2004509Smrj 	 * go to the slow path, we may need to alloc more memory, create
2005509Smrj 	 * multiple windows, and munge up a sgl to make the device happy.
2006509Smrj 	 */
2007509Smrj 	e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag);
2008509Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
2009509Smrj 		if (dma->dp_need_to_free_cookie) {
2010509Smrj 			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2011509Smrj 		}
2012509Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2013509Smrj 		rootnex_clean_dmahdl(hp); /* must be after free cookie */
2014509Smrj 		return (e);
2015509Smrj 	}
2016509Smrj 
20175591Sstephh 	/*
20185591Sstephh 	 * If the driver supports FMA, insert the handle in the FMA DMA handle
20195591Sstephh 	 * cache.
20205591Sstephh 	 */
20215591Sstephh 	if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
20225591Sstephh 		hp->dmai_error.err_cf = rootnex_dma_check;
20235591Sstephh 		(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
20245591Sstephh 	}
20255591Sstephh 
2026509Smrj 	/* if the first window uses the copy buffer, sync it for the device */
2027509Smrj 	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
2028509Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
2029509Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
2030509Smrj 		    DDI_DMA_SYNC_FORDEV);
2031509Smrj 	}
2032509Smrj 
2033509Smrj 	/*
2034509Smrj 	 * copy out the first cookie and ccountp, set the cookie pointer to the
2035509Smrj 	 * second cookie. Make sure the partial flag is set/cleared correctly.
2036509Smrj 	 * If we have a partial map (i.e. multiple windows), the number of
2037509Smrj 	 * cookies we return is the number of cookies in the first window.
2038509Smrj 	 */
2039509Smrj 	if (e == DDI_DMA_MAPPED) {
2040509Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2041509Smrj 		*ccountp = sinfo->si_sgl_size;
2042509Smrj 	} else {
2043509Smrj 		hp->dmai_rflags |= DDI_DMA_PARTIAL;
2044509Smrj 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
2045509Smrj 		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
2046509Smrj 	}
2047509Smrj 	*cookiep = dma->dp_cookies[0];
2048509Smrj 	hp->dmai_cookie++;
2049509Smrj 
2050509Smrj 	ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2051509Smrj 	DTRACE_PROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
2052509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
2053509Smrj 	    dma->dp_dma.dmao_size);
2054509Smrj 	return (e);
2055509Smrj }
2056509Smrj 
2057509Smrj 
2058509Smrj /*
2059*7613SVikram.Hegde@Sun.COM  * rootnex_dma_bindhdl()
2060*7613SVikram.Hegde@Sun.COM  *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
2061509Smrj  */
2062*7613SVikram.Hegde@Sun.COM static int
2063*7613SVikram.Hegde@Sun.COM rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
2064*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
2065*7613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
2066*7613SVikram.Hegde@Sun.COM {
2067*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
2068*7613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
2069*7613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq,
2070*7613SVikram.Hegde@Sun.COM 		    cookiep, ccountp));
2071*7613SVikram.Hegde@Sun.COM 	}
2072*7613SVikram.Hegde@Sun.COM #endif
2073*7613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_bindhdl(dip, rdip, handle, dmareq,
2074*7613SVikram.Hegde@Sun.COM 	    cookiep, ccountp));
2075*7613SVikram.Hegde@Sun.COM }
2076*7613SVikram.Hegde@Sun.COM 
2077509Smrj /*ARGSUSED*/
2078509Smrj static int
2079*7613SVikram.Hegde@Sun.COM rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2080509Smrj     ddi_dma_handle_t handle)
2081509Smrj {
2082509Smrj 	ddi_dma_impl_t *hp;
2083509Smrj 	rootnex_dma_t *dma;
2084509Smrj 	int e;
2085509Smrj 
2086509Smrj 	hp = (ddi_dma_impl_t *)handle;
2087509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
2088509Smrj 
2089509Smrj 	/* make sure the buffer wasn't free'd before calling unbind */
2090509Smrj 	if (rootnex_unbind_verify_buffer) {
2091509Smrj 		e = rootnex_verify_buffer(dma);
2092509Smrj 		if (e != DDI_SUCCESS) {
2093509Smrj 			ASSERT(0);
2094509Smrj 			return (DDI_FAILURE);
2095509Smrj 		}
2096509Smrj 	}
2097509Smrj 
2098509Smrj 	/* sync the current window before unbinding the buffer */
2099509Smrj 	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
2100509Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
2101509Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
2102509Smrj 		    DDI_DMA_SYNC_FORCPU);
2103509Smrj 	}
2104509Smrj 
2105509Smrj 	/*
21061865Sdilpreet 	 * If the driver supports FMA, remove the handle in the FMA DMA handle
21071865Sdilpreet 	 * cache.
21081865Sdilpreet 	 */
21091865Sdilpreet 	if (hp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
21101865Sdilpreet 		if ((DEVI(rdip)->devi_fmhdl != NULL) &&
21111865Sdilpreet 		    (DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap))) {
21121865Sdilpreet 			(void) ndi_fmc_remove(rdip, DMA_HANDLE, hp);
21131865Sdilpreet 		}
21141865Sdilpreet 	}
21151865Sdilpreet 
21161865Sdilpreet 	/*
2117509Smrj 	 * cleanup and copy buffer or window state. if we didn't use the copy
2118509Smrj 	 * buffer or windows, there won't be much to do :-)
2119509Smrj 	 */
2120509Smrj 	rootnex_teardown_copybuf(dma);
2121509Smrj 	rootnex_teardown_windows(dma);
2122509Smrj 
2123*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
2124509Smrj 	/*
21257589SVikram.Hegde@Sun.COM 	 * If intel iommu enabled, clean up the page tables and free the dvma
21267589SVikram.Hegde@Sun.COM 	 */
21277589SVikram.Hegde@Sun.COM 	if (rootnex_state->r_intel_iommu_enabled) {
21287589SVikram.Hegde@Sun.COM 		intel_iommu_unmap_sgl(handle);
21297589SVikram.Hegde@Sun.COM 	}
2130*7613SVikram.Hegde@Sun.COM #endif
21317589SVikram.Hegde@Sun.COM 
21327589SVikram.Hegde@Sun.COM 	/*
2133509Smrj 	 * If we had to allocate space to for the worse case sgl (it didn't
2134509Smrj 	 * fit into our pre-allocate buffer), free that up now
2135509Smrj 	 */
2136509Smrj 	if (dma->dp_need_to_free_cookie) {
2137509Smrj 		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2138509Smrj 	}
2139509Smrj 
2140509Smrj 	/*
2141509Smrj 	 * clean up the handle so it's ready for the next bind (i.e. if the
2142509Smrj 	 * handle is reused).
2143509Smrj 	 */
2144509Smrj 	rootnex_clean_dmahdl(hp);
2145509Smrj 
2146509Smrj 	if (rootnex_state->r_dvma_call_list_id)
2147509Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
2148509Smrj 
2149509Smrj 	ROOTNEX_PROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2150509Smrj 	DTRACE_PROBE1(rootnex__unbind, uint64_t,
2151509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2152509Smrj 
2153509Smrj 	return (DDI_SUCCESS);
2154509Smrj }
2155509Smrj 
2156*7613SVikram.Hegde@Sun.COM /*
2157*7613SVikram.Hegde@Sun.COM  * rootnex_dma_unbindhdl()
2158*7613SVikram.Hegde@Sun.COM  *    called from ddi_dma_unbind_handle()
2159*7613SVikram.Hegde@Sun.COM  */
2160*7613SVikram.Hegde@Sun.COM /*ARGSUSED*/
2161*7613SVikram.Hegde@Sun.COM static int
2162*7613SVikram.Hegde@Sun.COM rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2163*7613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle)
2164*7613SVikram.Hegde@Sun.COM {
2165*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
2166*7613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
2167*7613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_unbindhdl(dip, rdip, handle));
2168*7613SVikram.Hegde@Sun.COM 	}
2169*7613SVikram.Hegde@Sun.COM #endif
2170*7613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_unbindhdl(dip, rdip, handle));
2171*7613SVikram.Hegde@Sun.COM }
2172*7613SVikram.Hegde@Sun.COM 
2173*7613SVikram.Hegde@Sun.COM /*ARGSUSED*/
2174*7613SVikram.Hegde@Sun.COM static void
2175*7613SVikram.Hegde@Sun.COM rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
2176*7613SVikram.Hegde@Sun.COM {
2177*7613SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2178*7613SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2179*7613SVikram.Hegde@Sun.COM 
2180*7613SVikram.Hegde@Sun.COM 	hp->dmai_cookie = &dma->dp_cookies[0];
2181*7613SVikram.Hegde@Sun.COM 	hp->dmai_cookie++;
2182*7613SVikram.Hegde@Sun.COM }
2183*7613SVikram.Hegde@Sun.COM 
2184*7613SVikram.Hegde@Sun.COM /*ARGSUSED*/
2185*7613SVikram.Hegde@Sun.COM static int
2186*7613SVikram.Hegde@Sun.COM rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
2187*7613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
2188*7613SVikram.Hegde@Sun.COM {
2189*7613SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2190*7613SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2191*7613SVikram.Hegde@Sun.COM 
2192*7613SVikram.Hegde@Sun.COM 
2193*7613SVikram.Hegde@Sun.COM 	if (hp->dmai_rflags & DDI_DMA_PARTIAL) {
2194*7613SVikram.Hegde@Sun.COM 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
2195*7613SVikram.Hegde@Sun.COM 	} else {
2196*7613SVikram.Hegde@Sun.COM 		*ccountp = dma->dp_sglinfo.si_sgl_size;
2197*7613SVikram.Hegde@Sun.COM 	}
2198*7613SVikram.Hegde@Sun.COM 	*cookiep = dma->dp_cookies[0];
2199*7613SVikram.Hegde@Sun.COM 
2200*7613SVikram.Hegde@Sun.COM 	/* reset the cookies */
2201*7613SVikram.Hegde@Sun.COM 	hp->dmai_cookie = &dma->dp_cookies[0];
2202*7613SVikram.Hegde@Sun.COM 	hp->dmai_cookie++;
2203*7613SVikram.Hegde@Sun.COM 
2204*7613SVikram.Hegde@Sun.COM 	return (DDI_SUCCESS);
2205*7613SVikram.Hegde@Sun.COM }
2206509Smrj 
2207509Smrj /*
2208509Smrj  * rootnex_verify_buffer()
2209509Smrj  *   verify buffer wasn't free'd
2210509Smrj  */
2211509Smrj static int
2212509Smrj rootnex_verify_buffer(rootnex_dma_t *dma)
2213509Smrj {
2214509Smrj 	page_t **pplist;
2215509Smrj 	caddr_t vaddr;
2216509Smrj 	uint_t pcnt;
2217509Smrj 	uint_t poff;
2218509Smrj 	page_t *pp;
22191865Sdilpreet 	char b;
2220509Smrj 	int i;
2221509Smrj 
2222509Smrj 	/* Figure out how many pages this buffer occupies */
2223509Smrj 	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
2224509Smrj 		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
2225509Smrj 	} else {
2226509Smrj 		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
2227509Smrj 		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2228509Smrj 	}
2229509Smrj 	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
2230509Smrj 
2231509Smrj 	switch (dma->dp_dma.dmao_type) {
22320Sstevel@tonic-gate 	case DMA_OTYP_PAGES:
2233509Smrj 		/*
2234509Smrj 		 * for a linked list of pp's walk through them to make sure
2235509Smrj 		 * they're locked and not free.
2236509Smrj 		 */
2237509Smrj 		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
2238509Smrj 		for (i = 0; i < pcnt; i++) {
2239509Smrj 			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
2240509Smrj 				return (DDI_FAILURE);
22410Sstevel@tonic-gate 			}
2242509Smrj 			pp = pp->p_next;
22430Sstevel@tonic-gate 		}
22440Sstevel@tonic-gate 		break;
2245509Smrj 
22460Sstevel@tonic-gate 	case DMA_OTYP_VADDR:
22470Sstevel@tonic-gate 	case DMA_OTYP_BUFVADDR:
2248509Smrj 		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
2249509Smrj 		/*
2250509Smrj 		 * for an array of pp's walk through them to make sure they're
2251509Smrj 		 * not free. It's possible that they may not be locked.
2252509Smrj 		 */
2253509Smrj 		if (pplist) {
2254509Smrj 			for (i = 0; i < pcnt; i++) {
2255509Smrj 				if (PP_ISFREE(pplist[i])) {
2256509Smrj 					return (DDI_FAILURE);
2257509Smrj 				}
2258509Smrj 			}
2259509Smrj 
2260509Smrj 		/* For a virtual address, try to peek at each page */
2261509Smrj 		} else {
2262509Smrj 			if (dma->dp_sglinfo.si_asp == &kas) {
2263509Smrj 				for (i = 0; i < pcnt; i++) {
22641865Sdilpreet 					if (ddi_peek8(NULL, vaddr, &b) ==
22651865Sdilpreet 					    DDI_FAILURE)
2266509Smrj 						return (DDI_FAILURE);
22671865Sdilpreet 					vaddr += MMU_PAGESIZE;
2268509Smrj 				}
2269509Smrj 			}
2270509Smrj 		}
2271509Smrj 		break;
2272509Smrj 
2273509Smrj 	default:
2274509Smrj 		ASSERT(0);
2275509Smrj 		break;
2276509Smrj 	}
2277509Smrj 
2278509Smrj 	return (DDI_SUCCESS);
2279509Smrj }
2280509Smrj 
2281509Smrj 
2282509Smrj /*
2283509Smrj  * rootnex_clean_dmahdl()
2284509Smrj  *    Clean the dma handle. This should be called on a handle alloc and an
2285509Smrj  *    unbind handle. Set the handle state to the default settings.
2286509Smrj  */
2287509Smrj static void
2288509Smrj rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
2289509Smrj {
2290509Smrj 	rootnex_dma_t *dma;
2291509Smrj 
2292509Smrj 
2293509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
2294509Smrj 
2295509Smrj 	hp->dmai_nwin = 0;
2296509Smrj 	dma->dp_current_cookie = 0;
2297509Smrj 	dma->dp_copybuf_size = 0;
2298509Smrj 	dma->dp_window = NULL;
2299509Smrj 	dma->dp_cbaddr = NULL;
2300509Smrj 	dma->dp_inuse = B_FALSE;
2301509Smrj 	dma->dp_need_to_free_cookie = B_FALSE;
2302509Smrj 	dma->dp_need_to_free_window = B_FALSE;
2303509Smrj 	dma->dp_partial_required = B_FALSE;
2304509Smrj 	dma->dp_trim_required = B_FALSE;
2305509Smrj 	dma->dp_sglinfo.si_copybuf_req = 0;
2306509Smrj #if !defined(__amd64)
2307509Smrj 	dma->dp_cb_remaping = B_FALSE;
2308509Smrj 	dma->dp_kva = NULL;
2309509Smrj #endif
2310509Smrj 
2311509Smrj 	/* FMA related initialization */
2312509Smrj 	hp->dmai_fault = 0;
2313509Smrj 	hp->dmai_fault_check = NULL;
2314509Smrj 	hp->dmai_fault_notify = NULL;
2315509Smrj 	hp->dmai_error.err_ena = 0;
2316509Smrj 	hp->dmai_error.err_status = DDI_FM_OK;
2317509Smrj 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
2318509Smrj 	hp->dmai_error.err_ontrap = NULL;
2319509Smrj 	hp->dmai_error.err_fep = NULL;
23201865Sdilpreet 	hp->dmai_error.err_cf = NULL;
2321509Smrj }
2322509Smrj 
2323509Smrj 
2324509Smrj /*
2325509Smrj  * rootnex_valid_alloc_parms()
2326509Smrj  *    Called in ddi_dma_alloc_handle path to validate its parameters.
2327509Smrj  */
2328509Smrj static int
2329509Smrj rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
2330509Smrj {
2331509Smrj 	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
2332509Smrj 	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
2333509Smrj 	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
2334509Smrj 	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
2335509Smrj 		return (DDI_DMA_BADATTR);
2336509Smrj 	}
2337509Smrj 
2338509Smrj 	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
2339509Smrj 		return (DDI_DMA_BADATTR);
2340509Smrj 	}
2341509Smrj 
2342509Smrj 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
2343509Smrj 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
2344509Smrj 	    attr->dma_attr_sgllen <= 0) {
2345509Smrj 		return (DDI_DMA_BADATTR);
2346509Smrj 	}
2347509Smrj 
2348509Smrj 	/* We should be able to DMA into every byte offset in a page */
2349509Smrj 	if (maxsegmentsize < MMU_PAGESIZE) {
2350509Smrj 		return (DDI_DMA_BADATTR);
2351509Smrj 	}
2352509Smrj 
2353509Smrj 	return (DDI_SUCCESS);
2354509Smrj }
2355509Smrj 
2356509Smrj 
2357509Smrj /*
2358509Smrj  * rootnex_valid_bind_parms()
2359509Smrj  *    Called in ddi_dma_*_bind_handle path to validate its parameters.
2360509Smrj  */
2361509Smrj /* ARGSUSED */
2362509Smrj static int
2363509Smrj rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
2364509Smrj {
2365509Smrj #if !defined(__amd64)
2366509Smrj 	/*
2367509Smrj 	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
2368509Smrj 	 * we can track the offset for the obsoleted interfaces.
2369509Smrj 	 */
2370509Smrj 	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
2371509Smrj 		return (DDI_DMA_TOOBIG);
2372509Smrj 	}
2373509Smrj #endif
2374509Smrj 
2375509Smrj 	return (DDI_SUCCESS);
2376509Smrj }
2377509Smrj 
2378509Smrj 
2379509Smrj /*
2380509Smrj  * rootnex_get_sgl()
2381509Smrj  *    Called in bind fastpath to get the sgl. Most of this will be replaced
2382509Smrj  *    with a call to the vm layer when vm2.0 comes around...
2383509Smrj  */
2384509Smrj static void
2385509Smrj rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
2386509Smrj     rootnex_sglinfo_t *sglinfo)
2387509Smrj {
2388509Smrj 	ddi_dma_atyp_t buftype;
23895084Sjohnlev 	rootnex_addr_t raddr;
2390509Smrj 	uint64_t last_page;
2391509Smrj 	uint64_t offset;
2392509Smrj 	uint64_t addrhi;
2393509Smrj 	uint64_t addrlo;
2394509Smrj 	uint64_t maxseg;
2395509Smrj 	page_t **pplist;
2396509Smrj 	uint64_t paddr;
2397509Smrj 	uint32_t psize;
2398509Smrj 	uint32_t size;
2399509Smrj 	caddr_t vaddr;
2400509Smrj 	uint_t pcnt;
2401509Smrj 	page_t *pp;
2402509Smrj 	uint_t cnt;
2403509Smrj 
2404509Smrj 
2405509Smrj 	/* shortcuts */
2406509Smrj 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
2407509Smrj 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
2408509Smrj 	maxseg = sglinfo->si_max_cookie_size;
2409509Smrj 	buftype = dmar_object->dmao_type;
2410509Smrj 	addrhi = sglinfo->si_max_addr;
2411509Smrj 	addrlo = sglinfo->si_min_addr;
2412509Smrj 	size = dmar_object->dmao_size;
2413509Smrj 
2414509Smrj 	pcnt = 0;
2415509Smrj 	cnt = 0;
2416509Smrj 
2417509Smrj 	/*
2418509Smrj 	 * if we were passed down a linked list of pages, i.e. pointer to
2419509Smrj 	 * page_t, use this to get our physical address and buf offset.
2420509Smrj 	 */
2421509Smrj 	if (buftype == DMA_OTYP_PAGES) {
2422509Smrj 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
2423509Smrj 		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2424509Smrj 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
2425509Smrj 		    MMU_PAGEOFFSET;
24265084Sjohnlev 		paddr = pfn_to_pa(pp->p_pagenum) + offset;
2427509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2428509Smrj 		pp = pp->p_next;
2429509Smrj 		sglinfo->si_asp = NULL;
2430509Smrj 
2431509Smrj 	/*
2432509Smrj 	 * We weren't passed down a linked list of pages, but if we were passed
2433509Smrj 	 * down an array of pages, use this to get our physical address and buf
2434509Smrj 	 * offset.
2435509Smrj 	 */
2436509Smrj 	} else if (pplist != NULL) {
2437509Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2438509Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
2439509Smrj 
2440509Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2441509Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2442509Smrj 		if (sglinfo->si_asp == NULL) {
2443509Smrj 			sglinfo->si_asp = &kas;
2444509Smrj 		}
2445509Smrj 
2446509Smrj 		ASSERT(!PP_ISFREE(pplist[pcnt]));
24475084Sjohnlev 		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2448509Smrj 		paddr += offset;
2449509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2450509Smrj 		pcnt++;
2451509Smrj 
2452509Smrj 	/*
2453509Smrj 	 * All we have is a virtual address, we'll need to call into the VM
2454509Smrj 	 * to get the physical address.
2455509Smrj 	 */
2456509Smrj 	} else {
2457509Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2458509Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
2459509Smrj 
2460509Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2461509Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2462509Smrj 		if (sglinfo->si_asp == NULL) {
2463509Smrj 			sglinfo->si_asp = &kas;
2464509Smrj 		}
2465509Smrj 
24665084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
2467509Smrj 		paddr += offset;
2468509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2469509Smrj 		vaddr += psize;
2470509Smrj 	}
2471509Smrj 
24725084Sjohnlev #ifdef __xpv
24735084Sjohnlev 	/*
24745084Sjohnlev 	 * If we're dom0, we're using a real device so we need to load
24755084Sjohnlev 	 * the cookies with MFNs instead of PFNs.
24765084Sjohnlev 	 */
24775084Sjohnlev 	raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
24785084Sjohnlev #else
24795084Sjohnlev 	raddr = paddr;
24805084Sjohnlev #endif
24815084Sjohnlev 
2482509Smrj 	/*
2483509Smrj 	 * Setup the first cookie with the physical address of the page and the
2484509Smrj 	 * size of the page (which takes into account the initial offset into
2485509Smrj 	 * the page.
2486509Smrj 	 */
24875084Sjohnlev 	sgl[cnt].dmac_laddress = raddr;
2488509Smrj 	sgl[cnt].dmac_size = psize;
2489509Smrj 	sgl[cnt].dmac_type = 0;
2490509Smrj 
2491509Smrj 	/*
2492509Smrj 	 * Save away the buffer offset into the page. We'll need this later in
2493509Smrj 	 * the copy buffer code to help figure out the page index within the
2494509Smrj 	 * buffer and the offset into the current page.
2495509Smrj 	 */
2496509Smrj 	sglinfo->si_buf_offset = offset;
2497509Smrj 
2498509Smrj 	/*
2499509Smrj 	 * If the DMA engine can't reach the physical address, increase how
2500509Smrj 	 * much copy buffer we need. We always increase by pagesize so we don't
2501509Smrj 	 * have to worry about converting offsets. Set a flag in the cookies
2502509Smrj 	 * dmac_type to indicate that it uses the copy buffer. If this isn't the
2503509Smrj 	 * last cookie, go to the next cookie (since we separate each page which
2504509Smrj 	 * uses the copy buffer in case the copy buffer is not physically
2505509Smrj 	 * contiguous.
2506509Smrj 	 */
25075084Sjohnlev 	if ((raddr < addrlo) || ((raddr + psize) > addrhi)) {
2508509Smrj 		sglinfo->si_copybuf_req += MMU_PAGESIZE;
2509509Smrj 		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2510509Smrj 		if ((cnt + 1) < sglinfo->si_max_pages) {
2511509Smrj 			cnt++;
2512509Smrj 			sgl[cnt].dmac_laddress = 0;
2513509Smrj 			sgl[cnt].dmac_size = 0;
2514509Smrj 			sgl[cnt].dmac_type = 0;
2515509Smrj 		}
2516509Smrj 	}
2517509Smrj 
2518509Smrj 	/*
2519509Smrj 	 * save this page's physical address so we can figure out if the next
2520509Smrj 	 * page is physically contiguous. Keep decrementing size until we are
2521509Smrj 	 * done with the buffer.
2522509Smrj 	 */
25235084Sjohnlev 	last_page = raddr & MMU_PAGEMASK;
2524509Smrj 	size -= psize;
2525509Smrj 
2526509Smrj 	while (size > 0) {
2527509Smrj 		/* Get the size for this page (i.e. partial or full page) */
2528509Smrj 		psize = MIN(size, MMU_PAGESIZE);
2529509Smrj 
2530509Smrj 		if (buftype == DMA_OTYP_PAGES) {
2531509Smrj 			/* get the paddr from the page_t */
2532509Smrj 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
25335084Sjohnlev 			paddr = pfn_to_pa(pp->p_pagenum);
2534509Smrj 			pp = pp->p_next;
2535509Smrj 		} else if (pplist != NULL) {
2536509Smrj 			/* index into the array of page_t's to get the paddr */
2537509Smrj 			ASSERT(!PP_ISFREE(pplist[pcnt]));
25385084Sjohnlev 			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2539509Smrj 			pcnt++;
25400Sstevel@tonic-gate 		} else {
2541509Smrj 			/* call into the VM to get the paddr */
25425084Sjohnlev 			paddr =  pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
2543509Smrj 			    vaddr));
2544509Smrj 			vaddr += psize;
2545509Smrj 		}
2546509Smrj 
25475084Sjohnlev #ifdef __xpv
25485084Sjohnlev 		/*
25495084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
25505084Sjohnlev 		 * the cookies with MFNs instead of PFNs.
25515084Sjohnlev 		 */
25525084Sjohnlev 		raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
25535084Sjohnlev #else
25545084Sjohnlev 		raddr = paddr;
25555084Sjohnlev #endif
25565084Sjohnlev 
2557509Smrj 		/* check to see if this page needs the copy buffer */
25585084Sjohnlev 		if ((raddr < addrlo) || ((raddr + psize) > addrhi)) {
2559509Smrj 			sglinfo->si_copybuf_req += MMU_PAGESIZE;
2560509Smrj 
25610Sstevel@tonic-gate 			/*
2562509Smrj 			 * if there is something in the current cookie, go to
2563509Smrj 			 * the next one. We only want one page in a cookie which
2564509Smrj 			 * uses the copybuf since the copybuf doesn't have to
2565509Smrj 			 * be physically contiguous.
2566509Smrj 			 */
2567509Smrj 			if (sgl[cnt].dmac_size != 0) {
2568509Smrj 				cnt++;
2569509Smrj 			}
25705084Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
2571509Smrj 			sgl[cnt].dmac_size = psize;
2572509Smrj #if defined(__amd64)
2573509Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2574509Smrj #else
2575509Smrj 			/*
2576509Smrj 			 * save the buf offset for 32-bit kernel. used in the
2577509Smrj 			 * obsoleted interfaces.
2578509Smrj 			 */
2579509Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
2580509Smrj 			    (dmar_object->dmao_size - size);
2581509Smrj #endif
2582509Smrj 			/* if this isn't the last cookie, go to the next one */
2583509Smrj 			if ((cnt + 1) < sglinfo->si_max_pages) {
2584509Smrj 				cnt++;
2585509Smrj 				sgl[cnt].dmac_laddress = 0;
2586509Smrj 				sgl[cnt].dmac_size = 0;
2587509Smrj 				sgl[cnt].dmac_type = 0;
2588509Smrj 			}
2589509Smrj 
2590509Smrj 		/*
2591509Smrj 		 * this page didn't need the copy buffer, if it's not physically
2592509Smrj 		 * contiguous, or it would put us over a segment boundary, or it
2593509Smrj 		 * puts us over the max cookie size, or the current sgl doesn't
2594509Smrj 		 * have anything in it.
2595509Smrj 		 */
25965084Sjohnlev 		} else if (((last_page + MMU_PAGESIZE) != raddr) ||
25975084Sjohnlev 		    !(raddr & sglinfo->si_segmask) ||
2598509Smrj 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
2599509Smrj 		    (sgl[cnt].dmac_size == 0)) {
2600509Smrj 			/*
2601509Smrj 			 * if we're not already in a new cookie, go to the next
2602509Smrj 			 * cookie.
2603509Smrj 			 */
2604509Smrj 			if (sgl[cnt].dmac_size != 0) {
2605509Smrj 				cnt++;
2606509Smrj 			}
2607509Smrj 
2608509Smrj 			/* save the cookie information */
26095084Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
2610509Smrj 			sgl[cnt].dmac_size = psize;
2611509Smrj #if defined(__amd64)
2612509Smrj 			sgl[cnt].dmac_type = 0;
2613509Smrj #else
2614509Smrj 			/*
2615509Smrj 			 * save the buf offset for 32-bit kernel. used in the
2616509Smrj 			 * obsoleted interfaces.
2617509Smrj 			 */
2618509Smrj 			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
2619509Smrj #endif
2620509Smrj 
2621509Smrj 		/*
2622509Smrj 		 * this page didn't need the copy buffer, it is physically
2623509Smrj 		 * contiguous with the last page, and it's <= the max cookie
2624509Smrj 		 * size.
2625509Smrj 		 */
2626509Smrj 		} else {
2627509Smrj 			sgl[cnt].dmac_size += psize;
2628509Smrj 
2629509Smrj 			/*
2630509Smrj 			 * if this exactly ==  the maximum cookie size, and
2631509Smrj 			 * it isn't the last cookie, go to the next cookie.
2632509Smrj 			 */
2633509Smrj 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
2634509Smrj 			    ((cnt + 1) < sglinfo->si_max_pages)) {
2635509Smrj 				cnt++;
2636509Smrj 				sgl[cnt].dmac_laddress = 0;
2637509Smrj 				sgl[cnt].dmac_size = 0;
2638509Smrj 				sgl[cnt].dmac_type = 0;
2639509Smrj 			}
2640509Smrj 		}
2641509Smrj 
2642509Smrj 		/*
2643509Smrj 		 * save this page's physical address so we can figure out if the
2644509Smrj 		 * next page is physically contiguous. Keep decrementing size
2645509Smrj 		 * until we are done with the buffer.
2646509Smrj 		 */
26475084Sjohnlev 		last_page = raddr;
2648509Smrj 		size -= psize;
2649509Smrj 	}
2650509Smrj 
2651509Smrj 	/* we're done, save away how many cookies the sgl has */
2652509Smrj 	if (sgl[cnt].dmac_size == 0) {
2653509Smrj 		ASSERT(cnt < sglinfo->si_max_pages);
2654509Smrj 		sglinfo->si_sgl_size = cnt;
2655509Smrj 	} else {
2656509Smrj 		sglinfo->si_sgl_size = cnt + 1;
2657509Smrj 	}
2658509Smrj }
2659509Smrj 
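/*
 * An illustrative example of the merge logic above (hypothetical
 * addresses, 4K pages, no copybuf or segment limits): pages at 0x1000,
 * 0x2000, and 0x5000 produce two cookies, since only the first pair is
 * physically contiguous:
 *
 *	page raddr	branch taken			resulting sgl
 *	0x1000		sgl[0].dmac_size == 0		sgl[0] = {0x1000, 4K}
 *	0x2000		last_page + 4K == raddr		sgl[0] = {0x1000, 8K}
 *	0x5000		last_page + 4K != raddr		sgl[1] = {0x5000, 4K}
 */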
2660509Smrj 
2661509Smrj /*
2662509Smrj  * rootnex_bind_slowpath()
2663509Smrj  *    Call in the bind path if the calling driver can't use the sgl without
2664509Smrj  *    modifying it. We either need to use the copy buffer and/or we will end up
2665509Smrj  *    with a partial bind.
2666509Smrj  */
2667509Smrj static int
2668509Smrj rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
2669509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag)
2670509Smrj {
2671509Smrj 	rootnex_sglinfo_t *sinfo;
2672509Smrj 	rootnex_window_t *window;
2673509Smrj 	ddi_dma_cookie_t *cookie;
2674509Smrj 	size_t copybuf_used;
2675509Smrj 	size_t dmac_size;
2676509Smrj 	boolean_t partial;
2677509Smrj 	off_t cur_offset;
2678509Smrj 	page_t *cur_pp;
2679509Smrj 	major_t mnum;
2680509Smrj 	int e;
2681509Smrj 	int i;
2682509Smrj 
2683509Smrj 
2684509Smrj 	sinfo = &dma->dp_sglinfo;
2685509Smrj 	copybuf_used = 0;
2686509Smrj 	partial = B_FALSE;
2687509Smrj 
2688509Smrj 	/*
2689509Smrj 	 * If we're using the copybuf, set the copybuf state in dma struct.
2690509Smrj 	 * Needs to be first since it sets the copy buffer size.
2691509Smrj 	 */
2692509Smrj 	if (sinfo->si_copybuf_req != 0) {
2693509Smrj 		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
2694509Smrj 		if (e != DDI_SUCCESS) {
2695509Smrj 			return (e);
2696509Smrj 		}
2697509Smrj 	} else {
2698509Smrj 		dma->dp_copybuf_size = 0;
2699509Smrj 	}
2700509Smrj 
2701509Smrj 	/*
2702509Smrj 	 * Figure out if we need to do a partial mapping. If so, figure out
2703509Smrj 	 * if we need to trim the buffers when we munge the sgl.
2704509Smrj 	 */
2705509Smrj 	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
2706509Smrj 	    (dma->dp_dma.dmao_size > dma->dp_maxxfer) ||
2707509Smrj 	    (attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
2708509Smrj 		dma->dp_partial_required = B_TRUE;
2709509Smrj 		if (attr->dma_attr_granular != 1) {
2710509Smrj 			dma->dp_trim_required = B_TRUE;
2711509Smrj 		}
2712509Smrj 	} else {
2713509Smrj 		dma->dp_partial_required = B_FALSE;
2714509Smrj 		dma->dp_trim_required = B_FALSE;
2715509Smrj 	}
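	/*
	 * e.g. (hypothetical numbers) binding a 1MB buffer that scattered
	 * into 20 cookies against hardware with dma_attr_sgllen == 16 makes
	 * dp_partial_required B_TRUE; if dma_attr_granular is 512 rather
	 * than 1, window edges must also be trimmed back to a 512-byte
	 * multiple, so dp_trim_required is B_TRUE as well.
	 */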
2716509Smrj 
2717509Smrj 	/* If we need to do a partial bind, make sure the driver supports it */
2718509Smrj 	if (dma->dp_partial_required &&
2719509Smrj 	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
2720509Smrj 
2721509Smrj 		mnum = ddi_driver_major(dma->dp_dip);
2722509Smrj 		/*
2723509Smrj 		 * patchable variable which allows us to print one warning per
2724509Smrj 		 * major number.
2725509Smrj 		 */
2726509Smrj 		if ((rootnex_bind_warn) &&
2727509Smrj 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
2728509Smrj 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
2729509Smrj 			cmn_err(CE_WARN, "!%s: coding error detected, the "
2730509Smrj 			    "driver is using ddi_dma_attr(9S) incorrectly. "
2731509Smrj 			    "There is a small risk of data corruption in "
2732509Smrj 			    "particular with large I/Os. The driver should be "
2733509Smrj 			    "replaced with a corrected version for proper "
2734509Smrj 			    "system operation. To disable this warning, add "
2735509Smrj 			    "'set rootnex:rootnex_bind_warn=0' to "
2736509Smrj 			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
2737509Smrj 		}
2738509Smrj 		return (DDI_DMA_TOOBIG);
2739509Smrj 	}
2740509Smrj 
2741509Smrj 	/*
2742509Smrj 	 * we might need multiple windows; set up state to handle them. In this
2743509Smrj 	 * code path, we will have at least one window.
2744509Smrj 	 */
2745509Smrj 	e = rootnex_setup_windows(hp, dma, attr, kmflag);
2746509Smrj 	if (e != DDI_SUCCESS) {
2747509Smrj 		rootnex_teardown_copybuf(dma);
2748509Smrj 		return (e);
2749509Smrj 	}
2750509Smrj 
2751509Smrj 	window = &dma->dp_window[0];
2752509Smrj 	cookie = &dma->dp_cookies[0];
2753509Smrj 	cur_offset = 0;
2754509Smrj 	rootnex_init_win(hp, dma, window, cookie, cur_offset);
2755509Smrj 	if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) {
2756509Smrj 		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
2757509Smrj 	}
2758509Smrj 
2759509Smrj 	/* loop through all the cookies we got back from get_sgl() */
2760509Smrj 	for (i = 0; i < sinfo->si_sgl_size; i++) {
2761509Smrj 		/*
2762509Smrj 		 * If we're using the copy buffer, check this cookie and setup
2763509Smrj 		 * its associated copy buffer state. If this cookie uses the
2764509Smrj 		 * copy buffer, make sure we sync this window during dma_sync.
2765509Smrj 		 */
2766509Smrj 		if (dma->dp_copybuf_size > 0) {
2767509Smrj 			rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie,
2768509Smrj 			    cur_offset, &copybuf_used, &cur_pp);
2769509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2770509Smrj 				window->wd_dosync = B_TRUE;
2771509Smrj 			}
2772509Smrj 		}
2773509Smrj 
2774509Smrj 		/*
2775509Smrj 		 * save away the cookie size, since it could be modified in
2776509Smrj 		 * the windowing code.
2777509Smrj 		 */
2778509Smrj 		dmac_size = cookie->dmac_size;
2779509Smrj 
2780509Smrj 		/* if we went over max copybuf size */
2781509Smrj 		if (dma->dp_copybuf_size &&
2782509Smrj 		    (copybuf_used > dma->dp_copybuf_size)) {
2783509Smrj 			partial = B_TRUE;
2784509Smrj 			e = rootnex_copybuf_window_boundary(hp, dma, &window,
2785509Smrj 			    cookie, cur_offset, &copybuf_used);
2786509Smrj 			if (e != DDI_SUCCESS) {
2787509Smrj 				rootnex_teardown_copybuf(dma);
2788509Smrj 				rootnex_teardown_windows(dma);
2789509Smrj 				return (e);
2790509Smrj 			}
2791509Smrj 
2792509Smrj 			/*
2793509Smrj 			 * if the cookie uses the copy buffer, make sure the
2794509Smrj 			 * new window we just moved to is set to sync.
2795509Smrj 			 */
2796509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2797509Smrj 				window->wd_dosync = B_TRUE;
2798509Smrj 			}
2799509Smrj 			DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *,
2800509Smrj 			    dma->dp_dip);
2801509Smrj 
2802509Smrj 		/* if the cookie cnt == max sgllen, move to the next window */
2803509Smrj 		} else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) {
2804509Smrj 			partial = B_TRUE;
2805509Smrj 			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
2806509Smrj 			e = rootnex_sgllen_window_boundary(hp, dma, &window,
2807509Smrj 			    cookie, attr, cur_offset);
2808509Smrj 			if (e != DDI_SUCCESS) {
2809509Smrj 				rootnex_teardown_copybuf(dma);
2810509Smrj 				rootnex_teardown_windows(dma);
2811509Smrj 				return (e);
2812509Smrj 			}
2813509Smrj 
2814509Smrj 			/*
2815509Smrj 			 * if the cookie uses the copy buffer, make sure the
2816509Smrj 			 * new window we just moved to is set to sync.
2817509Smrj 			 */
2818509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2819509Smrj 				window->wd_dosync = B_TRUE;
2820509Smrj 			}
2821509Smrj 			DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *,
2822509Smrj 			    dma->dp_dip);
2823509Smrj 
2824509Smrj 		/* else if we will be over maxxfer */
2825509Smrj 		} else if ((window->wd_size + dmac_size) >
2826509Smrj 		    dma->dp_maxxfer) {
2827509Smrj 			partial = B_TRUE;
2828509Smrj 			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
2829509Smrj 			    cookie);
2830509Smrj 			if (e != DDI_SUCCESS) {
2831509Smrj 				rootnex_teardown_copybuf(dma);
2832509Smrj 				rootnex_teardown_windows(dma);
2833509Smrj 				return (e);
2834509Smrj 			}
2835509Smrj 
2836509Smrj 			/*
2837509Smrj 			 * if the cookie uses the copy buffer, make sure the
2838509Smrj 			 * new window we just moved to is set to sync.
28390Sstevel@tonic-gate 			 */
2840509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2841509Smrj 				window->wd_dosync = B_TRUE;
2842509Smrj 			}
2843509Smrj 			DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *,
2844509Smrj 			    dma->dp_dip);
2845509Smrj 
2846509Smrj 		/* else this cookie fits in the current window */
2847509Smrj 		} else {
2848509Smrj 			window->wd_cookie_cnt++;
2849509Smrj 			window->wd_size += dmac_size;
2850509Smrj 		}
2851509Smrj 
2852509Smrj 		/* track our offset into the buffer, go to the next cookie */
2853509Smrj 		ASSERT(dmac_size <= dma->dp_dma.dmao_size);
2854509Smrj 		ASSERT(cookie->dmac_size <= dmac_size);
2855509Smrj 		cur_offset += dmac_size;
2856509Smrj 		cookie++;
2857509Smrj 	}
2858509Smrj 
2859509Smrj 	/* if we ended up with a zero sized window in the end, clean it up */
2860509Smrj 	/* if we ended up with a zero-sized window, clean it up */
2861509Smrj 		hp->dmai_nwin--;
2862509Smrj 		window--;
2863509Smrj 	}
2864509Smrj 
2865509Smrj 	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
2866509Smrj 
2867509Smrj 	if (!partial) {
2868509Smrj 		return (DDI_DMA_MAPPED);
2869509Smrj 	}
2870509Smrj 
2871509Smrj 	ASSERT(dma->dp_partial_required);
2872509Smrj 	return (DDI_DMA_PARTIAL_MAP);
2873509Smrj }
2874509Smrj 
2875509Smrj 
2876509Smrj /*
2877509Smrj  * rootnex_setup_copybuf()
2878509Smrj  *    Called in bind slowpath. Figures out if we're going to use the copy
2879509Smrj  *    buffer, and if we do, sets up the basic state to handle it.
2880509Smrj  */
2881509Smrj static int
2882509Smrj rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
2883509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr)
2884509Smrj {
2885509Smrj 	rootnex_sglinfo_t *sinfo;
2886509Smrj 	ddi_dma_attr_t lattr;
2887509Smrj 	size_t max_copybuf;
2888509Smrj 	int cansleep;
2889509Smrj 	int e;
2890509Smrj #if !defined(__amd64)
2891509Smrj 	int vmflag;
2892509Smrj #endif
2893509Smrj 
2894509Smrj 
2895509Smrj 	sinfo = &dma->dp_sglinfo;
2896509Smrj 
28975251Smrj 	/* read this first so it's consistent through the routine  */
28985251Smrj 	max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;
2899509Smrj 
2900509Smrj 	/* We need to call into the rootnex on ddi_dma_sync() */
2901509Smrj 	hp->dmai_rflags &= ~DMP_NOSYNC;
2902509Smrj 
2903509Smrj 	/* make sure the copybuf size <= the max size */
2904509Smrj 	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
2905509Smrj 	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
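	/*
	 * e.g. (hypothetical numbers) si_copybuf_req == 40K with a 16K
	 * max_copybuf clamps dp_copybuf_size to 16K; the shortfall is made
	 * up later by using multiple copybuf windows.
	 */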
2906509Smrj 
2907509Smrj #if !defined(__amd64)
2908509Smrj 	/*
2909509Smrj 	 * if we don't have kva space to copy to/from, allocate the KVA space
2910509Smrj 	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
2911509Smrj 	 * the 64-bit kernel.
2912509Smrj 	 */
2913509Smrj 	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
2914509Smrj 	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
2915509Smrj 
2916509Smrj 		/* convert the sleep flags */
2917509Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2918509Smrj 			vmflag = VM_SLEEP;
2919509Smrj 		} else {
2920509Smrj 			vmflag = VM_NOSLEEP;
2921509Smrj 		}
2922509Smrj 
2923509Smrj 		/* allocate Kernel VA space that we can bcopy to/from */
2924509Smrj 		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
2925509Smrj 		    vmflag);
2926509Smrj 		if (dma->dp_kva == NULL) {
2927509Smrj 			return (DDI_DMA_NORESOURCES);
2928509Smrj 		}
2929509Smrj 	}
2930509Smrj #endif
2931509Smrj 
2932509Smrj 	/* convert the sleep flags */
2933509Smrj 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2934509Smrj 		cansleep = 1;
2935509Smrj 	} else {
2936509Smrj 		cansleep = 0;
2937509Smrj 	}
2938509Smrj 
2939509Smrj 	/*
29407173Smrj 	 * Allocate the actual copy buffer. This needs to fit within the DMA
29417173Smrj 	 * engine limits, so we can't use kmem_alloc... We don't need
29427173Smrj 	 * contiguous memory (sgllen) since we will be forcing windows on
29437173Smrj 	 * sgllen anyway.
2944509Smrj 	 */
2945509Smrj 	lattr = *attr;
2946509Smrj 	lattr.dma_attr_align = MMU_PAGESIZE;
29477173Smrj 	/*
29487173Smrj 	 * this should be < 0 to indicate no limit, but due to a bug in
29497173Smrj 	 * the rootnex, we'll set it to the maximum positive int.
29507173Smrj 	 */
29517173Smrj 	lattr.dma_attr_sgllen = 0x7fffffff;
2952509Smrj 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
2953509Smrj 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
2954509Smrj 	if (e != DDI_SUCCESS) {
2955509Smrj #if !defined(__amd64)
2956509Smrj 		if (dma->dp_kva != NULL) {
2957509Smrj 			vmem_free(heap_arena, dma->dp_kva,
2958509Smrj 			    dma->dp_copybuf_size);
2959509Smrj 		}
2960509Smrj #endif
2961509Smrj 		return (DDI_DMA_NORESOURCES);
2962509Smrj 	}
2963509Smrj 
2964509Smrj 	DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
2965509Smrj 	    size_t, dma->dp_copybuf_size);
2966509Smrj 
2967509Smrj 	return (DDI_SUCCESS);
2968509Smrj }
2969509Smrj 
2970509Smrj 
2971509Smrj /*
2972509Smrj  * rootnex_setup_windows()
2973509Smrj  *    Called in bind slowpath to set up the window state. We always have
2974509Smrj  *    windows in the slowpath, even if the window count is 1.
2975509Smrj  */
2976509Smrj static int
2977509Smrj rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
2978509Smrj     ddi_dma_attr_t *attr, int kmflag)
2979509Smrj {
2980509Smrj 	rootnex_window_t *windowp;
2981509Smrj 	rootnex_sglinfo_t *sinfo;
2982509Smrj 	size_t copy_state_size;
2983509Smrj 	size_t win_state_size;
2984509Smrj 	size_t state_available;
2985509Smrj 	size_t space_needed;
2986509Smrj 	uint_t copybuf_win;
2987509Smrj 	uint_t maxxfer_win;
2988509Smrj 	size_t space_used;
2989509Smrj 	uint_t sglwin;
2990509Smrj 
2991509Smrj 
2992509Smrj 	sinfo = &dma->dp_sglinfo;
2993509Smrj 
2994509Smrj 	dma->dp_current_win = 0;
2995509Smrj 	hp->dmai_nwin = 0;
2996509Smrj 
2997509Smrj 	/* If we don't need to do a partial, we only have one window */
2998509Smrj 	if (!dma->dp_partial_required) {
2999509Smrj 		dma->dp_max_win = 1;
3000509Smrj 
3001509Smrj 	/*
3002509Smrj 	 * we need multiple windows; figure out the worst-case number
3003509Smrj 	 * of windows.
3004509Smrj 	 */
3005509Smrj 	} else {
3006509Smrj 		/*
3007509Smrj 		 * if we need windows because we need more copy buffer than
3008509Smrj 		 * we allow, the worst-case number of windows we could need
3009509Smrj 		 * here would be (copybuf space required / copybuf space that
3010509Smrj 		 * we have) plus one for remainder, and plus 2 to handle the
3011509Smrj 		 * extra pages on the trim for the first and last pages of the
3012509Smrj 		 * buffer (a page is the minimum window size so under the right
3013509Smrj 		 * attr settings, you could have a window for each page).
3014509Smrj 		 * The last page will only be hit here if the size is not a
3015509Smrj 		 * multiple of the granularity (which theoretically shouldn't
3016509Smrj 		 * be the case but never has been enforced, so we could have
3017509Smrj 		 * broken things without it).
3018509Smrj 		 */
3019509Smrj 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
3020509Smrj 			ASSERT(dma->dp_copybuf_size > 0);
3021509Smrj 			copybuf_win = (sinfo->si_copybuf_req /
3022509Smrj 			    dma->dp_copybuf_size) + 1 + 2;
3023509Smrj 		} else {
3024509Smrj 			copybuf_win = 0;
3025509Smrj 		}
3026509Smrj 
3027509Smrj 		/*
3028509Smrj 		 * if we need windows because we have more cookies than the H/W
3029509Smrj 		 * can handle, the number of windows we would need here would
3030509Smrj 		 * be (cookie count / cookie count H/W supports) plus one for
3031509Smrj 		 * remainder, and plus 2 to handle the extra pages on the trim
3032509Smrj 		 * (see above comment about trim)
3033509Smrj 		 */
3034509Smrj 		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
3035509Smrj 			sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen)
3036509Smrj 			    + 1) + 2;
3037509Smrj 		} else {
3038509Smrj 			sglwin = 0;
3039509Smrj 		}
3040509Smrj 
3041509Smrj 		/*
3042509Smrj 		 * if we need windows because we're binding more memory than the
3043509Smrj 		 * H/W can transfer at once, the number of windows we would need
3044509Smrj 		 * here would be (xfer count / max xfer H/W supports) plus one
3045509Smrj 		 * for remainder, and plus 2 to handle the extra pages on the
3046509Smrj 		 * trim (see above comment about trim)
3047509Smrj 		 */
3048509Smrj 		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
3049509Smrj 			maxxfer_win = (dma->dp_dma.dmao_size /
3050509Smrj 			    dma->dp_maxxfer) + 1 + 2;
3051509Smrj 		} else {
3052509Smrj 			maxxfer_win = 0;
3053509Smrj 		}
3054509Smrj 		dma->dp_max_win =  copybuf_win + sglwin + maxxfer_win;
3055509Smrj 		ASSERT(dma->dp_max_win > 0);
3056509Smrj 	}
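	/*
	 * Worked example (hypothetical numbers): si_copybuf_req == 36K with
	 * a 16K copybuf gives copybuf_win == (36K / 16K) + 1 + 2 == 5;
	 * si_sgl_size == 10 with dma_attr_sgllen == 4 gives sglwin ==
	 * (10 / 4) + 1 + 2 == 5; a 1MB bind with a 256K maxxfer gives
	 * maxxfer_win == (1MB / 256K) + 1 + 2 == 7; so dp_max_win == 17.
	 */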
3057509Smrj 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
3058509Smrj 
3059509Smrj 	/*
3060509Smrj 	 * Get space for window and potential copy buffer state. Before we
3061509Smrj 	 * go and allocate memory, see if we can get away with using what's
3062509Smrj 	 * left in the pre-allocated state or the dynamically allocated sgl.
3063509Smrj 	 */
3064509Smrj 	space_used = (uintptr_t)(sinfo->si_sgl_size *
3065509Smrj 	    sizeof (ddi_dma_cookie_t));
3066509Smrj 
3067509Smrj 	/* if we dynamically allocated space for the cookies */
3068509Smrj 	if (dma->dp_need_to_free_cookie) {
3069509Smrj 		/* if we have more space in the pre-allocated buffer, use it */
3070509Smrj 		ASSERT(space_used <= dma->dp_cookie_size);
3071509Smrj 		if ((dma->dp_cookie_size - space_used) <=
3072509Smrj 		    rootnex_state->r_prealloc_size) {
3073509Smrj 			state_available = rootnex_state->r_prealloc_size;
3074509Smrj 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
3075509Smrj 
3076509Smrj 		/*
3077509Smrj 		 * else, we have more free space in the dynamically allocated
3078509Smrj 		 * buffer, i.e. the buffer wasn't worse case fragmented so we
3079509Smrj 		 * buffer, i.e. the buffer wasn't worst-case fragmented so we
3080509Smrj 		 */
3081509Smrj 		} else {
3082509Smrj 			state_available = dma->dp_cookie_size - space_used;
3083509Smrj 			windowp = (rootnex_window_t *)
3084509Smrj 			    &dma->dp_cookies[sinfo->si_sgl_size];
3085509Smrj 		}
3086509Smrj 
3087509Smrj 	/* we used the pre-allocated buffer */
3088509Smrj 	} else {
3089509Smrj 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
3090509Smrj 		state_available = rootnex_state->r_prealloc_size - space_used;
3091509Smrj 		windowp = (rootnex_window_t *)
3092509Smrj 		    &dma->dp_cookies[sinfo->si_sgl_size];
3093509Smrj 	}
3094509Smrj 
3095509Smrj 	/*
3096509Smrj 	 * figure out how much state we need to track the copy buffer. Add an
3097509Smrj 	 * additional 8 bytes for pointer alignment later.
3098509Smrj 	 */
3099509Smrj 	if (dma->dp_copybuf_size > 0) {
3100509Smrj 		copy_state_size = sinfo->si_max_pages *
3101509Smrj 		    sizeof (rootnex_pgmap_t);
3102509Smrj 	} else {
3103509Smrj 		copy_state_size = 0;
3104509Smrj 	}
3105509Smrj 	/* add an additional 8 bytes for pointer alignment */
3106509Smrj 	space_needed = win_state_size + copy_state_size + 0x8;
3107509Smrj 
3108509Smrj 	/* if we have enough space already, use it */
3109509Smrj 	if (state_available >= space_needed) {
3110509Smrj 		dma->dp_window = windowp;
3111509Smrj 		dma->dp_need_to_free_window = B_FALSE;
3112509Smrj 
3113509Smrj 	/* not enough space, need to allocate more. */
3114509Smrj 	} else {
3115509Smrj 		dma->dp_window = kmem_alloc(space_needed, kmflag);
3116509Smrj 		if (dma->dp_window == NULL) {
3117509Smrj 			return (DDI_DMA_NORESOURCES);
3118509Smrj 		}
3119509Smrj 		dma->dp_need_to_free_window = B_TRUE;
3120509Smrj 		dma->dp_window_size = space_needed;
3121509Smrj 		DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *,
3122509Smrj 		    dma->dp_dip, size_t, space_needed);
3123509Smrj 	}
3124509Smrj 
3125509Smrj 	/*
3126509Smrj 	 * we allocate copy buffer state and window state at the same time.
3127509Smrj 	 * Set up our copy buffer state pointers. Make sure it's aligned.
3128509Smrj 	 */
3129509Smrj 	if (dma->dp_copybuf_size > 0) {
3130509Smrj 		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
3131509Smrj 		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
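		/*
		 * the round-up above is the usual align-to-8 idiom: for any
		 * pointer value p, ((p + 0x7) & ~0x7) is the smallest
		 * multiple of 8 >= p, e.g. 0x1001 -> 0x1008 and 0x1008 ->
		 * 0x1008. The additional 8 bytes in space_needed guarantee
		 * the rounded pointer stays inside the allocation.
		 */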
3132509Smrj 
3133509Smrj #if !defined(__amd64)
3134509Smrj 		/*
3135509Smrj 		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
3136509Smrj 		 * false/NULL. Should be quicker to bzero vs loop and set.
3137509Smrj 		 */
3138509Smrj 		bzero(dma->dp_pgmap, copy_state_size);
3139509Smrj #endif
3140509Smrj 	} else {
3141509Smrj 		dma->dp_pgmap = NULL;
3142509Smrj 	}
3143509Smrj 
3144509Smrj 	return (DDI_SUCCESS);
3145509Smrj }
3146509Smrj 
3147509Smrj 
3148509Smrj /*
3149509Smrj  * rootnex_teardown_copybuf()
3150509Smrj  *    cleans up after rootnex_setup_copybuf()
3151509Smrj  */
3152509Smrj static void
3153509Smrj rootnex_teardown_copybuf(rootnex_dma_t *dma)
3154509Smrj {
3155509Smrj #if !defined(__amd64)
3156509Smrj 	int i;
3157509Smrj 
3158509Smrj 	/*
3159509Smrj 	 * if we allocated kernel heap VMEM space, go through all the pages and
3160509Smrj 	 * map out any of the ones that we mapped into the kernel heap VMEM
3161509Smrj 	 * arena. Then free the VMEM space.
3162509Smrj 	 */
3163509Smrj 	if (dma->dp_kva != NULL) {
3164509Smrj 		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
3165509Smrj 			if (dma->dp_pgmap[i].pm_mapped) {
3166509Smrj 				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
3167509Smrj 				    MMU_PAGESIZE, HAT_UNLOAD);
3168509Smrj 				dma->dp_pgmap[i].pm_mapped = B_FALSE;
3169509Smrj 			}
3170509Smrj 		}
3171509Smrj 
3172509Smrj 		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
3173509Smrj 	}
3174509Smrj 
3175509Smrj #endif
3176509Smrj 
3177509Smrj 	/* if we allocated a copy buffer, free it */
3178509Smrj 	if (dma->dp_cbaddr != NULL) {
31791900Seota 		i_ddi_mem_free(dma->dp_cbaddr, NULL);
3180509Smrj 	}
3181509Smrj }
3182509Smrj 
3183509Smrj 
3184509Smrj /*
3185509Smrj  * rootnex_teardown_windows()
3186509Smrj  *    cleans up after rootnex_setup_windows()
3187509Smrj  */
3188509Smrj static void
3189509Smrj rootnex_teardown_windows(rootnex_dma_t *dma)
3190509Smrj {
3191509Smrj 	/*
3192509Smrj 	 * if we had to allocate window state on the last bind (because we
3193509Smrj 	 * didn't have enough pre-allocated space in the handle), free it.
3194509Smrj 	 */
3195509Smrj 	if (dma->dp_need_to_free_window) {
3196509Smrj 		kmem_free(dma->dp_window, dma->dp_window_size);
3197509Smrj 	}
3198509Smrj }
3199509Smrj 
3200509Smrj 
3201509Smrj /*
3202509Smrj  * rootnex_init_win()
3203509Smrj  *    Called in bind slow path during creation of a new window. Initializes
3204509Smrj  *    window state to default values.
3205509Smrj  */
3206509Smrj /*ARGSUSED*/
3207509Smrj static void
3208509Smrj rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3209509Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
3210509Smrj {
3211509Smrj 	hp->dmai_nwin++;
3212509Smrj 	window->wd_dosync = B_FALSE;
3213509Smrj 	window->wd_offset = cur_offset;
3214509Smrj 	window->wd_size = 0;
3215509Smrj 	window->wd_first_cookie = cookie;
3216509Smrj 	window->wd_cookie_cnt = 0;
3217509Smrj 	window->wd_trim.tr_trim_first = B_FALSE;
3218509Smrj 	window->wd_trim.tr_trim_last = B_FALSE;
3219509Smrj 	window->wd_trim.tr_first_copybuf_win = B_FALSE;
3220509Smrj 	window->wd_trim.tr_last_copybuf_win = B_FALSE;
3221509Smrj #if !defined(__amd64)
3222509Smrj 	window->wd_remap_copybuf = dma->dp_cb_remaping;
3223509Smrj #endif
3224509Smrj }
3225509Smrj 
3226509Smrj 
3227509Smrj /*
3228509Smrj  * rootnex_setup_cookie()
3229509Smrj  *    Called in the bind slow path when the sgl uses the copy buffer. If any of
3230509Smrj  *    the sgl uses the copy buffer, we need to go through each cookie, figure
3231509Smrj  *    out if it uses the copy buffer, and if it does, save away everything we'll
3232509Smrj  *    need during sync.
3233509Smrj  */
3234509Smrj static void
3235509Smrj rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
3236509Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
3237509Smrj     page_t **cur_pp)
3238509Smrj {
3239509Smrj 	boolean_t copybuf_sz_power_2;
3240509Smrj 	rootnex_sglinfo_t *sinfo;
32415084Sjohnlev 	paddr_t paddr;
3242509Smrj 	uint_t pidx;
3243509Smrj 	uint_t pcnt;
3244509Smrj 	off_t poff;
3245509Smrj #if defined(__amd64)
3246509Smrj 	pfn_t pfn;
3247509Smrj #else
3248509Smrj 	page_t **pplist;
3249509Smrj #endif
3250509Smrj 
3251509Smrj 	sinfo = &dma->dp_sglinfo;
3252509Smrj 
3253509Smrj 	/*
3254509Smrj 	 * Calculate the page index relative to the start of the buffer. The
3255509Smrj 	 * index to the current page for our buffer is the offset into the
3256509Smrj 	 * first page of the buffer plus our current offset into the buffer
3257509Smrj 	 * itself, shifted of course...
3258509Smrj 	 */
3259509Smrj 	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
3260509Smrj 	ASSERT(pidx < sinfo->si_max_pages);
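	/*
	 * e.g. (hypothetical values) si_buf_offset == 0x320 and cur_offset ==
	 * 0x2000 give pidx == 0x2320 >> MMU_PAGESHIFT == 2 for 4K pages,
	 * i.e. the third page the buffer touches.
	 */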
3261509Smrj 
3262509Smrj 	/* if this cookie uses the copy buffer */
3263509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3264509Smrj 		/*
3265509Smrj 		 * NOTE: we know that since this cookie uses the copy buffer, it
3266509Smrj 		 * is <= MMU_PAGESIZE.
3267509Smrj 		 */
3268509Smrj 
3269509Smrj 		/*
3270509Smrj 		 * get the offset into the page. For the 64-bit kernel, get the
3271509Smrj 		 * pfn which we'll use with seg kpm.
3272509Smrj 		 */
32735084Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
3274509Smrj #if defined(__amd64)
32755084Sjohnlev 		/* mfn_to_pfn() is a NOP on i86pc */
32765084Sjohnlev 		pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
32775084Sjohnlev #endif /* __amd64 */
3278509Smrj 
3279509Smrj 		/* figure out if the copybuf size is a power of 2 */
3280509Smrj 		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
3281509Smrj 			copybuf_sz_power_2 = B_FALSE;
3282509Smrj 		} else {
3283509Smrj 			copybuf_sz_power_2 = B_TRUE;
3284509Smrj 		}
3285509Smrj 
3286509Smrj 		/* This page uses the copy buffer */
3287509Smrj 		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
3288509Smrj 
3289509Smrj 		/*
3290509Smrj 		 * save the copy buffer KVA that we'll use with this page.
3291509Smrj 		 * if we still fit within the copybuf, it's a simple add.
3292509Smrj 		 * otherwise, we need to wrap over using & or % accordingly.
3293509Smrj 		 */
3294509Smrj 		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
3295509Smrj 			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
3296509Smrj 			    *copybuf_used;
3297509Smrj 		} else {
3298509Smrj 			if (copybuf_sz_power_2) {
3299509Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3300509Smrj 				    (uintptr_t)dma->dp_cbaddr +
3301509Smrj 				    (*copybuf_used &
3302509Smrj 				    (dma->dp_copybuf_size - 1)));
33030Sstevel@tonic-gate 			} else {
3304509Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3305509Smrj 				    (uintptr_t)dma->dp_cbaddr +
3306509Smrj 				    (*copybuf_used % dma->dp_copybuf_size));
33070Sstevel@tonic-gate 			}
3308509Smrj 		}
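		/*
		 * the masked form above is just a cheap modulo: when
		 * dp_copybuf_size is a power of two, (used & (size - 1)) ==
		 * (used % size). e.g. a 16K copybuf with *copybuf_used ==
		 * 36K wraps to offset 4K either way.
		 */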
3309509Smrj 
3310509Smrj 		/*
3311509Smrj 		 * overwrite the cookie physical address with the physical
3312509Smrj 		 * address of the copy buffer page that we will use.
3314509Smrj 		 */
33155084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
3316509Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
3317509Smrj 
33185084Sjohnlev #ifdef __xpv
33195084Sjohnlev 		/*
33205084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
33215084Sjohnlev 		 * the cookies with MAs instead of PAs.
33225084Sjohnlev 		 */
33235084Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
33245084Sjohnlev #else
33255084Sjohnlev 		cookie->dmac_laddress = paddr;
33265084Sjohnlev #endif
33275084Sjohnlev 
3328509Smrj 		/* if we have a kernel VA, it's easy, just save that address */
3329509Smrj 		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
3330509Smrj 		    (sinfo->si_asp == &kas)) {
3331509Smrj 			/*
3332509Smrj 			 * save away the page aligned virtual address of the
3333509Smrj 			 * driver buffer. Offsets are handled in the sync code.
3334509Smrj 			 */
3335509Smrj 			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
3336509Smrj 			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
3337509Smrj 			    & MMU_PAGEMASK);
3338509Smrj #if !defined(__amd64)
3339509Smrj 			/*
3340509Smrj 			 * we didn't need to, and will never need to, map this
3341509Smrj 			 * page.
3342509Smrj 			 */
3343509Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3344509Smrj #endif
3345509Smrj 
3346509Smrj 		/* we don't have a kernel VA. We need one for the bcopy. */
3347509Smrj 		} else {
3348509Smrj #if defined(__amd64)
3349509Smrj 			/*
3350509Smrj 			 * for the 64-bit kernel, it's easy. We use seg kpm to
3351509Smrj 			 * get a Kernel VA for the corresponding pfn.
3352509Smrj 			 */
3353509Smrj 			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
3354509Smrj #else
3355509Smrj 			/*
3356509Smrj 			 * for the 32-bit kernel, this is a pain. First we'll
3357509Smrj 			 * save away the page_t or user VA for this page. This
3358509Smrj 			 * is needed in rootnex_dma_win() when we switch to a
3359509Smrj 			 * new window which requires us to re-map the copy
3360509Smrj 			 * buffer.
3361509Smrj 			 */
3362509Smrj 			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
3363509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3364509Smrj 				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
3365509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3366509Smrj 			} else if (pplist != NULL) {
3367509Smrj 				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
3368509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3369509Smrj 			} else {
3370509Smrj 				dma->dp_pgmap[pidx].pm_pp = NULL;
3371509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
3372509Smrj 				    (((uintptr_t)
3373509Smrj 				    dmar_object->dmao_obj.virt_obj.v_addr +
3374509Smrj 				    cur_offset) & MMU_PAGEMASK);
3375509Smrj 			}
3376509Smrj 
3377509Smrj 			/*
3378509Smrj 			 * save away the page aligned virtual address which was
3379509Smrj 			 * allocated from the kernel heap arena (taking into
3380509Smrj 			 * account if we need more copy buffer than we allocated
3381509Smrj 			 * and use multiple windows to handle this, i.e. &,%).
3382509Smrj 			 * NOTE: there isn't any physical memory backing this
3383509Smrj 			 * virtual address space currently.
3384509Smrj 			 */
3385509Smrj 			if ((*copybuf_used + MMU_PAGESIZE) <=
3386509Smrj 			    dma->dp_copybuf_size) {
3387509Smrj 				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3388509Smrj 				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
3389509Smrj 				    MMU_PAGEMASK);
3390509Smrj 			} else {
3391509Smrj 				if (copybuf_sz_power_2) {
3392509Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3393509Smrj 					    (((uintptr_t)dma->dp_kva +
3394509Smrj 					    (*copybuf_used &
3395509Smrj 					    (dma->dp_copybuf_size - 1))) &
3396509Smrj 					    MMU_PAGEMASK);
3397509Smrj 				} else {
3398509Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3399509Smrj 					    (((uintptr_t)dma->dp_kva +
3400509Smrj 					    (*copybuf_used %
3401509Smrj 					    dma->dp_copybuf_size)) &
3402509Smrj 					    MMU_PAGEMASK);
3403509Smrj 				}
3404509Smrj 			}
3405509Smrj 
3406509Smrj 			/*
3407509Smrj 			 * if we haven't used up the available copy buffer yet,
3408509Smrj 			 * map the kva to the physical page.
3409509Smrj 			 */
3410509Smrj 			if (!dma->dp_cb_remaping && ((*copybuf_used +
3411509Smrj 			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
3412509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
3413509Smrj 				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
3414509Smrj 					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
3415509Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
3416509Smrj 				} else {
3417509Smrj 					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
3418509Smrj 					    sinfo->si_asp,
3419509Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
3420509Smrj 				}
3421509Smrj 
3422509Smrj 			/*
3423509Smrj 			 * we've used up the available copy buffer, this page
3424509Smrj 			 * will have to be mapped during rootnex_dma_win() when
3425509Smrj 			 * we switch to a new window, which requires a re-map
3426509Smrj 			 * of the copy buffer. (32-bit kernel only)
3427509Smrj 			 */
3428509Smrj 			} else {
3429509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3430509Smrj 			}
3431509Smrj #endif
3432509Smrj 			/* go to the next page_t */
3433509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3434509Smrj 				*cur_pp = (*cur_pp)->p_next;
3435509Smrj 			}
34360Sstevel@tonic-gate 		}
3437509Smrj 
3438509Smrj 		/* add to the copy buffer count */
3439509Smrj 		*copybuf_used += MMU_PAGESIZE;
3440509Smrj 
3441509Smrj 	/*
3442509Smrj 	 * This cookie doesn't use the copy buffer. Walk through the pages this
3443509Smrj 	 * cookie occupies to reflect this.
3444509Smrj 	 */
3445509Smrj 	} else {
3446509Smrj 		/*
3447509Smrj 		 * figure out how many pages the cookie occupies. We need to
3448509Smrj 		 * use the original page offset of the buffer and the cookie's
3449509Smrj 		 * offset in the buffer to do this.
3450509Smrj 		 */
3451509Smrj 		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
3452509Smrj 		pcnt = mmu_btopr(cookie->dmac_size + poff);
3453509Smrj 
3454509Smrj 		while (pcnt > 0) {
3455509Smrj #if !defined(__amd64)
3456509Smrj 			/*
3457509Smrj 			 * the 32-bit kernel doesn't have seg kpm, so we need
3458509Smrj 			 * to map in the driver buffer (if it didn't come down
3459509Smrj 			 * with a kernel VA) on the fly. Since this page doesn't
3460509Smrj 			 * use the copy buffer, it doesn't, nor will it ever,
3461509Smrj 			 * have to be mapped in.
3462509Smrj 			 */
3463509Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3464509Smrj #endif
3465509Smrj 			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
3466509Smrj 
3467509Smrj 			/*
3468509Smrj 			 * we need to update pidx and cur_pp or we'll lose
3469509Smrj 			 * track of where we are.
3470509Smrj 			 */
3471509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3472509Smrj 				*cur_pp = (*cur_pp)->p_next;
3473509Smrj 			}
3474509Smrj 			pidx++;
3475509Smrj 			pcnt--;
3476509Smrj 		}
3477509Smrj 	}
3478509Smrj }
3479509Smrj 
3480509Smrj 
3481509Smrj /*
3482509Smrj  * rootnex_sgllen_window_boundary()
3483509Smrj  *    Called in the bind slow path when the next cookie causes us to exceed (in
3484509Smrj  *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
3485509Smrj  *    length supported by the DMA H/W.
3486509Smrj  */
3487509Smrj static int
3488509Smrj rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3489509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
3490509Smrj     off_t cur_offset)
3491509Smrj {
3492509Smrj 	off_t new_offset;
3493509Smrj 	size_t trim_sz;
3494509Smrj 	off_t coffset;
3495509Smrj 
3496509Smrj 
3497509Smrj 	/*
3498509Smrj 	 * if we know we'll never have to trim, it's pretty easy. Just move to
3499509Smrj 	 * the next window and init it. We're done.
3500509Smrj 	 */
3501509Smrj 	if (!dma->dp_trim_required) {
3502509Smrj 		(*windowp)++;
3503509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3504509Smrj 		(*windowp)->wd_cookie_cnt++;
3505509Smrj 		(*windowp)->wd_size = cookie->dmac_size;
3506509Smrj 		return (DDI_SUCCESS);
3507509Smrj 	}
3508509Smrj 
3509509Smrj 	/* figure out how much we need to trim from the window */
3510509Smrj 	ASSERT(attr->dma_attr_granular != 0);
3511509Smrj 	if (dma->dp_granularity_power_2) {
3512509Smrj 		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
3513509Smrj 	} else {
3514509Smrj 		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
3515509Smrj 	}
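	/*
	 * e.g. (hypothetical values) wd_size == 0x2300 with dma_attr_granular
	 * == 0x200 leaves trim_sz == 0x100; the masked form applies because a
	 * power-of-two granularity makes (size & (gran - 1)) == (size % gran).
	 */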
3516509Smrj 
3517509Smrj 	/* The window's a whole multiple of granularity. We're done */
3518509Smrj 	if (trim_sz == 0) {
3519509Smrj 		(*windowp)++;
3520509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3521509Smrj 		(*windowp)->wd_cookie_cnt++;
3522509Smrj 		(*windowp)->wd_size = cookie->dmac_size;
3523509Smrj 		return (DDI_SUCCESS);
3524509Smrj 	}
3525509Smrj 
3526509Smrj 	/*
3527509Smrj 	 * The window's not a whole multiple of granularity. Since we know
3528509Smrj 	 * this is due to the sgllen, we need to go back and trim the last
3529509Smrj 	 * cookie, add the left over part of that cookie into the new window,
3530509Smrj 	 * and then add the new cookie into the new window.
3531509Smrj 	 */
3532509Smrj 
3533509Smrj 	/*
3534509Smrj 	 * make sure the driver isn't making us do something bad... Trimming and
3535509Smrj 	 * sgllen == 1 don't go together.
3536509Smrj 	 */
3537509Smrj 	if (attr->dma_attr_sgllen == 1) {
3538509Smrj 		return (DDI_DMA_NOMAPPING);
3539509Smrj 	}
3540509Smrj 
3541509Smrj 	/*
3542509Smrj 	 * first, setup the current window to account for the trim. Need to go
3543509Smrj 	 * back to the last cookie for this.
3544509Smrj 	 */
3545509Smrj 	cookie--;
3546509Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3547509Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
35485084Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3549509Smrj 	ASSERT(cookie->dmac_size > trim_sz);
3550509Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3551509Smrj 	(*windowp)->wd_size -= trim_sz;
3552509Smrj 
3553509Smrj 	/* save the buffer offsets for the next window */
3554509Smrj 	coffset = cookie->dmac_size - trim_sz;
3555509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3556509Smrj 
3557509Smrj 	/*
3558509Smrj 	 * set this now in case this is the first window. All other cases are
3559509Smrj 	 * set in dma_win().
3560509Smrj 	 */
3561509Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3562509Smrj 
3563509Smrj 	/*
3564509Smrj 	 * initialize the next window using what's left over in the previous
3565509Smrj 	 * cookie.
3566509Smrj 	 */
3567509Smrj 	(*windowp)++;
3568509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3569509Smrj 	(*windowp)->wd_cookie_cnt++;
3570509Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
35715084Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
3572509Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3573509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3574509Smrj 		(*windowp)->wd_dosync = B_TRUE;
3575509Smrj 	}
3576509Smrj 
3577509Smrj 	/*
3578509Smrj 	 * now go back to the current cookie and add it to the new window. set
3579509Smrj 	 * the new window size to what was left over from the previous
3580509Smrj 	 * cookie and what's in the current cookie.
3581509Smrj 	 */
3582509Smrj 	cookie++;
3583509Smrj 	(*windowp)->wd_cookie_cnt++;
3584509Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3585509Smrj 
3586509Smrj 	/*
3587509Smrj 	 * trim plus the next cookie could put us over maxxfer (a cookie can be
3588509Smrj 	 * a max size of maxxfer). Handle that case.
3589509Smrj 	 */
3590509Smrj 	if ((*windowp)->wd_size > dma->dp_maxxfer) {
3591509Smrj 		/*
3592509Smrj 		 * maxxfer is already a whole multiple of granularity, and this
3593509Smrj 		 * trim will be <= the previous trim (since a cookie can't be
3594509Smrj 		 * larger than maxxfer). Make things simple here.
3595509Smrj 		 */
3596509Smrj 		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
3597509Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3598509Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
35995084Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3600509Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3601509Smrj 		(*windowp)->wd_size -= trim_sz;
3602509Smrj 		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
3603509Smrj 
3604509Smrj 		/* save the buffer offsets for the next window */
3605509Smrj 		coffset = cookie->dmac_size - trim_sz;
3606509Smrj 		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3607509Smrj 
3608509Smrj 		/* setup the next window */
3609509Smrj 		(*windowp)++;
3610509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3611509Smrj 		(*windowp)->wd_cookie_cnt++;
3612509Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
36135084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
3614509Smrj 		    coffset;
3615509Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
3616509Smrj 	}
3617509Smrj 
3618509Smrj 	return (DDI_SUCCESS);
3619509Smrj }
3620509Smrj 
3621509Smrj 
3622509Smrj /*
3623509Smrj  * rootnex_copybuf_window_boundary()
3624509Smrj  *    Called in bind slowpath when we get to a window boundary because we used
3625509Smrj  *    up all the copy buffer that we have.
3626509Smrj  */
3627509Smrj static int
3628509Smrj rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3629509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
3630509Smrj     size_t *copybuf_used)
3631509Smrj {
3632509Smrj 	rootnex_sglinfo_t *sinfo;
3633509Smrj 	off_t new_offset;
3634509Smrj 	size_t trim_sz;
36355084Sjohnlev 	paddr_t paddr;
3636509Smrj 	off_t coffset;
3637509Smrj 	uint_t pidx;
3638509Smrj 	off_t poff;
3639509Smrj 
3640509Smrj 
3641509Smrj 	sinfo = &dma->dp_sglinfo;
3642509Smrj 
3643509Smrj 	/*
3644509Smrj 	 * the copy buffer should be a whole multiple of page size. We know that
3645509Smrj 	 * this cookie is <= MMU_PAGESIZE.
3646509Smrj 	 */
3647509Smrj 	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
3648509Smrj 
3649509Smrj 	/*
3650509Smrj 	 * from now on, all new windows in this bind need to be re-mapped during
3651509Smrj 	 * ddi_dma_getwin() (32-bit kernel only), i.e. we ran out of copybuf
3652509Smrj 	 * space...
3653509Smrj 	 */
3654509Smrj #if !defined(__amd64)
3655509Smrj 	dma->dp_cb_remaping = B_TRUE;
3656509Smrj #endif
3657509Smrj 
3658509Smrj 	/* reset copybuf used */
3659509Smrj 	*copybuf_used = 0;
3660509Smrj 
3661509Smrj 	/*
3662509Smrj 	 * if we don't have to trim (since granularity is set to 1), go to the
3663509Smrj 	 * next window and add the current cookie to it. We know the current
3664509Smrj 	 * cookie uses the copy buffer since we're in this code path.
3665509Smrj 	 */
3666509Smrj 	if (!dma->dp_trim_required) {
3667509Smrj 		(*windowp)++;
3668509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3669509Smrj 
3670509Smrj 		/* Add this cookie to the new window */
3671509Smrj 		(*windowp)->wd_cookie_cnt++;
3672509Smrj 		(*windowp)->wd_size += cookie->dmac_size;
3673509Smrj 		*copybuf_used += MMU_PAGESIZE;
3674509Smrj 		return (DDI_SUCCESS);
3675509Smrj 	}
3676509Smrj 
3677509Smrj 	/*
3678509Smrj 	 * *** may need to trim, figure it out.
3679509Smrj 	 */
3680509Smrj 
3681509Smrj 	/* figure out how much we need to trim from the window */
3682509Smrj 	if (dma->dp_granularity_power_2) {
3683509Smrj 		trim_sz = (*windowp)->wd_size &
3684509Smrj 		    (hp->dmai_attr.dma_attr_granular - 1);
3685509Smrj 	} else {
3686509Smrj 		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
3687509Smrj 	}
3688509Smrj 
3689509Smrj 	/*
3690509Smrj 	 * if the window's a whole multiple of granularity, go to the next
3691509Smrj 	 * window, init it, then add in the current cookie. We know the current
3692509Smrj 	 * cookie uses the copy buffer since we're in this code path.
3693509Smrj 	 */
3694509Smrj 	if (trim_sz == 0) {
3695509Smrj 		(*windowp)++;
3696509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3697509Smrj 
3698509Smrj 		/* Add this cookie to the new window */
3699509Smrj 		(*windowp)->wd_cookie_cnt++;
3700509Smrj 		(*windowp)->wd_size += cookie->dmac_size;
3701509Smrj 		*copybuf_used += MMU_PAGESIZE;
3702509Smrj 		return (DDI_SUCCESS);
3703509Smrj 	}
3704509Smrj 
3705509Smrj 	/*
3706509Smrj 	 * *** We figured it out, we definitely need to trim
3707509Smrj 	 */
3708509Smrj 
3709509Smrj 	/*
3710509Smrj 	 * make sure the driver isn't making us do something bad...
3711509Smrj 	 * Trimming and sgllen == 1 don't go together.
3712509Smrj 	 */
3713509Smrj 	if (hp->dmai_attr.dma_attr_sgllen == 1) {
3714509Smrj 		return (DDI_DMA_NOMAPPING);
3715509Smrj 	}
3716509Smrj 
3717509Smrj 	/*
3718509Smrj 	 * first, setup the current window to account for the trim. Need to go
3719509Smrj 	 * back to the last cookie for this. Some of the last cookie will be in
3720509Smrj 	 * the current window, and some of the last cookie will be in the new
3721509Smrj 	 * window. All of the current cookie will be in the new window.
3722509Smrj 	 */
3723509Smrj 	cookie--;
3724509Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3725509Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
37265084Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3727509Smrj 	ASSERT(cookie->dmac_size > trim_sz);
3728509Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3729509Smrj 	(*windowp)->wd_size -= trim_sz;
3730509Smrj 
3731509Smrj 	/*
3732509Smrj 	 * we're trimming the last cookie (not the current cookie). That last
3733509Smrj 	 * cookie may or may not have been using the copy buffer (we know the
3734509Smrj 	 * cookie passed in uses the copy buffer since we're in this code
3735509Smrj 	 * path).
3736509Smrj 	 *
3737509Smrj 	 * If the last cookie doesn't use the copy buffer, nothing special to
3738509Smrj 	 * do. However, if it does use the copy buffer, it will be both the
3739509Smrj 	 * last page in the current window and the first page in the next
3740509Smrj 	 * window. Since we are reusing the copy buffer (and KVA space on the
3741509Smrj 	 * 32-bit kernel), this page will use the end of the copy buffer in the
3742509Smrj 	 * current window, and the start of the copy buffer in the next window.
3743509Smrj 	 * Track that info... The cookie physical address was already set to
3744509Smrj 	 * the copy buffer physical address in setup_cookie.
3745509Smrj 	 */
3746509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3747509Smrj 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
3748509Smrj 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
3749509Smrj 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
3750509Smrj 		(*windowp)->wd_trim.tr_last_pidx = pidx;
3751509Smrj 		(*windowp)->wd_trim.tr_last_cbaddr =
3752509Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr;
3753509Smrj #if !defined(__amd64)
3754509Smrj 		(*windowp)->wd_trim.tr_last_kaddr =
3755509Smrj 		    dma->dp_pgmap[pidx].pm_kaddr;
3756509Smrj #endif
3757509Smrj 	}
3758509Smrj 
3759509Smrj 	/* save the buffer offsets for the next window */
3760509Smrj 	coffset = cookie->dmac_size - trim_sz;
3761509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3762509Smrj 
3763509Smrj 	/*
3764509Smrj 	 * set this now in case this is the first window. All other cases are
3765509Smrj 	 * set in dma_win().
3766509Smrj 	 */
3767509Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3768509Smrj 
3769509Smrj 	/*
3770509Smrj 	 * initialize the next window using what's left over in the previous
3771509Smrj 	 * cookie.
3772509Smrj 	 */
3773509Smrj 	(*windowp)++;
3774509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3775509Smrj 	(*windowp)->wd_cookie_cnt++;
3776509Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
37775084Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
3778509Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3779509Smrj 
3780509Smrj 	/*
3781509Smrj 	 * again, we're tracking if the last cookie uses the copy buffer.
3782509Smrj 	 * read the comment above for more info on why we need to track
3783509Smrj 	 * additional state.
3784509Smrj 	 *
3785509Smrj 	 * For the first cookie in the new window, we need to reset the
3786509Smrj 	 * physical address to DMA into, to the start of the copy buffer plus
3787509Smrj 	 * any initial page offset which may be present.
3788509Smrj 	 */
3789509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3790509Smrj 		(*windowp)->wd_dosync = B_TRUE;
3791509Smrj 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
3792509Smrj 		(*windowp)->wd_trim.tr_first_pidx = pidx;
3793509Smrj 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
3794509Smrj 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
37955084Sjohnlev 
37965084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
37975084Sjohnlev 		    poff;
37985084Sjohnlev #ifdef __xpv
37995084Sjohnlev 		/*
38005084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
38015084Sjohnlev 		 * the cookies with MAs instead of PAs.
38025084Sjohnlev 		 */
38035084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr =
38045084Sjohnlev 		    ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
38055084Sjohnlev #else
38065084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = paddr;
38075084Sjohnlev #endif
38085084Sjohnlev 
3809509Smrj #if !defined(__amd64)
3810509Smrj 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
3811509Smrj #endif
3812509Smrj 		/* account for the cookie copybuf usage in the new window */
3813509Smrj 		*copybuf_used += MMU_PAGESIZE;
3814509Smrj 
3815509Smrj 		/*
3816509Smrj 		 * every piece of code has to have a hack, and here is this
3817509Smrj 		 * one's :-)
3818509Smrj 		 *
3819509Smrj 		 * There is a complex interaction between setup_cookie and the
3820509Smrj 		 * copybuf window boundary. The complexity had to be in either
3821509Smrj 		 * the maxxfer window, or the copybuf window, and I chose the
3822509Smrj 		 * copybuf code.
3823509Smrj 		 *
3824509Smrj 		 * So in this code path, we have taken the last cookie,
3825509Smrj 		 * virtually broken it in half due to the trim, and it happens
3826509Smrj 		 * to use the copybuf which further complicates life. At the
3827509Smrj 		 * same time, we have already set up the current cookie, which
3828509Smrj 		 * is now wrong. More background info: the current cookie uses
3829509Smrj 		 * the copybuf, so it is only a page long max. So we need to
3830509Smrj 		 * fix the current cookie's copy buffer address, physical
3831509Smrj 		 * address, and kva for the 32-bit kernel. We do this by
3832509Smrj 		 * bumping them by a page size (of course, we can't do this on
3833509Smrj 		 * the physical address since the copy buffer may not be
3834509Smrj 		 * physically contiguous).
3835509Smrj 		 */
3836509Smrj 		cookie++;
3837509Smrj 		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
38385084Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
38395084Sjohnlev 
38405084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
3841509Smrj 		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
38425084Sjohnlev #ifdef __xpv
38435084Sjohnlev 		/*
38445084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
38455084Sjohnlev 		 * the cookies with MAs instead of PAs.
38465084Sjohnlev 		 */
38475084Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
38485084Sjohnlev #else
38495084Sjohnlev 		cookie->dmac_laddress = paddr;
38505084Sjohnlev #endif
38515084Sjohnlev 
3852509Smrj #if !defined(__amd64)
3853509Smrj 		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
3854509Smrj 		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
3855509Smrj #endif
3856509Smrj 	} else {
3857509Smrj 		/* go back to the current cookie */
3858509Smrj 		cookie++;
3859509Smrj 	}
3860509Smrj 
3861509Smrj 	/*
3862509Smrj 	 * add the current cookie to the new window. set the new window size to
3863509Smrj 	 * what was left over from the previous cookie and what's in the
3864509Smrj 	 * current cookie.
3865509Smrj 	 */
3866509Smrj 	(*windowp)->wd_cookie_cnt++;
3867509Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3868509Smrj 	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
3869509Smrj 
3870509Smrj 	/*
3871509Smrj 	 * we know that the cookie passed in always uses the copy buffer. We
3872509Smrj 	 * wouldn't be here if it didn't.
3873509Smrj 	 */
3874509Smrj 	*copybuf_used += MMU_PAGESIZE;
3875509Smrj 
3876509Smrj 	return (DDI_SUCCESS);
3877509Smrj }
3878509Smrj 
3879509Smrj 
3880509Smrj /*
3881509Smrj  * rootnex_maxxfer_window_boundary()
3882509Smrj  *    Called in bind slowpath when we get to a window boundary because we will
3883509Smrj  *    go over maxxfer.
3884509Smrj  */
3885509Smrj static int
3886509Smrj rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3887509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
3888509Smrj {
3889509Smrj 	size_t dmac_size;
3890509Smrj 	off_t new_offset;
3891509Smrj 	size_t trim_sz;
3892509Smrj 	off_t coffset;
3893509Smrj 
3894509Smrj 
3895509Smrj 	/*
3896509Smrj 	 * calculate how much we have to trim off of the current cookie to equal
3897509Smrj 	 * maxxfer. We don't have to account for granularity here since our
3898509Smrj 	 * maxxfer already takes that into account.
3899509Smrj 	 */
3900509Smrj 	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
3901509Smrj 	ASSERT(trim_sz <= cookie->dmac_size);
3902509Smrj 	ASSERT(trim_sz <= dma->dp_maxxfer);
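	/*
	 * e.g. (hypothetical values) wd_size == 120K, dmac_size == 16K, and
	 * dp_maxxfer == 128K give trim_sz == 136K - 128K == 8K; the last 8K
	 * of this cookie spills into the next window.
	 */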
3903509Smrj 
3904509Smrj 	/* save cookie size since we need it later and we might change it */
3905509Smrj 	dmac_size = cookie->dmac_size;
3906509Smrj 
3907509Smrj 	/*
3908509Smrj 	 * if we're not trimming the entire cookie, setup the current window to
3909509Smrj 	 * account for the trim.
3910509Smrj 	 */
3911509Smrj 	if (trim_sz < cookie->dmac_size) {
3912509Smrj 		(*windowp)->wd_cookie_cnt++;
3913509Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3914509Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
39155084Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3916509Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3917509Smrj 		(*windowp)->wd_size = dma->dp_maxxfer;
3918509Smrj 
3919509Smrj 		/*
3920509Smrj 		 * set the adjusted cookie size now in case this is the first
3921509Smrj 		 * window. All other windows are taken care of in dma_win().
3922509Smrj 		 */
3923509Smrj 		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3924509Smrj 	}
3925509Smrj 
3926509Smrj 	/*
3927509Smrj 	 * coffset is the current offset within the cookie, new_offset is the
3928509Smrj 	 * current offset with the entire buffer.
3929509Smrj 	 */
3930509Smrj 	coffset = dmac_size - trim_sz;
3931509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3932509Smrj 
3933509Smrj 	/* initialize the next window */
3934509Smrj 	(*windowp)++;
3935509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3936509Smrj 	(*windowp)->wd_cookie_cnt++;
3937509Smrj 	(*windowp)->wd_size = trim_sz;
3938509Smrj 	if (trim_sz < dmac_size) {
3939509Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
39405084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
3941509Smrj 		    coffset;
3942509Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
3943509Smrj 	}
3944509Smrj 
3945509Smrj 	return (DDI_SUCCESS);
3946509Smrj }
3947509Smrj 
3948509Smrj 
3949509Smrj /*ARGSUSED*/
3950509Smrj static int
3951*7613SVikram.Hegde@Sun.COM rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
3952509Smrj     off_t off, size_t len, uint_t cache_flags)
3953509Smrj {
3954509Smrj 	rootnex_sglinfo_t *sinfo;
3955509Smrj 	rootnex_pgmap_t *cbpage;
3956509Smrj 	rootnex_window_t *win;
3957509Smrj 	ddi_dma_impl_t *hp;
3958509Smrj 	rootnex_dma_t *dma;
3959509Smrj 	caddr_t fromaddr;
3960509Smrj 	caddr_t toaddr;
3961509Smrj 	uint_t psize;
3962509Smrj 	off_t offset;
3963509Smrj 	uint_t pidx;
3964509Smrj 	size_t size;
3965509Smrj 	off_t poff;
3966509Smrj 	int e;
3967509Smrj 
3968509Smrj 
3969509Smrj 	hp = (ddi_dma_impl_t *)handle;
3970509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
3971509Smrj 	sinfo = &dma->dp_sglinfo;
3972509Smrj 
3973509Smrj 	/*
3974509Smrj 	 * if we don't have any windows, we don't need to sync. A copybuf
3975509Smrj 	 * will cause us to have at least one window.
3976509Smrj 	 */
3977509Smrj 	if (dma->dp_window == NULL) {
3978509Smrj 		return (DDI_SUCCESS);
3979509Smrj 	}
3980509Smrj 
3981509Smrj 	/* This window may not need to be sync'd */
3982509Smrj 	win = &dma->dp_window[dma->dp_current_win];
3983509Smrj 	if (!win->wd_dosync) {
3984509Smrj 		return (DDI_SUCCESS);
3985509Smrj 	}
3986509Smrj 
3987509Smrj 	/* handle off and len special cases */
3988509Smrj 	if ((off == 0) || (rootnex_sync_ignore_params)) {
3989509Smrj 		offset = win->wd_offset;
3990509Smrj 	} else {
3991509Smrj 		offset = off;
3992509Smrj 	}
3993509Smrj 	if ((len == 0) || (rootnex_sync_ignore_params)) {
3994509Smrj 		size = win->wd_size;
3995509Smrj 	} else {
3996509Smrj 		size = len;
3997509Smrj 	}
3998509Smrj 
3999509Smrj 	/* check the sync args to make sure they make a little sense */
4000509Smrj 	if (rootnex_sync_check_parms) {
4001509Smrj 		e = rootnex_valid_sync_parms(hp, win, offset, size,
4002509Smrj 		    cache_flags);
4003509Smrj 		if (e != DDI_SUCCESS) {
4004509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
4005509Smrj 			return (DDI_FAILURE);
4006509Smrj 		}
4007509Smrj 	}
4008509Smrj 
4009509Smrj 	/*
4010509Smrj 	 * special case the first page to handle the offset into the page. The
4011509Smrj 	 * offset to the current page for our buffer is the offset into the
4012509Smrj 	 * first page of the buffer plus our current offset into the buffer
4013509Smrj 	 * itself, masked of course.
4014509Smrj 	 */
4015509Smrj 	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
4016509Smrj 	psize = MIN((MMU_PAGESIZE - poff), size);
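	/*
	 * Worked example (hypothetical values): with si_buf_offset + offset
	 * = 0x1234 and MMU_PAGESIZE = 0x1000, poff = 0x234, so the first
	 * pass below copies at most 0x1000 - 0x234 = 0xdcc bytes; all later
	 * passes start page aligned (poff = 0) and copy up to a full page.
	 */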
4017509Smrj 
4018509Smrj 	/* go through all the pages that we want to sync */
4019509Smrj 	while (size > 0) {
4020509Smrj 		/*
4021509Smrj 		 * Calculate the page index relative to the start of the buffer.
4022509Smrj 		 * The index to the current page for our buffer is the offset
4023509Smrj 		 * into the first page of the buffer plus our current offset
4024509Smrj 		 * into the buffer itself, shifted of course...
4025509Smrj 		 */
4026509Smrj 		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
4027509Smrj 		ASSERT(pidx < sinfo->si_max_pages);
4028509Smrj 
4029509Smrj 		/*
4030509Smrj 		 * if this page uses the copy buffer, we need to sync it,
4031509Smrj 		 * otherwise, go on to the next page.
4032509Smrj 		 */
4033509Smrj 		cbpage = &dma->dp_pgmap[pidx];
4034509Smrj 		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
4035509Smrj 		    (cbpage->pm_uses_copybuf == B_FALSE));
4036509Smrj 		if (cbpage->pm_uses_copybuf) {
4037509Smrj 			/* cbaddr and kaddr should be page aligned */
4038509Smrj 			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
4039509Smrj 			    MMU_PAGEOFFSET) == 0);
4040509Smrj 			ASSERT(((uintptr_t)cbpage->pm_kaddr &
4041509Smrj 			    MMU_PAGEOFFSET) == 0);
4042509Smrj 
4043509Smrj 			/*
4044509Smrj 			 * if we're copying for the device, we are going to
4045509Smrj 			 * copy from the driver's buffer to the rootnex
4046509Smrj 			 * allocated copy buffer.
4047509Smrj 			 */
4048509Smrj 			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
4049509Smrj 				fromaddr = cbpage->pm_kaddr + poff;
4050509Smrj 				toaddr = cbpage->pm_cbaddr + poff;
4051509Smrj 				DTRACE_PROBE2(rootnex__sync__dev,
4052509Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
4053509Smrj 
4054509Smrj 			/*
4055509Smrj 			 * if we're copying for the cpu/kernel, we are going to
4056509Smrj 			 * copy from the rootnex allocated copy buffer to the
4057509Smrj 			 * driver's buffer.
4058509Smrj 			 */
4059509Smrj 			} else {
4060509Smrj 				fromaddr = cbpage->pm_cbaddr + poff;
4061509Smrj 				toaddr = cbpage->pm_kaddr + poff;
4062509Smrj 				DTRACE_PROBE2(rootnex__sync__cpu,
4063509Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
4064509Smrj 			}
4065509Smrj 
4066509Smrj 			bcopy(fromaddr, toaddr, psize);
4067509Smrj 		}
4068509Smrj 
4069509Smrj 		/*
4070509Smrj 		 * decrement size until we're done, update our offset into the
4071509Smrj 		 * buffer, and get the next page size.
4072509Smrj 		 */
4073509Smrj 		size -= psize;
4074509Smrj 		offset += psize;
4075509Smrj 		psize = MIN(MMU_PAGESIZE, size);
4076509Smrj 
4077509Smrj 		/* page offset is zero for the rest of this loop */
4078509Smrj 		poff = 0;
4079509Smrj 	}
4080509Smrj 
4081509Smrj 	return (DDI_SUCCESS);
4082509Smrj }
4083509Smrj 
4084*7613SVikram.Hegde@Sun.COM /*
4085*7613SVikram.Hegde@Sun.COM  * rootnex_dma_sync()
4086*7613SVikram.Hegde@Sun.COM  *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
4087*7613SVikram.Hegde@Sun.COM  *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
4088*7613SVikram.Hegde@Sun.COM  *    is set, ddi_dma_sync() returns immediately passing back success.
4089*7613SVikram.Hegde@Sun.COM  */
4090*7613SVikram.Hegde@Sun.COM /*ARGSUSED*/
4091*7613SVikram.Hegde@Sun.COM static int
4092*7613SVikram.Hegde@Sun.COM rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4093*7613SVikram.Hegde@Sun.COM     off_t off, size_t len, uint_t cache_flags)
4094*7613SVikram.Hegde@Sun.COM {
4095*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
4096*7613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
4097*7613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_sync(dip, rdip, handle, off, len,
4098*7613SVikram.Hegde@Sun.COM 		    cache_flags));
4099*7613SVikram.Hegde@Sun.COM 	}
4100*7613SVikram.Hegde@Sun.COM #endif
4101*7613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_sync(dip, rdip, handle, off, len,
4102*7613SVikram.Hegde@Sun.COM 	    cache_flags));
4103*7613SVikram.Hegde@Sun.COM }
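
/*
 * A minimal sketch of the driver-side usage this path serves (hypothetical
 * driver code; "h" stands in for a bound handle that uses the copy buffer):
 *
 *	(void) ddi_dma_sync(h, 0, 0, DDI_DMA_SYNC_FORDEV);
 *	... start the device transfer ...
 *	... wait for the device to signal completion ...
 *	(void) ddi_dma_sync(h, 0, 0, DDI_DMA_SYNC_FORCPU);
 *
 * An offset and length of 0/0 sync the whole current window, which lands in
 * the off == 0 and len == 0 special cases in rootnex_coredma_sync() above.
 */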
4104509Smrj 
4105509Smrj /*
4106509Smrj  * rootnex_valid_sync_parms()
4107509Smrj  *    checks the parameters passed to sync to verify they are correct.
4108509Smrj  */
4109509Smrj static int
4110509Smrj rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
4111509Smrj     off_t offset, size_t size, uint_t cache_flags)
4112509Smrj {
4113509Smrj 	off_t woffset;
4114509Smrj 
4115509Smrj 
4116509Smrj 	/*
4117509Smrj 	 * the first part of the test is to make sure the offset passed in is
4118509Smrj 	 * within the window.
4119509Smrj 	 */
4120509Smrj 	if (offset < win->wd_offset) {
4121509Smrj 		return (DDI_FAILURE);
4122509Smrj 	}
4123509Smrj 
4124509Smrj 	/*
4125509Smrj 	 * the second and last part of the test is to make sure the offset and
4126509Smrj 	 * length passed in are within the window.
4127509Smrj 	 */
4128509Smrj 	woffset = offset - win->wd_offset;
4129509Smrj 	if ((woffset + size) > win->wd_size) {
4130509Smrj 		return (DDI_FAILURE);
4131509Smrj 	}
4132509Smrj 
4133509Smrj 	/*
4134509Smrj 	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
4135509Smrj 	 * be set too.
4136509Smrj 	 */
4137509Smrj 	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
4138509Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
4139509Smrj 		return (DDI_SUCCESS);
4140509Smrj 	}
4141509Smrj 
4142509Smrj 	/*
4143509Smrj 	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
4144509Smrj 	 * should be set. Also DDI_DMA_READ should be set in the flags.
4145509Smrj 	 */
4146509Smrj 	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
4147509Smrj 	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
4148509Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
4149509Smrj 		return (DDI_SUCCESS);
4150509Smrj 	}
4151509Smrj 
4152509Smrj 	return (DDI_FAILURE);
4153509Smrj }
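
/*
 * Worked example of the range checks above (hypothetical values): for a
 * window with wd_offset = 0x1000 and wd_size = 0x2000, a sync with
 * offset = 0x1800 and size = 0x800 passes (woffset = 0x800 and
 * 0x800 + 0x800 <= 0x2000), offset = 0x800 fails the first check, and
 * offset = 0x2800 with size = 0x1000 fails the second (woffset = 0x1800
 * and 0x1800 + 0x1000 > 0x2000). The cache_flags check then requires
 * DDI_DMA_WRITE for FORDEV syncs and DDI_DMA_READ for FORCPU/FORKERNEL
 * syncs.
 */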
4154509Smrj 
4155509Smrj 
4156509Smrj /*ARGSUSED*/
4157509Smrj static int
4158*7613SVikram.Hegde@Sun.COM rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4159509Smrj     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
4160509Smrj     uint_t *ccountp)
4161509Smrj {
4162509Smrj 	rootnex_window_t *window;
4163509Smrj 	rootnex_trim_t *trim;
4164509Smrj 	ddi_dma_impl_t *hp;
4165509Smrj 	rootnex_dma_t *dma;
4166509Smrj #if !defined(__amd64)
4167509Smrj 	rootnex_sglinfo_t *sinfo;
4168509Smrj 	rootnex_pgmap_t *pmap;
4169509Smrj 	uint_t pidx;
4170509Smrj 	uint_t pcnt;
4171509Smrj 	off_t poff;
4172509Smrj 	int i;
4173509Smrj #endif
4174509Smrj 
4175509Smrj 
4176509Smrj 	hp = (ddi_dma_impl_t *)handle;
4177509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
4178509Smrj #if !defined(__amd64)
4179509Smrj 	sinfo = &dma->dp_sglinfo;
4180509Smrj #endif
4181509Smrj 
4182509Smrj 	/* If we try and get a window which doesn't exist, return failure */
4183509Smrj 	if (win >= hp->dmai_nwin) {
4184509Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4185509Smrj 		return (DDI_FAILURE);
4186509Smrj 	}
4187509Smrj 
4188509Smrj 	/*
4189509Smrj 	 * if we don't have any windows, and they're asking for the first
4190509Smrj 	 * window, setup the cookie pointer to the first cookie in the bind.
4191509Smrj 	 * setup our return values, then increment the cookie since we return
4192509Smrj 	 * the first cookie on the stack.
4193509Smrj 	 */
4194509Smrj 	if (dma->dp_window == NULL) {
4195509Smrj 		if (win != 0) {
4196509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4197509Smrj 			return (DDI_FAILURE);
4198509Smrj 		}
4199509Smrj 		hp->dmai_cookie = dma->dp_cookies;
4200509Smrj 		*offp = 0;
4201509Smrj 		*lenp = dma->dp_dma.dmao_size;
4202509Smrj 		*ccountp = dma->dp_sglinfo.si_sgl_size;
4203509Smrj 		*cookiep = hp->dmai_cookie[0];
4204509Smrj 		hp->dmai_cookie++;
4205509Smrj 		return (DDI_SUCCESS);
4206509Smrj 	}
4207509Smrj 
4208509Smrj 	/* sync the old window before moving on to the new one */
4209509Smrj 	window = &dma->dp_window[dma->dp_current_win];
4210509Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
4211509Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
4212509Smrj 		    DDI_DMA_SYNC_FORCPU);
4213509Smrj 	}
4214509Smrj 
4215509Smrj #if !defined(__amd64)
4216509Smrj 	/*
4217509Smrj 	 * before we move to the next window, if we need to re-map, unmap all
4218509Smrj 	 * the pages in this window.
4219509Smrj 	 */
4220509Smrj 	if (dma->dp_cb_remaping) {
4221509Smrj 		/*
4222509Smrj 		 * If we switch to this window again, we'll need to map it
4223509Smrj 		 * in on the fly next time.
4224509Smrj 		 */
4225509Smrj 		window->wd_remap_copybuf = B_TRUE;
4226509Smrj 
4227509Smrj 		/*
4228509Smrj 		 * calculate the page index into the buffer where this window
4229509Smrj 		 * starts, and the number of pages this window takes up.
4230509Smrj 		 */
4231509Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4232509Smrj 		    MMU_PAGESHIFT;
4233509Smrj 		poff = (sinfo->si_buf_offset + window->wd_offset) &
4234509Smrj 		    MMU_PAGEOFFSET;
4235509Smrj 		pcnt = mmu_btopr(window->wd_size + poff);
4236509Smrj 		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
4237509Smrj 
4238509Smrj 		/* unmap pages which are currently mapped in this window */
4239509Smrj 		for (i = 0; i < pcnt; i++) {
4240509Smrj 			if (dma->dp_pgmap[pidx].pm_mapped) {
4241509Smrj 				hat_unload(kas.a_hat,
4242509Smrj 				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
4243509Smrj 				    HAT_UNLOAD);
4244509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4245509Smrj 			}
4246509Smrj 			pidx++;
4247509Smrj 		}
4248509Smrj 	}
4249509Smrj #endif
4250509Smrj 
4251509Smrj 	/*
4252509Smrj 	 * Move to the new window.
4253509Smrj 	 * NOTE: current_win must be set for sync to work right
4254509Smrj 	 */
4255509Smrj 	dma->dp_current_win = win;
4256509Smrj 	window = &dma->dp_window[win];
4257509Smrj 
4258509Smrj 	/* if needed, adjust the first and/or last cookies for trim */
4259509Smrj 	trim = &window->wd_trim;
4260509Smrj 	if (trim->tr_trim_first) {
42615084Sjohnlev 		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
4262509Smrj 		window->wd_first_cookie->dmac_size = trim->tr_first_size;
4263509Smrj #if !defined(__amd64)
4264509Smrj 		window->wd_first_cookie->dmac_type =
4265509Smrj 		    (window->wd_first_cookie->dmac_type &
4266509Smrj 		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
4267509Smrj #endif
4268509Smrj 		if (trim->tr_first_copybuf_win) {
4269509Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
4270509Smrj 			    trim->tr_first_cbaddr;
4271509Smrj #if !defined(__amd64)
4272509Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
4273509Smrj 			    trim->tr_first_kaddr;
4274509Smrj #endif
4275509Smrj 		}
4276509Smrj 	}
4277509Smrj 	if (trim->tr_trim_last) {
42785084Sjohnlev 		trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
4279509Smrj 		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
4280509Smrj 		if (trim->tr_last_copybuf_win) {
4281509Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
4282509Smrj 			    trim->tr_last_cbaddr;
4283509Smrj #if !defined(__amd64)
4284509Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
4285509Smrj 			    trim->tr_last_kaddr;
4286509Smrj #endif
4287509Smrj 		}
4288509Smrj 	}
4289509Smrj 
4290509Smrj 	/*
4291509Smrj 	 * setup the cookie pointer to the first cookie in the window. setup
4292509Smrj 	 * our return values, then increment the cookie since we return the
4293509Smrj 	 * first cookie on the stack.
4294509Smrj 	 */
4295509Smrj 	hp->dmai_cookie = window->wd_first_cookie;
4296509Smrj 	*offp = window->wd_offset;
4297509Smrj 	*lenp = window->wd_size;
4298509Smrj 	*ccountp = window->wd_cookie_cnt;
4299509Smrj 	*cookiep = hp->dmai_cookie[0];
4300509Smrj 	hp->dmai_cookie++;
4301509Smrj 
4302509Smrj #if !defined(__amd64)
4303509Smrj 	/* re-map copybuf if required for this window */
4304509Smrj 	if (dma->dp_cb_remaping) {
4305509Smrj 		/*
4306509Smrj 		 * calculate the page index into the buffer where this
4307509Smrj 		 * window starts.
4308509Smrj 		 */
4309509Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4310509Smrj 		    MMU_PAGESHIFT;
4311509Smrj 		ASSERT(pidx < sinfo->si_max_pages);
4312509Smrj 
4313509Smrj 		/*
4314509Smrj 		 * the first page can get unmapped if it's shared with the
4315509Smrj 		 * previous window. Even if the rest of this window is already
4316509Smrj 		 * mapped in, we still need to check this one.
4317509Smrj 		 */
4318509Smrj 		pmap = &dma->dp_pgmap[pidx];
4319509Smrj 		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
4320509Smrj 			if (pmap->pm_pp != NULL) {
4321509Smrj 				pmap->pm_mapped = B_TRUE;
4322509Smrj 				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
4323509Smrj 			} else if (pmap->pm_vaddr != NULL) {
4324509Smrj 				pmap->pm_mapped = B_TRUE;
4325509Smrj 				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
4326509Smrj 				    pmap->pm_kaddr);
4327509Smrj 			}
4328509Smrj 		}
4329509Smrj 		pidx++;
4330509Smrj 
4331509Smrj 		/* map in the rest of the pages if required */
4332509Smrj 		if (window->wd_remap_copybuf) {
4333509Smrj 			window->wd_remap_copybuf = B_FALSE;
4334509Smrj 
4335509Smrj 			/* figure out how many pages this window takes up */
4336509Smrj 			poff = (sinfo->si_buf_offset + window->wd_offset) &
4337509Smrj 			    MMU_PAGEOFFSET;
4338509Smrj 			pcnt = mmu_btopr(window->wd_size + poff);
4339509Smrj 			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
4340509Smrj 
4341509Smrj 			/* map pages which require it */
4342509Smrj 			for (i = 1; i < pcnt; i++) {
4343509Smrj 				pmap = &dma->dp_pgmap[pidx];
4344509Smrj 				if (pmap->pm_uses_copybuf) {
4345509Smrj 					ASSERT(pmap->pm_mapped == B_FALSE);
4346509Smrj 					if (pmap->pm_pp != NULL) {
4347509Smrj 						pmap->pm_mapped = B_TRUE;
4348509Smrj 						i86_pp_map(pmap->pm_pp,
4349509Smrj 						    pmap->pm_kaddr);
4350509Smrj 					} else if (pmap->pm_vaddr != NULL) {
4351509Smrj 						pmap->pm_mapped = B_TRUE;
4352509Smrj 						i86_va_map(pmap->pm_vaddr,
4353509Smrj 						    sinfo->si_asp,
4354509Smrj 						    pmap->pm_kaddr);
4355509Smrj 					}
4356509Smrj 				}
4357509Smrj 				pidx++;
4358509Smrj 			}
4359509Smrj 		}
4360509Smrj 	}
4361509Smrj #endif
4362509Smrj 
4363509Smrj 	/* if the new window uses the copy buffer, sync it for the device */
4364509Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
4365509Smrj 		(void) rootnex_dma_sync(dip, rdip, handle, 0, 0,
4366509Smrj 		    DDI_DMA_SYNC_FORDEV);
4367509Smrj 	}
4368509Smrj 
4369509Smrj 	return (DDI_SUCCESS);
4370509Smrj }
4371509Smrj 
4372*7613SVikram.Hegde@Sun.COM /*
4373*7613SVikram.Hegde@Sun.COM  * rootnex_dma_win()
4374*7613SVikram.Hegde@Sun.COM  *    called from ddi_dma_getwin()
4375*7613SVikram.Hegde@Sun.COM  */
4376*7613SVikram.Hegde@Sun.COM /*ARGSUSED*/
4377*7613SVikram.Hegde@Sun.COM static int
4378*7613SVikram.Hegde@Sun.COM rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4379*7613SVikram.Hegde@Sun.COM     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
4380*7613SVikram.Hegde@Sun.COM     uint_t *ccountp)
4381*7613SVikram.Hegde@Sun.COM {
4382*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
4383*7613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
4384*7613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp,
4385*7613SVikram.Hegde@Sun.COM 		    cookiep, ccountp));
4386*7613SVikram.Hegde@Sun.COM 	}
4387*7613SVikram.Hegde@Sun.COM #endif
4388*7613SVikram.Hegde@Sun.COM 
4389*7613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp,
4390*7613SVikram.Hegde@Sun.COM 	    cookiep, ccountp));
4391*7613SVikram.Hegde@Sun.COM }
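
/*
 * A minimal sketch of the consumer side this path serves (hypothetical
 * driver code; "h" stands in for a handle whose bind returned
 * DDI_DMA_PARTIAL_MAP):
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t nwin, ccnt, w, i;
 *	size_t len;
 *	off_t off;
 *
 *	(void) ddi_dma_numwin(h, &nwin);
 *	for (w = 0; w < nwin; w++) {
 *		if (ddi_dma_getwin(h, w, &off, &len, &cookie,
 *		    &ccnt) != DDI_SUCCESS)
 *			break;
 *		(program the device with cookie, then walk the
 *		remaining ccnt - 1 cookies)
 *		for (i = 1; i < ccnt; i++)
 *			ddi_dma_nextcookie(h, &cookie);
 *	}
 *
 * Each ddi_dma_getwin() call arrives here; the FORCPU sync of the old
 * window and the FORDEV sync of the new one happen in
 * rootnex_coredma_win() above.
 */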
4392509Smrj 
4393509Smrj /*
4394509Smrj  * ************************
4395509Smrj  *  obsoleted dma routines
4396509Smrj  * ************************
4397509Smrj  */
4398509Smrj 
4399509Smrj /* ARGSUSED */
4400509Smrj static int
4401*7613SVikram.Hegde@Sun.COM rootnex_coredma_map(dev_info_t *dip, dev_info_t *rdip,
4402*7613SVikram.Hegde@Sun.COM     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
4403509Smrj {
4404509Smrj #if defined(__amd64)
4405509Smrj 	/*
4406509Smrj 	 * this interface is not supported in 64-bit x86 kernel. See comment in
4407509Smrj 	 * rootnex_dma_mctl()
4408509Smrj 	 */
4409509Smrj 	return (DDI_DMA_NORESOURCES);
4410509Smrj 
4411509Smrj #else /* 32-bit x86 kernel */
4412509Smrj 	ddi_dma_handle_t *lhandlep;
4413509Smrj 	ddi_dma_handle_t lhandle;
4414509Smrj 	ddi_dma_cookie_t cookie;
4415509Smrj 	ddi_dma_attr_t dma_attr;
4416509Smrj 	ddi_dma_lim_t *dma_lim;
4417509Smrj 	uint_t ccnt;
4418509Smrj 	int e;
4419509Smrj 
4420509Smrj 
4421509Smrj 	/*
4422509Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
4423509Smrj 	 * we'll use local state. Otherwise, use the handle pointer passed in.
4424509Smrj 	 */
4425509Smrj 	if (handlep == NULL) {
4426509Smrj 		lhandlep = &lhandle;
4427509Smrj 	} else {
4428509Smrj 		lhandlep = handlep;
4429509Smrj 	}
4430509Smrj 
4431509Smrj 	/* convert the limit structure to a dma_attr one */
4432509Smrj 	dma_lim = dmareq->dmar_limits;
4433509Smrj 	dma_attr.dma_attr_version = DMA_ATTR_V0;
4434509Smrj 	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
4435509Smrj 	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
4436509Smrj 	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
4437509Smrj 	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
4438509Smrj 	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
4439509Smrj 	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
4440509Smrj 	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
4441509Smrj 	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
4442509Smrj 	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
4443509Smrj 	dma_attr.dma_attr_align = MMU_PAGESIZE;
4444509Smrj 	dma_attr.dma_attr_flags = 0;
4445509Smrj 
4446509Smrj 	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
4447509Smrj 	    dmareq->dmar_arg, lhandlep);
4448509Smrj 	if (e != DDI_SUCCESS) {
4449509Smrj 		return (e);
4450509Smrj 	}
4451509Smrj 
4452509Smrj 	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
4453509Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
4454509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4455509Smrj 		return (e);
4456509Smrj 	}
4457509Smrj 
4458509Smrj 	/*
4459509Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
4460509Smrj 	 * free up the local state and return the result.
4461509Smrj 	 */
4462509Smrj 	if (handlep == NULL) {
4463509Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
4464509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4465509Smrj 		if (e == DDI_DMA_MAPPED) {
4466509Smrj 			return (DDI_DMA_MAPOK);
44670Sstevel@tonic-gate 		} else {
4468509Smrj 			return (DDI_DMA_NOMAPPING);
4469509Smrj 		}
4470509Smrj 	}
4471509Smrj 
4472509Smrj 	return (e);
4473509Smrj #endif /* defined(__amd64) */
4474509Smrj }
4475509Smrj 
4476*7613SVikram.Hegde@Sun.COM /*
4477*7613SVikram.Hegde@Sun.COM  * rootnex_dma_map()
4478*7613SVikram.Hegde@Sun.COM  *    called from ddi_dma_setup()
4479*7613SVikram.Hegde@Sun.COM  */
4480*7613SVikram.Hegde@Sun.COM /* ARGSUSED */
4481*7613SVikram.Hegde@Sun.COM static int
4482*7613SVikram.Hegde@Sun.COM rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
4483*7613SVikram.Hegde@Sun.COM     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
4484*7613SVikram.Hegde@Sun.COM {
4485*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
4486*7613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
4487*7613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_map(dip, rdip, dmareq, handlep));
4488*7613SVikram.Hegde@Sun.COM 	}
4489*7613SVikram.Hegde@Sun.COM #endif
4490*7613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_map(dip, rdip, dmareq, handlep));
4491*7613SVikram.Hegde@Sun.COM }
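
/*
 * A minimal sketch of the legacy consumer this path serves (hypothetical
 * driver code using the obsolete interface):
 *
 *	struct ddi_dma_req req;
 *	ddi_dma_handle_t h;
 *
 *	... fill in req, including req.dmar_limits (the ddi_dma_lim_t
 *	converted above) and the object to map ...
 *	if (ddi_dma_setup(dip, &req, &h) == DDI_DMA_MAPPED)
 *		... use h; tear it down later via ddi_dma_free() ...
 *
 * rootnex_coredma_map() above treats a NULL handlep as a feasibility probe
 * and answers with DDI_DMA_MAPOK or DDI_DMA_NOMAPPING instead of performing
 * a real bind.
 */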
4492509Smrj 
4493509Smrj /*
4494509Smrj  * rootnex_dma_mctl()
4495509Smrj  *
4496509Smrj  */
4497509Smrj /* ARGSUSED */
4498509Smrj static int
4499*7613SVikram.Hegde@Sun.COM rootnex_coredma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4500509Smrj     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
4501509Smrj     uint_t cache_flags)
4502509Smrj {
4503509Smrj #if defined(__amd64)
4504509Smrj 	/*
4505509Smrj 	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC were changed to have a
4506509Smrj 	 * common implementation in genunix, so they no longer have x86
4507509Smrj 	 * specific functionality which called into dma_ctl.
4508509Smrj 	 *
4509509Smrj 	 * The rest of the obsoleted interfaces were never supported in the
4510509Smrj 	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
4511509Smrj 	 * was not ported to the x86 64-bit kernel due to serious x86 rootnex
4512509Smrj 	 * implementation issues.
4513509Smrj 	 *
4514509Smrj 	 * If you can't use DDI_DMA_SEGTOC, then DDI_DMA_NEXTSEG, DDI_DMA_FREE,
4515509Smrj 	 * and DDI_DMA_NEXTWIN are useless since you can't get to the cookie,
4516509Smrj 	 * so we reflect that now too...
4517509Smrj 	 *
4518509Smrj 	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
4519509Smrj 	 * not going to put this functionality into the 64-bit x86 kernel now.
4520509Smrj 	 * It wasn't ported to the 64-bit kernel for s10, and there is no
4521509Smrj 	 * reason to change that in a future release.
4522509Smrj 	 */
4523509Smrj 	return (DDI_FAILURE);
4524509Smrj 
4525509Smrj #else /* 32-bit x86 kernel */
4526509Smrj 	ddi_dma_cookie_t lcookie;
4527509Smrj 	ddi_dma_cookie_t *cookie;
4528509Smrj 	rootnex_window_t *window;
4529509Smrj 	ddi_dma_impl_t *hp;
4530509Smrj 	rootnex_dma_t *dma;
4531509Smrj 	uint_t nwin;
4532509Smrj 	uint_t ccnt;
4533509Smrj 	size_t len;
4534509Smrj 	off_t off;
4535509Smrj 	int e;
4536509Smrj 
4537509Smrj 
4538509Smrj 	/*
4539509Smrj 	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
4540509Smrj 	 * hacky since we're optimizing for the current interfaces so we can
4541509Smrj 	 * clean up the mess in genunix. Hopefully we will remove these
4542509Smrj 	 * obsoleted routines someday soon.
4543509Smrj 	 */
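
	/*
	 * How the hack works (as implemented below): the "segment" and
	 * "window" tokens handed back through *objpp are really just the
	 * DMA handle itself. On the next call, *lenp (for NEXTSEG) or
	 * *offp (for NEXTWIN) must either be NULL (meaning "start at the
	 * first one") or point back at that same handle; anything else
	 * gets DDI_DMA_STALE. The real iteration state lives in
	 * dma->dp_current_cookie and dma->dp_current_win.
	 */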
4544509Smrj 
4545509Smrj 	switch (request) {
4546509Smrj 
4547509Smrj 	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
4548509Smrj 		hp = (ddi_dma_impl_t *)handle;
4549509Smrj 		cookie = (ddi_dma_cookie_t *)objpp;
4550509Smrj 
4551509Smrj 		/*
4552509Smrj 		 * convert segment to cookie. We don't distinguish between the
4553509Smrj 		 * two :-)
4554509Smrj 		 */
4555509Smrj 		*cookie = *hp->dmai_cookie;
4556509Smrj 		*lenp = cookie->dmac_size;
4557509Smrj 		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
4558509Smrj 		return (DDI_SUCCESS);
4559509Smrj 
4560509Smrj 	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
4561509Smrj 		hp = (ddi_dma_impl_t *)handle;
4562509Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
4563509Smrj 
4564509Smrj 		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
4565509Smrj 			return (DDI_DMA_STALE);
45660Sstevel@tonic-gate 		}
4567509Smrj 
4568509Smrj 		/* handle the case where we don't have any windows */
4569509Smrj 		if (dma->dp_window == NULL) {
4570509Smrj 			/*
4571509Smrj 			 * if seg == NULL, and we don't have any windows,
4572509Smrj 			 * return the first cookie in the sgl.
4573509Smrj 			 */
4574509Smrj 			if (*lenp == NULL) {
4575509Smrj 				dma->dp_current_cookie = 0;
4576509Smrj 				hp->dmai_cookie = dma->dp_cookies;
4577509Smrj 				*objpp = (caddr_t)handle;
4578509Smrj 				return (DDI_SUCCESS);
4579509Smrj 
4580509Smrj 			/* if we have more cookies, go to the next cookie */
4581509Smrj 			} else {
4582509Smrj 				if ((dma->dp_current_cookie + 1) >=
4583509Smrj 				    dma->dp_sglinfo.si_sgl_size) {
4584509Smrj 					return (DDI_DMA_DONE);
4585509Smrj 				}
4586509Smrj 				dma->dp_current_cookie++;
4587509Smrj 				hp->dmai_cookie++;
4588509Smrj 				return (DDI_SUCCESS);
4589509Smrj 			}
4590509Smrj 		}
4591509Smrj 
4592509Smrj 		/* We have one or more windows */
4593509Smrj 		window = &dma->dp_window[dma->dp_current_win];
4594509Smrj 
4595509Smrj 		/*
4596509Smrj 		 * if seg == NULL, return the first cookie in the current
4597509Smrj 		 * window
4598509Smrj 		 */
4599509Smrj 		if (*lenp == NULL) {
4600509Smrj 			dma->dp_current_cookie = 0;
4601683Smrj 			hp->dmai_cookie = window->wd_first_cookie;
4602509Smrj 
4603509Smrj 		/*
4604509Smrj 		 * go to the next cookie in the window, then see if we're done
4604509Smrj 		 * with
4605509Smrj 		 * this window.
4606509Smrj 		 */
4607509Smrj 		} else {
4608509Smrj 			if ((dma->dp_current_cookie + 1) >=
4609509Smrj 			    window->wd_cookie_cnt) {
4610509Smrj 				return (DDI_DMA_DONE);
4611509Smrj 			}
4612509Smrj 			dma->dp_current_cookie++;
4613509Smrj 			hp->dmai_cookie++;
4614509Smrj 		}
4615509Smrj 		*objpp = (caddr_t)handle;
4616509Smrj 		return (DDI_SUCCESS);
4617509Smrj 
4618509Smrj 	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
4619509Smrj 		hp = (ddi_dma_impl_t *)handle;
4620509Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
4621509Smrj 
4622509Smrj 		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
4623509Smrj 			return (DDI_DMA_STALE);
4624509Smrj 		}
4625509Smrj 
4626509Smrj 		/* if win == NULL, return the first window in the bind */
4627509Smrj 		if (*offp == NULL) {
4628509Smrj 			nwin = 0;
4629509Smrj 
4630509Smrj 		/*
4631509Smrj 		 * else, go to the next window then see if we're done with all
4632509Smrj 		 * the windows.
4633509Smrj 		 */
4634509Smrj 		} else {
4635509Smrj 			nwin = dma->dp_current_win + 1;
4636509Smrj 			if (nwin >= hp->dmai_nwin) {
4637509Smrj 				return (DDI_DMA_DONE);
4638509Smrj 			}
4639509Smrj 		}
4640509Smrj 
4641509Smrj 		/* switch to the next window */
4642509Smrj 		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
4643509Smrj 		    &lcookie, &ccnt);
4644509Smrj 		ASSERT(e == DDI_SUCCESS);
4645509Smrj 		if (e != DDI_SUCCESS) {
4646509Smrj 			return (DDI_DMA_STALE);
4647509Smrj 		}
4648509Smrj 
4649509Smrj 		/* reset the cookie back to the first cookie in the window */
4650509Smrj 		if (dma->dp_window != NULL) {
4651509Smrj 			window = &dma->dp_window[dma->dp_current_win];
4652509Smrj 			hp->dmai_cookie = window->wd_first_cookie;
4653509Smrj 		} else {
4654509Smrj 			hp->dmai_cookie = dma->dp_cookies;
4655509Smrj 		}
4656509Smrj 
4657509Smrj 		*objpp = (caddr_t)handle;
4658509Smrj 		return (DDI_SUCCESS);
4659509Smrj 
4660509Smrj 	case DDI_DMA_FREE: /* ddi_dma_free() */
4661509Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
4662509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, handle);
4663509Smrj 		if (rootnex_state->r_dvma_call_list_id) {
4664509Smrj 			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
4665509Smrj 		}
4666509Smrj 		return (DDI_SUCCESS);
4667509Smrj 
4668509Smrj 	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
4669509Smrj 	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
4670509Smrj 		/* should never get here, handled in genunix */
4671509Smrj 		ASSERT(0);
4672509Smrj 		return (DDI_FAILURE);
4673509Smrj 
4674509Smrj 	case DDI_DMA_KVADDR:
4675509Smrj 	case DDI_DMA_GETERR:
4676509Smrj 	case DDI_DMA_COFF:
4677509Smrj 		return (DDI_FAILURE);
46780Sstevel@tonic-gate 	}
4679509Smrj 
4680509Smrj 	return (DDI_FAILURE);
4681509Smrj #endif /* defined(__amd64) */
46820Sstevel@tonic-gate }
46831414Scindi 
4684*7613SVikram.Hegde@Sun.COM /*
4685*7613SVikram.Hegde@Sun.COM  * rootnex_dma_mctl()
4686*7613SVikram.Hegde@Sun.COM  *
4687*7613SVikram.Hegde@Sun.COM  */
4688*7613SVikram.Hegde@Sun.COM /* ARGSUSED */
4689*7613SVikram.Hegde@Sun.COM static int
4690*7613SVikram.Hegde@Sun.COM rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4691*7613SVikram.Hegde@Sun.COM     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
4692*7613SVikram.Hegde@Sun.COM     uint_t cache_flags)
4693*7613SVikram.Hegde@Sun.COM {
4694*7613SVikram.Hegde@Sun.COM #if !defined(__xpv)
4695*7613SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
4696*7613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_mctl(dip, rdip, handle, request, offp,
4697*7613SVikram.Hegde@Sun.COM 		    lenp, objpp, cache_flags));
4698*7613SVikram.Hegde@Sun.COM 	}
4699*7613SVikram.Hegde@Sun.COM #endif
4700*7613SVikram.Hegde@Sun.COM 
4701*7613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_mctl(dip, rdip, handle, request, offp,
4702*7613SVikram.Hegde@Sun.COM 	    lenp, objpp, cache_flags));
4703*7613SVikram.Hegde@Sun.COM }
47041865Sdilpreet 
47051865Sdilpreet /*
47061865Sdilpreet  * *********
47071865Sdilpreet  *  FMA Code
47081865Sdilpreet  * *********
47091865Sdilpreet  */
47101865Sdilpreet 
47111865Sdilpreet /*
47121865Sdilpreet  * rootnex_fm_init()
47131865Sdilpreet  *    FMA init busop
47141865Sdilpreet  */
47151865Sdilpreet /* ARGSUSED */
47161865Sdilpreet static int
47171865Sdilpreet rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
47181865Sdilpreet     ddi_iblock_cookie_t *ibc)
47191865Sdilpreet {
47201865Sdilpreet 	*ibc = rootnex_state->r_err_ibc;
47211865Sdilpreet 
47221865Sdilpreet 	return (ddi_system_fmcap);
47231865Sdilpreet }
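
/*
 * A minimal sketch of how a child driver reaches this busop (hypothetical
 * driver code in its attach(9E) path):
 *
 *	int fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_DMACHK_CAPABLE |
 *	    DDI_FM_ERRCB_CAPABLE;
 *	ddi_iblock_cookie_t ibc;
 *
 *	ddi_fm_init(dip, &fmcap, &ibc);
 *
 * The framework passes the request up the devinfo tree to the root nexus;
 * the ddi_system_fmcap value returned above bounds the capabilities the
 * child is actually granted.
 */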
47241865Sdilpreet 
47251865Sdilpreet /*
47261865Sdilpreet  * rootnex_dma_check()
47271865Sdilpreet  *    Function called after a dma fault occurred to find out whether the
47281865Sdilpreet  * fault address is associated with a driver that is able to handle
47291865Sdilpreet  * and recover from DMA faults.
47301865Sdilpreet  */
47311865Sdilpreet /* ARGSUSED */
47321414Scindi static int
47331865Sdilpreet rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
47341865Sdilpreet     const void *not_used)
47351414Scindi {
47361865Sdilpreet 	rootnex_window_t *window;
47371865Sdilpreet 	uint64_t start_addr;
47381865Sdilpreet 	uint64_t fault_addr;
47391865Sdilpreet 	ddi_dma_impl_t *hp;
47401865Sdilpreet 	rootnex_dma_t *dma;
47411865Sdilpreet 	uint64_t end_addr;
47421865Sdilpreet 	size_t csize;
47431865Sdilpreet 	int i;
47441865Sdilpreet 	int j;
47451865Sdilpreet 
47461865Sdilpreet 
47471865Sdilpreet 	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
47481865Sdilpreet 	hp = (ddi_dma_impl_t *)handle;
47491865Sdilpreet 	ASSERT(hp);
47501865Sdilpreet 
47511865Sdilpreet 	dma = (rootnex_dma_t *)hp->dmai_private;
47521865Sdilpreet 
47531865Sdilpreet 	/* Get the address that we need to search for */
47541865Sdilpreet 	fault_addr = *(uint64_t *)addr;
47551865Sdilpreet 
47561865Sdilpreet 	/*
47571865Sdilpreet 	 * if we don't have any windows, we can just walk through all the
47581865Sdilpreet 	 * cookies.
47591865Sdilpreet 	 */
47601865Sdilpreet 	if (dma->dp_window == NULL) {
47611865Sdilpreet 		/* for each cookie */
47621865Sdilpreet 		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
47631865Sdilpreet 			/*
47641865Sdilpreet 			 * if the faulted address is within the physical address
47651865Sdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
47661865Sdilpreet 			 */
47671865Sdilpreet 			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
47681865Sdilpreet 			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
47691865Sdilpreet 			    dma->dp_cookies[i].dmac_size))) {
47701865Sdilpreet 				return (DDI_FM_NONFATAL);
47711865Sdilpreet 			}
47721865Sdilpreet 		}
47731865Sdilpreet 
47741865Sdilpreet 		/* fault_addr not within this DMA handle */
47751865Sdilpreet 		return (DDI_FM_UNKNOWN);
47761865Sdilpreet 	}
47771865Sdilpreet 
47781865Sdilpreet 	/* we have multiple windows, walk through each window */
47791865Sdilpreet 	for (i = 0; i < hp->dmai_nwin; i++) {
47801865Sdilpreet 		window = &dma->dp_window[i];
47811865Sdilpreet 
47821865Sdilpreet 		/* Go through all the cookies in the window */
47831865Sdilpreet 		for (j = 0; j < window->wd_cookie_cnt; j++) {
47841865Sdilpreet 
47851865Sdilpreet 			start_addr = window->wd_first_cookie[j].dmac_laddress;
47861865Sdilpreet 			csize = window->wd_first_cookie[j].dmac_size;
47871865Sdilpreet 
47881865Sdilpreet 			/*
47891865Sdilpreet 			 * if we are trimming the first cookie in the window,
47901865Sdilpreet 			 * and this is the first cookie, adjust the start
47911865Sdilpreet 			 * address and size of the cookie to account for the
47921865Sdilpreet 			 * trim.
47931865Sdilpreet 			 */
47941865Sdilpreet 			if (window->wd_trim.tr_trim_first && (j == 0)) {
47951865Sdilpreet 				start_addr = window->wd_trim.tr_first_paddr;
47961865Sdilpreet 				csize = window->wd_trim.tr_first_size;
47971865Sdilpreet 			}
47981865Sdilpreet 
47991865Sdilpreet 			/*
48001865Sdilpreet 			 * if we are trimming the last cookie in the window,
48011865Sdilpreet 			 * and this is the last cookie, adjust the start
48021865Sdilpreet 			 * address and size of the cookie to account for the
48031865Sdilpreet 			 * trim.
48041865Sdilpreet 			 */
48051865Sdilpreet 			if (window->wd_trim.tr_trim_last &&
48061865Sdilpreet 			    (j == (window->wd_cookie_cnt - 1))) {
48071865Sdilpreet 				start_addr = window->wd_trim.tr_last_paddr;
48081865Sdilpreet 				csize = window->wd_trim.tr_last_size;
48091865Sdilpreet 			}
48101865Sdilpreet 
48111865Sdilpreet 			end_addr = start_addr + csize;
48121865Sdilpreet 
48131865Sdilpreet 			/*
48141865Sdilpreet 			 * if the faulted address is within the physical address
48151865Sdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
48161865Sdilpreet 			 */
48171865Sdilpreet 			if ((fault_addr >= start_addr) &&
48181865Sdilpreet 			    (fault_addr <= end_addr)) {
48191865Sdilpreet 				return (DDI_FM_NONFATAL);
48201865Sdilpreet 			}
48211865Sdilpreet 		}
48221865Sdilpreet 	}
48231865Sdilpreet 
48241865Sdilpreet 	/* fault_addr not within this DMA handle */
48251865Sdilpreet 	return (DDI_FM_UNKNOWN);
48261414Scindi }
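
/*
 * A minimal sketch of the consumer side (hypothetical driver code): a
 * driver that wants this recovery path sets DDI_DMA_FLAGERR in its
 * dma_attr_flags and, on a suspected fault, asks the FM framework for the
 * handle's error status, which is derived from checks like the one above:
 *
 *	ddi_fm_error_t de;
 *
 *	ddi_fm_dma_err_get(h, &de, DDI_FME_VERSION);
 *	if (de.fme_status != DDI_FM_OK)
 *		... retry or fail the transfer gracefully ...
 */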
4827