xref: /onnv-gate/usr/src/uts/i86pc/io/rootnex.c (revision 12837:331f69c36b0a)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51865Sdilpreet  * Common Development and Distribution License (the "License").
61865Sdilpreet  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
2212118Smark.r.johnson@oracle.com  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
230Sstevel@tonic-gate  */
240Sstevel@tonic-gate 
250Sstevel@tonic-gate /*
26509Smrj  * x86 root nexus driver
270Sstevel@tonic-gate  */
280Sstevel@tonic-gate 
290Sstevel@tonic-gate #include <sys/sysmacros.h>
300Sstevel@tonic-gate #include <sys/conf.h>
310Sstevel@tonic-gate #include <sys/autoconf.h>
320Sstevel@tonic-gate #include <sys/sysmacros.h>
330Sstevel@tonic-gate #include <sys/debug.h>
340Sstevel@tonic-gate #include <sys/psw.h>
350Sstevel@tonic-gate #include <sys/ddidmareq.h>
360Sstevel@tonic-gate #include <sys/promif.h>
370Sstevel@tonic-gate #include <sys/devops.h>
380Sstevel@tonic-gate #include <sys/kmem.h>
390Sstevel@tonic-gate #include <sys/cmn_err.h>
400Sstevel@tonic-gate #include <vm/seg.h>
410Sstevel@tonic-gate #include <vm/seg_kmem.h>
420Sstevel@tonic-gate #include <vm/seg_dev.h>
430Sstevel@tonic-gate #include <sys/vmem.h>
440Sstevel@tonic-gate #include <sys/mman.h>
450Sstevel@tonic-gate #include <vm/hat.h>
460Sstevel@tonic-gate #include <vm/as.h>
470Sstevel@tonic-gate #include <vm/page.h>
480Sstevel@tonic-gate #include <sys/avintr.h>
490Sstevel@tonic-gate #include <sys/errno.h>
500Sstevel@tonic-gate #include <sys/modctl.h>
510Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
520Sstevel@tonic-gate #include <sys/sunddi.h>
530Sstevel@tonic-gate #include <sys/sunndi.h>
54916Sschwartz #include <sys/mach_intr.h>
550Sstevel@tonic-gate #include <sys/psm.h>
560Sstevel@tonic-gate #include <sys/ontrap.h>
57509Smrj #include <sys/atomic.h>
58509Smrj #include <sys/sdt.h>
59509Smrj #include <sys/rootnex.h>
60509Smrj #include <vm/hat_i86.h>
611865Sdilpreet #include <sys/ddifm.h>
625251Smrj #include <sys/ddi_isa.h>
6312683SJimmy.Vetayases@oracle.com #include <sys/apic.h>
64509Smrj 
655084Sjohnlev #ifdef __xpv
665084Sjohnlev #include <sys/bootinfo.h>
675084Sjohnlev #include <sys/hypervisor.h>
685084Sjohnlev #include <sys/bootconf.h>
695084Sjohnlev #include <vm/kboot_mmu.h>
7011600SVikram.Hegde@Sun.COM #endif
7111600SVikram.Hegde@Sun.COM 
7211600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
7311600SVikram.Hegde@Sun.COM #include <sys/immu.h>
747613SVikram.Hegde@Sun.COM #endif
757613SVikram.Hegde@Sun.COM 
767589SVikram.Hegde@Sun.COM 
77509Smrj /*
78509Smrj  * enable/disable extra checking of function parameters. Useful for debugging
79509Smrj  * drivers.
80509Smrj  */
81509Smrj #ifdef	DEBUG
82509Smrj int rootnex_alloc_check_parms = 1;
83509Smrj int rootnex_bind_check_parms = 1;
84509Smrj int rootnex_bind_check_inuse = 1;
85509Smrj int rootnex_unbind_verify_buffer = 0;
86509Smrj int rootnex_sync_check_parms = 1;
87509Smrj #else
88509Smrj int rootnex_alloc_check_parms = 0;
89509Smrj int rootnex_bind_check_parms = 0;
90509Smrj int rootnex_bind_check_inuse = 0;
91509Smrj int rootnex_unbind_verify_buffer = 0;
92509Smrj int rootnex_sync_check_parms = 0;
93509Smrj #endif
94509Smrj 
9511600SVikram.Hegde@Sun.COM boolean_t rootnex_dmar_not_setup;
9611600SVikram.Hegde@Sun.COM 
971414Scindi /* Master Abort and Target Abort panic flag */
981414Scindi int rootnex_fm_ma_ta_panic_flag = 0;
991414Scindi 
100509Smrj /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
1010Sstevel@tonic-gate int rootnex_bind_fail = 1;
1020Sstevel@tonic-gate int rootnex_bind_warn = 1;
1030Sstevel@tonic-gate uint8_t *rootnex_warn_list;
1040Sstevel@tonic-gate /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
1050Sstevel@tonic-gate #define	ROOTNEX_BIND_WARNING	(0x1 << 0)
1060Sstevel@tonic-gate 
1070Sstevel@tonic-gate /*
108509Smrj  * Revert to the old broken behavior of always sync'ing the entire copy buffer.
109509Smrj  * This is useful if we have a buggy driver which doesn't correctly pass in
110509Smrj  * the offset and size into ddi_dma_sync().
1110Sstevel@tonic-gate  */
112509Smrj int rootnex_sync_ignore_params = 0;
1130Sstevel@tonic-gate 
1140Sstevel@tonic-gate /*
115509Smrj  * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
116509Smrj  * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
117509Smrj  * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
118509Smrj  * kernel). Allocate enough windows to handle a 256K buffer with a DMA engine
119509Smrj  * that has an sgllen of at least 65, and enough copybuf buffer state pages to
120509Smrj  * handle 2 pages (< 8K). We will still need to allocate the copy buffer during
121509Smrj  * bind though (if we need one). These can only be modified in /etc/system
122509Smrj  * before rootnex attach.
1230Sstevel@tonic-gate  */
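/*
 * A rough sketch of how these could be tuned from /etc/system (the values
 * below are purely illustrative):
 *
 *	set rootnex:rootnex_prealloc_cookies = 129
 *	set rootnex:rootnex_prealloc_windows = 8
 *
 * The 64-bit default of 65 cookies presumably corresponds to the 256K buffer
 * mentioned above: 64 4K-page cookies plus one for the alignment page.
 */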
124509Smrj #if defined(__amd64)
125509Smrj int rootnex_prealloc_cookies = 65;
126509Smrj int rootnex_prealloc_windows = 4;
127509Smrj int rootnex_prealloc_copybuf = 2;
128509Smrj #else
129509Smrj int rootnex_prealloc_cookies = 33;
130509Smrj int rootnex_prealloc_windows = 4;
131509Smrj int rootnex_prealloc_copybuf = 2;
132509Smrj #endif
133509Smrj 
134509Smrj /* driver global state */
135509Smrj static rootnex_state_t *rootnex_state;
136509Smrj 
137*12837Sfrank.van.der.linden@oracle.com #ifdef DEBUG
138509Smrj /* shortcut to rootnex counters */
139509Smrj static uint64_t *rootnex_cnt;
140*12837Sfrank.van.der.linden@oracle.com #endif
1410Sstevel@tonic-gate 
1420Sstevel@tonic-gate /*
143509Smrj  * XXX - does x86 even need these or are they left over from the SPARC days?
1440Sstevel@tonic-gate  */
145509Smrj /* statically defined integer/boolean properties for the root node */
146509Smrj static rootnex_intprop_t rootnex_intprp[] = {
147509Smrj 	{ "PAGESIZE",			PAGESIZE },
148509Smrj 	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
149509Smrj 	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
150509Smrj 	{ DDI_RELATIVE_ADDRESSING,	1 },
151509Smrj };
152509Smrj #define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
153509Smrj 
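/*
 * Under the Xen hypervisor, the initial domain programs real devices with
 * machine addresses, so rootnex addresses are maddr_t there; everywhere else
 * they are ordinary physical addresses (see the MA/PA translation comments
 * in rootnex_map_regspec() and rootnex_map_handle() below).
 */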
1545084Sjohnlev #ifdef __xpv
1555084Sjohnlev typedef maddr_t rootnex_addr_t;
1565084Sjohnlev #define	ROOTNEX_PADDR_TO_RBASE(xinfo, pa)	\
1575084Sjohnlev 	(DOMAIN_IS_INITDOMAIN(xinfo) ? pa_to_ma(pa) : (pa))
1585084Sjohnlev #else
1595084Sjohnlev typedef paddr_t rootnex_addr_t;
1605084Sjohnlev #endif
1615084Sjohnlev 
1627613SVikram.Hegde@Sun.COM #if !defined(__xpv)
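/* Make the module loader pull in iommulib and acpica before rootnex. */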
16312683SJimmy.Vetayases@oracle.com char _depends_on[] = "misc/iommulib misc/acpica";
1647613SVikram.Hegde@Sun.COM #endif
165509Smrj 
166509Smrj static struct cb_ops rootnex_cb_ops = {
167509Smrj 	nodev,		/* open */
168509Smrj 	nodev,		/* close */
169509Smrj 	nodev,		/* strategy */
170509Smrj 	nodev,		/* print */
171509Smrj 	nodev,		/* dump */
172509Smrj 	nodev,		/* read */
173509Smrj 	nodev,		/* write */
174509Smrj 	nodev,		/* ioctl */
175509Smrj 	nodev,		/* devmap */
176509Smrj 	nodev,		/* mmap */
177509Smrj 	nodev,		/* segmap */
178509Smrj 	nochpoll,	/* chpoll */
179509Smrj 	ddi_prop_op,	/* cb_prop_op */
180509Smrj 	NULL,		/* struct streamtab */
181509Smrj 	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
182509Smrj 	CB_REV,		/* Rev */
183509Smrj 	nodev,		/* cb_aread */
184509Smrj 	nodev		/* cb_awrite */
185509Smrj };
186509Smrj 
187509Smrj static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
1880Sstevel@tonic-gate     off_t offset, off_t len, caddr_t *vaddrp);
189509Smrj static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
1900Sstevel@tonic-gate     struct hat *hat, struct seg *seg, caddr_t addr,
1910Sstevel@tonic-gate     struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
192509Smrj static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
1930Sstevel@tonic-gate     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
194509Smrj static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
195509Smrj     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
196509Smrj     ddi_dma_handle_t *handlep);
197509Smrj static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
198509Smrj     ddi_dma_handle_t handle);
199509Smrj static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
200509Smrj     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
201509Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
202509Smrj static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
203509Smrj     ddi_dma_handle_t handle);
204509Smrj static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
205509Smrj     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
206509Smrj static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
207509Smrj     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
208509Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
209509Smrj static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
2100Sstevel@tonic-gate     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
2110Sstevel@tonic-gate     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
212509Smrj static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
213509Smrj     ddi_ctl_enum_t ctlop, void *arg, void *result);
2141865Sdilpreet static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
2151865Sdilpreet     ddi_iblock_cookie_t *ibc);
216509Smrj static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
217509Smrj     ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
21812683SJimmy.Vetayases@oracle.com static int rootnex_alloc_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *,
21912683SJimmy.Vetayases@oracle.com     void *);
22012683SJimmy.Vetayases@oracle.com static int rootnex_free_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *);
221509Smrj 
2227613SVikram.Hegde@Sun.COM static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
2237613SVikram.Hegde@Sun.COM     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
2247613SVikram.Hegde@Sun.COM     ddi_dma_handle_t *handlep);
2257613SVikram.Hegde@Sun.COM static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
2267613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
2277613SVikram.Hegde@Sun.COM static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
2287613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
2297613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
2307613SVikram.Hegde@Sun.COM static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2317613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
23211600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
2337613SVikram.Hegde@Sun.COM static void rootnex_coredma_reset_cookies(dev_info_t *dip,
2347613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
2357613SVikram.Hegde@Sun.COM static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
2368215SVikram.Hegde@Sun.COM     ddi_dma_cookie_t **cookiepp, uint_t *ccountp);
2378215SVikram.Hegde@Sun.COM static int rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
2388215SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t ccount);
2398215SVikram.Hegde@Sun.COM static int rootnex_coredma_clear_cookies(dev_info_t *dip,
2408215SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
2418215SVikram.Hegde@Sun.COM static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle);
2427617SVikram.Hegde@Sun.COM #endif
2437613SVikram.Hegde@Sun.COM static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip,
2447613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
2457613SVikram.Hegde@Sun.COM static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip,
2467613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
2477613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
2480Sstevel@tonic-gate 
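/*
 * Leaf drivers never call the rootnex_dma_*() entry points directly; they
 * reach them through the generic DDI DMA routines, which vector through the
 * bus_ops table below. A minimal sketch of that path (error handling
 * omitted, local names are illustrative only):
 *
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *
 *	(void) ddi_dma_alloc_handle(dip, &attr, DDI_DMA_SLEEP, NULL, &h);
 *	(void) ddi_dma_addr_bind_handle(h, NULL, kaddr, len,
 *	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount);
 *	... program the device with cookie.dmac_laddress/dmac_size ...
 *	(void) ddi_dma_sync(h, 0, 0, DDI_DMA_SYNC_FORKERNEL);
 *	(void) ddi_dma_unbind_handle(h);
 *	ddi_dma_free_handle(&h);
 *
 * The alloc lands in rootnex_dma_allochdl(), the bind in
 * rootnex_dma_bindhdl(), and so on.
 */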
2490Sstevel@tonic-gate static struct bus_ops rootnex_bus_ops = {
2500Sstevel@tonic-gate 	BUSO_REV,
2510Sstevel@tonic-gate 	rootnex_map,
2520Sstevel@tonic-gate 	NULL,
2530Sstevel@tonic-gate 	NULL,
2540Sstevel@tonic-gate 	NULL,
2550Sstevel@tonic-gate 	rootnex_map_fault,
2560Sstevel@tonic-gate 	rootnex_dma_map,
2570Sstevel@tonic-gate 	rootnex_dma_allochdl,
2580Sstevel@tonic-gate 	rootnex_dma_freehdl,
2590Sstevel@tonic-gate 	rootnex_dma_bindhdl,
2600Sstevel@tonic-gate 	rootnex_dma_unbindhdl,
261509Smrj 	rootnex_dma_sync,
2620Sstevel@tonic-gate 	rootnex_dma_win,
2630Sstevel@tonic-gate 	rootnex_dma_mctl,
2640Sstevel@tonic-gate 	rootnex_ctlops,
2650Sstevel@tonic-gate 	ddi_bus_prop_op,
2660Sstevel@tonic-gate 	i_ddi_rootnex_get_eventcookie,
2670Sstevel@tonic-gate 	i_ddi_rootnex_add_eventcall,
2680Sstevel@tonic-gate 	i_ddi_rootnex_remove_eventcall,
2690Sstevel@tonic-gate 	i_ddi_rootnex_post_event,
2700Sstevel@tonic-gate 	0,			/* bus_intr_ctl */
2710Sstevel@tonic-gate 	0,			/* bus_config */
2720Sstevel@tonic-gate 	0,			/* bus_unconfig */
2731865Sdilpreet 	rootnex_fm_init,	/* bus_fm_init */
2740Sstevel@tonic-gate 	NULL,			/* bus_fm_fini */
2750Sstevel@tonic-gate 	NULL,			/* bus_fm_access_enter */
2760Sstevel@tonic-gate 	NULL,			/* bus_fm_access_exit */
2770Sstevel@tonic-gate 	NULL,			/* bus_power */
2780Sstevel@tonic-gate 	rootnex_intr_ops	/* bus_intr_op */
2790Sstevel@tonic-gate };
2800Sstevel@tonic-gate 
281509Smrj static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
282509Smrj static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
28311600SVikram.Hegde@Sun.COM static int rootnex_quiesce(dev_info_t *dip);
2840Sstevel@tonic-gate 
2850Sstevel@tonic-gate static struct dev_ops rootnex_ops = {
2860Sstevel@tonic-gate 	DEVO_REV,
287509Smrj 	0,
288509Smrj 	ddi_no_info,
289509Smrj 	nulldev,
2900Sstevel@tonic-gate 	nulldev,
2910Sstevel@tonic-gate 	rootnex_attach,
292509Smrj 	rootnex_detach,
293509Smrj 	nulldev,
294509Smrj 	&rootnex_cb_ops,
2957656SSherry.Moore@Sun.COM 	&rootnex_bus_ops,
2967656SSherry.Moore@Sun.COM 	NULL,
29711600SVikram.Hegde@Sun.COM 	rootnex_quiesce,		/* quiesce */
2980Sstevel@tonic-gate };
2990Sstevel@tonic-gate 
300509Smrj static struct modldrv rootnex_modldrv = {
301509Smrj 	&mod_driverops,
3027542SRichard.Bean@Sun.COM 	"i86pc root nexus",
303509Smrj 	&rootnex_ops
304509Smrj };
305509Smrj 
306509Smrj static struct modlinkage rootnex_modlinkage = {
307509Smrj 	MODREV_1,
308509Smrj 	(void *)&rootnex_modldrv,
309509Smrj 	NULL
310509Smrj };
311509Smrj 
31211600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
3137613SVikram.Hegde@Sun.COM static iommulib_nexops_t iommulib_nexops = {
3147613SVikram.Hegde@Sun.COM 	IOMMU_NEXOPS_VERSION,
3157613SVikram.Hegde@Sun.COM 	"Rootnex IOMMU ops Vers 1.1",
3167613SVikram.Hegde@Sun.COM 	NULL,
3177613SVikram.Hegde@Sun.COM 	rootnex_coredma_allochdl,
3187613SVikram.Hegde@Sun.COM 	rootnex_coredma_freehdl,
3197613SVikram.Hegde@Sun.COM 	rootnex_coredma_bindhdl,
3207613SVikram.Hegde@Sun.COM 	rootnex_coredma_unbindhdl,
3217613SVikram.Hegde@Sun.COM 	rootnex_coredma_reset_cookies,
3227613SVikram.Hegde@Sun.COM 	rootnex_coredma_get_cookies,
3238215SVikram.Hegde@Sun.COM 	rootnex_coredma_set_cookies,
3248215SVikram.Hegde@Sun.COM 	rootnex_coredma_clear_cookies,
3258215SVikram.Hegde@Sun.COM 	rootnex_coredma_get_sleep_flags,
3267613SVikram.Hegde@Sun.COM 	rootnex_coredma_sync,
3277613SVikram.Hegde@Sun.COM 	rootnex_coredma_win,
32810216SVikram.Hegde@Sun.COM 	rootnex_dma_map,
32910216SVikram.Hegde@Sun.COM 	rootnex_dma_mctl
3307613SVikram.Hegde@Sun.COM };
3317617SVikram.Hegde@Sun.COM #endif
332509Smrj 
333509Smrj /*
334509Smrj  *  extern hacks
335509Smrj  */
336509Smrj extern struct seg_ops segdev_ops;
337509Smrj extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
338509Smrj #ifdef	DDI_MAP_DEBUG
339509Smrj extern int ddi_map_debug_flag;
340509Smrj #define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
341509Smrj #endif
342509Smrj extern void i86_pp_map(page_t *pp, caddr_t kaddr);
343509Smrj extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
344509Smrj extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
345509Smrj     psm_intr_op_t, int *);
346509Smrj extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
347509Smrj extern void impl_ddi_sunbus_removechild(dev_info_t *dip);
3485251Smrj 
349509Smrj /*
350509Smrj  * Use the device arena for device control register mappings.
351509Smrj  * Various kernel memory walkers (debugger, dtrace) need to know
352509Smrj  * to avoid this address range to prevent undesired device activity.
353509Smrj  */
354509Smrj extern void *device_arena_alloc(size_t size, int vm_flag);
355509Smrj extern void device_arena_free(void * vaddr, size_t size);
356509Smrj 
357509Smrj 
3580Sstevel@tonic-gate /*
359509Smrj  *  Internal functions
3600Sstevel@tonic-gate  */
361509Smrj static int rootnex_dma_init();
362509Smrj static void rootnex_add_props(dev_info_t *);
363509Smrj static int rootnex_ctl_reportdev(dev_info_t *dip);
364509Smrj static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
365509Smrj static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
366509Smrj static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
367509Smrj static int rootnex_map_handle(ddi_map_req_t *mp);
368509Smrj static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
369509Smrj static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
370509Smrj static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
371509Smrj     ddi_dma_attr_t *attr);
372509Smrj static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
373509Smrj     rootnex_sglinfo_t *sglinfo);
374509Smrj static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
375509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag);
376509Smrj static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
377509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr);
378509Smrj static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
379509Smrj static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
380509Smrj     ddi_dma_attr_t *attr, int kmflag);
381509Smrj static void rootnex_teardown_windows(rootnex_dma_t *dma);
382509Smrj static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
383509Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
384509Smrj static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
385509Smrj     rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
386509Smrj     size_t *copybuf_used, page_t **cur_pp);
387509Smrj static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
388509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
389509Smrj     ddi_dma_attr_t *attr, off_t cur_offset);
390509Smrj static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
391509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp,
392509Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
393509Smrj static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
394509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
395509Smrj static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
396509Smrj     off_t offset, size_t size, uint_t cache_flags);
397509Smrj static int rootnex_verify_buffer(rootnex_dma_t *dma);
3981865Sdilpreet static int rootnex_dma_check(dev_info_t *dip, const void *handle,
3991865Sdilpreet     const void *comp_addr, const void *not_used);
40011793SMark.Johnson@Sun.COM static boolean_t rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object,
40111793SMark.Johnson@Sun.COM     rootnex_sglinfo_t *sglinfo);
402509Smrj 
403509Smrj /*
404509Smrj  * _init()
405509Smrj  *
406509Smrj  */
4070Sstevel@tonic-gate int
4080Sstevel@tonic-gate _init(void)
4090Sstevel@tonic-gate {
410509Smrj 
411509Smrj 	rootnex_state = NULL;
412509Smrj 	return (mod_install(&rootnex_modlinkage));
4130Sstevel@tonic-gate }
4140Sstevel@tonic-gate 
415509Smrj 
416509Smrj /*
417509Smrj  * _info()
418509Smrj  *
419509Smrj  */
420509Smrj int
421509Smrj _info(struct modinfo *modinfop)
422509Smrj {
423509Smrj 	return (mod_info(&rootnex_modlinkage, modinfop));
424509Smrj }
425509Smrj 
426509Smrj 
427509Smrj /*
428509Smrj  * _fini()
429509Smrj  *
430509Smrj  */
4310Sstevel@tonic-gate int
4320Sstevel@tonic-gate _fini(void)
4330Sstevel@tonic-gate {
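	/* The root nexus can never be unloaded; always report busy. */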
4340Sstevel@tonic-gate 	return (EBUSY);
4350Sstevel@tonic-gate }
4360Sstevel@tonic-gate 
4370Sstevel@tonic-gate 
4380Sstevel@tonic-gate /*
439509Smrj  * rootnex_attach()
4400Sstevel@tonic-gate  *
4410Sstevel@tonic-gate  */
442509Smrj static int
443509Smrj rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
444509Smrj {
4451414Scindi 	int fmcap;
446509Smrj 	int e;
447509Smrj 
448509Smrj 	switch (cmd) {
449509Smrj 	case DDI_ATTACH:
450509Smrj 		break;
451509Smrj 	case DDI_RESUME:
45211600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
45311600SVikram.Hegde@Sun.COM 		return (immu_unquiesce());
45411600SVikram.Hegde@Sun.COM #else
455509Smrj 		return (DDI_SUCCESS);
45611600SVikram.Hegde@Sun.COM #endif
457509Smrj 	default:
458509Smrj 		return (DDI_FAILURE);
459509Smrj 	}
460509Smrj 
461509Smrj 	/*
462509Smrj 	 * We should only have one instance of rootnex. Save it away since we
463509Smrj 	 * don't have an easy way to get it back later.
464509Smrj 	 */
465509Smrj 	ASSERT(rootnex_state == NULL);
466509Smrj 	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);
467509Smrj 
468509Smrj 	rootnex_state->r_dip = dip;
4691414Scindi 	rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
470509Smrj 	rootnex_state->r_reserved_msg_printed = B_FALSE;
471*12837Sfrank.van.der.linden@oracle.com #ifdef DEBUG
472509Smrj 	rootnex_cnt = &rootnex_state->r_counters[0];
473*12837Sfrank.van.der.linden@oracle.com #endif
474509Smrj 
4751414Scindi 	/*
4761414Scindi 	 * Set minimum fm capability level for i86pc platforms and then
4771414Scindi 	 * initialize error handling. Since we're the rootnex, we don't
4781414Scindi 	 * care what's returned in the fmcap field.
4791414Scindi 	 */
4801865Sdilpreet 	ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
4811865Sdilpreet 	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
4821414Scindi 	fmcap = ddi_system_fmcap;
4831414Scindi 	ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);
4841414Scindi 
485509Smrj 	/* initialize DMA related state */
486509Smrj 	e = rootnex_dma_init();
487509Smrj 	if (e != DDI_SUCCESS) {
488509Smrj 		kmem_free(rootnex_state, sizeof (rootnex_state_t));
489509Smrj 		return (DDI_FAILURE);
490509Smrj 	}
491509Smrj 
492509Smrj 	/* Add static root node properties */
493509Smrj 	rootnex_add_props(dip);
494509Smrj 
495509Smrj 	/* since we can't call ddi_report_dev() */
496509Smrj 	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));
497509Smrj 
498509Smrj 	/* Initialize rootnex event handle */
499509Smrj 	i_ddi_rootnex_init_events(dip);
500509Smrj 
50111600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
5027613SVikram.Hegde@Sun.COM 	e = iommulib_nexus_register(dip, &iommulib_nexops,
5037613SVikram.Hegde@Sun.COM 	    &rootnex_state->r_iommulib_handle);
5047613SVikram.Hegde@Sun.COM 
5057613SVikram.Hegde@Sun.COM 	ASSERT(e == DDI_SUCCESS);
5067613SVikram.Hegde@Sun.COM #endif
5077613SVikram.Hegde@Sun.COM 
508509Smrj 	return (DDI_SUCCESS);
509509Smrj }
510509Smrj 
511509Smrj 
512509Smrj /*
513509Smrj  * rootnex_detach()
514509Smrj  *
515509Smrj  */
5160Sstevel@tonic-gate /*ARGSUSED*/
5170Sstevel@tonic-gate static int
518509Smrj rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
519509Smrj {
520509Smrj 	switch (cmd) {
521509Smrj 	case DDI_SUSPEND:
52211600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
52311600SVikram.Hegde@Sun.COM 		return (immu_quiesce());
52411600SVikram.Hegde@Sun.COM #else
52511600SVikram.Hegde@Sun.COM 		return (DDI_SUCCESS);
52611600SVikram.Hegde@Sun.COM #endif
527509Smrj 	default:
528509Smrj 		return (DDI_FAILURE);
529509Smrj 	}
53011600SVikram.Hegde@Sun.COM 	/*NOTREACHED*/
53111600SVikram.Hegde@Sun.COM 
532509Smrj }
533509Smrj 
534509Smrj 
535509Smrj /*
536509Smrj  * rootnex_dma_init()
537509Smrj  *
538509Smrj  */
539509Smrj /*ARGSUSED*/
540509Smrj static int
541509Smrj rootnex_dma_init()
5420Sstevel@tonic-gate {
543509Smrj 	size_t bufsize;
544509Smrj 
545509Smrj 
546509Smrj 	/*
547509Smrj 	 * size of our cookie/window/copybuf state needed in dma bind that we
548509Smrj 	 * pre-alloc in dma_alloc_handle
549509Smrj 	 */
550509Smrj 	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
551509Smrj 	rootnex_state->r_prealloc_size =
552509Smrj 	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
553509Smrj 	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
554509Smrj 	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));
555509Smrj 
556509Smrj 	/*
557509Smrj 	 * set up the DDI DMA handle kmem cache, align each handle on 64 bytes,
558509Smrj 	 * allocate 16 extra bytes for struct pointer alignment
559509Smrj 	 * (p->dmai_private & dma->dp_prealloc_buffer)
560509Smrj 	 */
561509Smrj 	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
562509Smrj 	    rootnex_state->r_prealloc_size + 0x10;
563509Smrj 	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
564509Smrj 	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
565509Smrj 	if (rootnex_state->r_dmahdl_cache == NULL) {
566509Smrj 		return (DDI_FAILURE);
567509Smrj 	}
5680Sstevel@tonic-gate 
5690Sstevel@tonic-gate 	/*
5700Sstevel@tonic-gate 	 * allocate an array to track which major numbers we have printed warnings
5710Sstevel@tonic-gate 	 * for.
5720Sstevel@tonic-gate 	 */
5730Sstevel@tonic-gate 	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
5740Sstevel@tonic-gate 	    KM_SLEEP);
5750Sstevel@tonic-gate 
5760Sstevel@tonic-gate 	return (DDI_SUCCESS);
5770Sstevel@tonic-gate }
5780Sstevel@tonic-gate 
5790Sstevel@tonic-gate 
5800Sstevel@tonic-gate /*
581509Smrj  * rootnex_add_props()
582509Smrj  *
5830Sstevel@tonic-gate  */
5840Sstevel@tonic-gate static void
585509Smrj rootnex_add_props(dev_info_t *dip)
5860Sstevel@tonic-gate {
587509Smrj 	rootnex_intprop_t *rpp;
5880Sstevel@tonic-gate 	int i;
589509Smrj 
590509Smrj 	/* Add static integer/boolean properties to the root node */
591509Smrj 	rpp = rootnex_intprp;
592509Smrj 	for (i = 0; i < NROOT_INTPROPS; i++) {
593509Smrj 		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
594509Smrj 		    rpp[i].prop_name, rpp[i].prop_value);
5950Sstevel@tonic-gate 	}
5960Sstevel@tonic-gate }
5970Sstevel@tonic-gate 
598509Smrj 
599509Smrj 
600509Smrj /*
601509Smrj  * *************************
602509Smrj  *  ctlops related routines
603509Smrj  * *************************
604509Smrj  */
605509Smrj 
6060Sstevel@tonic-gate /*
607509Smrj  * rootnex_ctlops()
608509Smrj  *
6090Sstevel@tonic-gate  */
610693Sgovinda /*ARGSUSED*/
611509Smrj static int
612509Smrj rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
613509Smrj     void *arg, void *result)
614509Smrj {
615509Smrj 	int n, *ptr;
616509Smrj 	struct ddi_parent_private_data *pdp;
617509Smrj 
618509Smrj 	switch (ctlop) {
619509Smrj 	case DDI_CTLOPS_DMAPMAPC:
620509Smrj 		/*
621509Smrj 		 * Return 'partial' to indicate that dma mapping
622509Smrj 		 * has to be done in the main MMU.
623509Smrj 		 */
624509Smrj 		return (DDI_DMA_PARTIAL);
625509Smrj 
626509Smrj 	case DDI_CTLOPS_BTOP:
627509Smrj 		/*
628509Smrj 		 * Convert byte count input to physical page units.
629509Smrj 		 * (byte counts that are not a page-size multiple
630509Smrj 		 * are rounded down)
631509Smrj 		 */
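		/*
		 * For example, with 4K pages btop(0x2010) yields 2 here,
		 * while the round-up variant btopr(0x2010) in the
		 * DDI_CTLOPS_BTOPR case below yields 3.
		 */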
632509Smrj 		*(ulong_t *)result = btop(*(ulong_t *)arg);
633509Smrj 		return (DDI_SUCCESS);
634509Smrj 
635509Smrj 	case DDI_CTLOPS_PTOB:
636509Smrj 		/*
637509Smrj 		 * Convert size in physical pages to bytes
638509Smrj 		 */
639509Smrj 		*(ulong_t *)result = ptob(*(ulong_t *)arg);
640509Smrj 		return (DDI_SUCCESS);
641509Smrj 
642509Smrj 	case DDI_CTLOPS_BTOPR:
643509Smrj 		/*
644509Smrj 		 * Convert byte count input to physical page units
645509Smrj 		 * (byte counts that are not a page-size multiple
646509Smrj 		 * are rounded up)
647509Smrj 		 */
648509Smrj 		*(ulong_t *)result = btopr(*(ulong_t *)arg);
649509Smrj 		return (DDI_SUCCESS);
650509Smrj 
651509Smrj 	case DDI_CTLOPS_INITCHILD:
652509Smrj 		return (impl_ddi_sunbus_initchild(arg));
653509Smrj 
654509Smrj 	case DDI_CTLOPS_UNINITCHILD:
655509Smrj 		impl_ddi_sunbus_removechild(arg);
656509Smrj 		return (DDI_SUCCESS);
657509Smrj 
658509Smrj 	case DDI_CTLOPS_REPORTDEV:
659509Smrj 		return (rootnex_ctl_reportdev(rdip));
660509Smrj 
661509Smrj 	case DDI_CTLOPS_IOMIN:
662509Smrj 		/*
663509Smrj 		 * Nothing to do here but reflect back..
664509Smrj 		 */
665509Smrj 		return (DDI_SUCCESS);
666509Smrj 
667509Smrj 	case DDI_CTLOPS_REGSIZE:
668509Smrj 	case DDI_CTLOPS_NREGS:
669509Smrj 		break;
670509Smrj 
671509Smrj 	case DDI_CTLOPS_SIDDEV:
672509Smrj 		if (ndi_dev_is_prom_node(rdip))
673509Smrj 			return (DDI_SUCCESS);
674509Smrj 		if (ndi_dev_is_persistent_node(rdip))
675509Smrj 			return (DDI_SUCCESS);
676509Smrj 		return (DDI_FAILURE);
677509Smrj 
678509Smrj 	case DDI_CTLOPS_POWER:
679509Smrj 		return ((*pm_platform_power)((power_req_t *)arg));
680509Smrj 
681693Sgovinda 	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
682509Smrj 	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
683509Smrj 	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
684509Smrj 	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
685693Sgovinda 	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
686693Sgovinda 	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
687509Smrj 		if (!rootnex_state->r_reserved_msg_printed) {
688509Smrj 			rootnex_state->r_reserved_msg_printed = B_TRUE;
689509Smrj 			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
690509Smrj 			    "1 or more reserved/obsolete operations.");
691509Smrj 		}
692509Smrj 		return (DDI_FAILURE);
693509Smrj 
694509Smrj 	default:
695509Smrj 		return (DDI_FAILURE);
696509Smrj 	}
697509Smrj 	/*
698509Smrj 	 * The rest are for "hardware" properties
699509Smrj 	 */
700509Smrj 	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
701509Smrj 		return (DDI_FAILURE);
702509Smrj 
703509Smrj 	if (ctlop == DDI_CTLOPS_NREGS) {
704509Smrj 		ptr = (int *)result;
705509Smrj 		*ptr = pdp->par_nreg;
706509Smrj 	} else {
707509Smrj 		off_t *size = (off_t *)result;
708509Smrj 
709509Smrj 		ptr = (int *)arg;
710509Smrj 		n = *ptr;
711509Smrj 		if (n >= pdp->par_nreg) {
712509Smrj 			return (DDI_FAILURE);
713509Smrj 		}
714509Smrj 		*size = (off_t)pdp->par_reg[n].regspec_size;
715509Smrj 	}
716509Smrj 	return (DDI_SUCCESS);
717509Smrj }
7180Sstevel@tonic-gate 
7190Sstevel@tonic-gate 
7200Sstevel@tonic-gate /*
721509Smrj  * rootnex_ctl_reportdev()
722509Smrj  *
7230Sstevel@tonic-gate  */
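/*
 * A sketch of the console message this builds, assuming a hypothetical ISA
 * serial child (the device name and values are illustrative only):
 *
 *	asy0 at root: isa 0x3f8 sparc ipl 12
 */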
7240Sstevel@tonic-gate static int
725509Smrj rootnex_ctl_reportdev(dev_info_t *dev)
7260Sstevel@tonic-gate {
727509Smrj 	int i, n, len, f_len = 0;
728509Smrj 	char *buf;
729509Smrj 
730509Smrj 	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
731509Smrj 	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
732509Smrj 	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
733509Smrj 	len = strlen(buf);
734509Smrj 
735509Smrj 	for (i = 0; i < sparc_pd_getnreg(dev); i++) {
736509Smrj 
737509Smrj 		struct regspec *rp = sparc_pd_getreg(dev, i);
738509Smrj 
739509Smrj 		if (i == 0)
740509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
741509Smrj 			    ": ");
742509Smrj 		else
743509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
744509Smrj 			    " and ");
745509Smrj 		len = strlen(buf);
746509Smrj 
747509Smrj 		switch (rp->regspec_bustype) {
748509Smrj 
749509Smrj 		case BTEISA:
750509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
751509Smrj 			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
7520Sstevel@tonic-gate 			break;
753509Smrj 
754509Smrj 		case BTISA:
755509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
756509Smrj 			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
7570Sstevel@tonic-gate 			break;
758509Smrj 
759509Smrj 		default:
760509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
761509Smrj 			    "space %x offset %x",
762509Smrj 			    rp->regspec_bustype, rp->regspec_addr);
7630Sstevel@tonic-gate 			break;
7640Sstevel@tonic-gate 		}
765509Smrj 		len = strlen(buf);
7660Sstevel@tonic-gate 	}
767509Smrj 	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
768509Smrj 		int pri;
769509Smrj 
770509Smrj 		if (i != 0) {
771509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
772509Smrj 			    ",");
773509Smrj 			len = strlen(buf);
774509Smrj 		}
775509Smrj 		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
776509Smrj 		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
777509Smrj 		    " sparc ipl %d", pri);
778509Smrj 		len = strlen(buf);
7790Sstevel@tonic-gate 	}
780509Smrj #ifdef DEBUG
781509Smrj 	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
782509Smrj 		cmn_err(CE_NOTE, "next message is truncated: "
783509Smrj 		    "printed length 1024, real length %d", f_len);
784509Smrj 	}
785509Smrj #endif /* DEBUG */
786509Smrj 	cmn_err(CE_CONT, "?%s\n", buf);
787509Smrj 	kmem_free(buf, REPORTDEV_BUFSIZE);
7880Sstevel@tonic-gate 	return (DDI_SUCCESS);
7890Sstevel@tonic-gate }
7900Sstevel@tonic-gate 
791509Smrj 
792509Smrj /*
793509Smrj  * ******************
794509Smrj  *  map related code
795509Smrj  * ******************
796509Smrj  */
797509Smrj 
798509Smrj /*
799509Smrj  * rootnex_map()
800509Smrj  *
801509Smrj  */
8020Sstevel@tonic-gate static int
803509Smrj rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
804509Smrj     off_t len, caddr_t *vaddrp)
8050Sstevel@tonic-gate {
8060Sstevel@tonic-gate 	struct regspec *rp, tmp_reg;
8070Sstevel@tonic-gate 	ddi_map_req_t mr = *mp;		/* Get private copy of request */
8080Sstevel@tonic-gate 	int error;
8090Sstevel@tonic-gate 
8100Sstevel@tonic-gate 	mp = &mr;
8110Sstevel@tonic-gate 
8120Sstevel@tonic-gate 	switch (mp->map_op)  {
8130Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
8140Sstevel@tonic-gate 	case DDI_MO_UNMAP:
8150Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
8160Sstevel@tonic-gate 		break;
8170Sstevel@tonic-gate 	default:
8180Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8190Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
8200Sstevel@tonic-gate 		    mp->map_op);
8210Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8220Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
8230Sstevel@tonic-gate 	}
8240Sstevel@tonic-gate 
8250Sstevel@tonic-gate 	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
8260Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8270Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
8280Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8290Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
8300Sstevel@tonic-gate 	}
8310Sstevel@tonic-gate 
8320Sstevel@tonic-gate 	/*
8330Sstevel@tonic-gate 	 * First, if given an rnumber, convert it to a regspec...
8340Sstevel@tonic-gate 	 * (Presumably, this is on behalf of a child of the root node?)
8350Sstevel@tonic-gate 	 */
8360Sstevel@tonic-gate 
8370Sstevel@tonic-gate 	if (mp->map_type == DDI_MT_RNUMBER)  {
8380Sstevel@tonic-gate 
8390Sstevel@tonic-gate 		int rnumber = mp->map_obj.rnumber;
8400Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8410Sstevel@tonic-gate 		static char *out_of_range =
8420Sstevel@tonic-gate 		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
8430Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8440Sstevel@tonic-gate 
8450Sstevel@tonic-gate 		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
8460Sstevel@tonic-gate 		if (rp == NULL)  {
8470Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8480Sstevel@tonic-gate 			cmn_err(CE_WARN, out_of_range, rnumber,
8490Sstevel@tonic-gate 			    ddi_get_name(rdip));
8500Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8510Sstevel@tonic-gate 			return (DDI_ME_RNUMBER_RANGE);
8520Sstevel@tonic-gate 		}
8530Sstevel@tonic-gate 
8540Sstevel@tonic-gate 		/*
8550Sstevel@tonic-gate 		 * Convert the given ddi_map_req_t from rnumber to regspec...
8560Sstevel@tonic-gate 		 */
8570Sstevel@tonic-gate 
8580Sstevel@tonic-gate 		mp->map_type = DDI_MT_REGSPEC;
8590Sstevel@tonic-gate 		mp->map_obj.rp = rp;
8600Sstevel@tonic-gate 	}
8610Sstevel@tonic-gate 
8620Sstevel@tonic-gate 	/*
8630Sstevel@tonic-gate 	 * Adjust offset and length corresponding to the called values...
8640Sstevel@tonic-gate 	 * XXX: A non-zero length means override the one in the regspec
8650Sstevel@tonic-gate 	 * XXX: (regardless of what's in the parent's range?)
8660Sstevel@tonic-gate 	 */
8670Sstevel@tonic-gate 
8680Sstevel@tonic-gate 	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
8690Sstevel@tonic-gate 	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */
8700Sstevel@tonic-gate 
8710Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8725084Sjohnlev 	cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d "
8735084Sjohnlev 	    "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
8745084Sjohnlev 	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, offset,
8755084Sjohnlev 	    len, mp->map_handlep);
8760Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8770Sstevel@tonic-gate 
8780Sstevel@tonic-gate 	/*
8790Sstevel@tonic-gate 	 * I/O or memory mapping:
8800Sstevel@tonic-gate 	 *
8810Sstevel@tonic-gate 	 *	<bustype=0, addr=x, len=x>: memory
8820Sstevel@tonic-gate 	 *	<bustype=1, addr=x, len=x>: i/o
8830Sstevel@tonic-gate 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
8840Sstevel@tonic-gate 	 */
8850Sstevel@tonic-gate 
8860Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
8870Sstevel@tonic-gate 		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
8880Sstevel@tonic-gate 		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
8890Sstevel@tonic-gate 		    ddi_get_name(rdip), rp->regspec_bustype,
8900Sstevel@tonic-gate 		    rp->regspec_addr, rp->regspec_size);
8910Sstevel@tonic-gate 		return (DDI_ME_INVAL);
8920Sstevel@tonic-gate 	}
8930Sstevel@tonic-gate 
8940Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
8950Sstevel@tonic-gate 		/*
8960Sstevel@tonic-gate 		 * compatibility i/o mapping
8970Sstevel@tonic-gate 		 */
8980Sstevel@tonic-gate 		rp->regspec_bustype += (uint_t)offset;
8990Sstevel@tonic-gate 	} else {
9000Sstevel@tonic-gate 		/*
9010Sstevel@tonic-gate 		 * Normal memory or i/o mapping
9020Sstevel@tonic-gate 		 */
9030Sstevel@tonic-gate 		rp->regspec_addr += (uint_t)offset;
9040Sstevel@tonic-gate 	}
9050Sstevel@tonic-gate 
9060Sstevel@tonic-gate 	if (len != 0)
9070Sstevel@tonic-gate 		rp->regspec_size = (uint_t)len;
9080Sstevel@tonic-gate 
9090Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9105084Sjohnlev 	cmn_err(CE_CONT, "             <%s,%s> <0x%x, 0x%x, 0x%d> offset %d "
9115084Sjohnlev 	    "len %d handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
9125084Sjohnlev 	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
9135084Sjohnlev 	    offset, len, mp->map_handlep);
9140Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9150Sstevel@tonic-gate 
9160Sstevel@tonic-gate 	/*
9170Sstevel@tonic-gate 	 * Apply any parent ranges at this level, if applicable.
9180Sstevel@tonic-gate 	 * (This is where nexus specific regspec translation takes place.
9190Sstevel@tonic-gate 	 * Use of this function is implicit agreement that translation is
9200Sstevel@tonic-gate 	 * provided via ddi_apply_range.)
9210Sstevel@tonic-gate 	 */
9220Sstevel@tonic-gate 
9230Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9240Sstevel@tonic-gate 	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
9250Sstevel@tonic-gate 	    ddi_get_name(dip), ddi_get_name(rdip));
9260Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9270Sstevel@tonic-gate 
9280Sstevel@tonic-gate 	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
9290Sstevel@tonic-gate 		return (error);
9300Sstevel@tonic-gate 
9310Sstevel@tonic-gate 	switch (mp->map_op)  {
9320Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
9330Sstevel@tonic-gate 
9340Sstevel@tonic-gate 		/*
9350Sstevel@tonic-gate 		 * Set up the locked down kernel mapping to the regspec...
9360Sstevel@tonic-gate 		 */
9370Sstevel@tonic-gate 
9380Sstevel@tonic-gate 		return (rootnex_map_regspec(mp, vaddrp));
9390Sstevel@tonic-gate 
9400Sstevel@tonic-gate 	case DDI_MO_UNMAP:
9410Sstevel@tonic-gate 
9420Sstevel@tonic-gate 		/*
9430Sstevel@tonic-gate 		 * Release mapping...
9440Sstevel@tonic-gate 		 */
9450Sstevel@tonic-gate 
9460Sstevel@tonic-gate 		return (rootnex_unmap_regspec(mp, vaddrp));
9470Sstevel@tonic-gate 
9480Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
9490Sstevel@tonic-gate 
9500Sstevel@tonic-gate 		return (rootnex_map_handle(mp));
9510Sstevel@tonic-gate 
9520Sstevel@tonic-gate 	default:
9530Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
9540Sstevel@tonic-gate 	}
9550Sstevel@tonic-gate }
9560Sstevel@tonic-gate 
9570Sstevel@tonic-gate 
9580Sstevel@tonic-gate /*
959509Smrj  * rootnex_map_fault()
9600Sstevel@tonic-gate  *
9610Sstevel@tonic-gate  *	fault in mappings for requestors
9620Sstevel@tonic-gate  */
9630Sstevel@tonic-gate /*ARGSUSED*/
9640Sstevel@tonic-gate static int
965509Smrj rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
966509Smrj     struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
967509Smrj     uint_t lock)
9680Sstevel@tonic-gate {
9690Sstevel@tonic-gate 
9700Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9710Sstevel@tonic-gate 	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
9720Sstevel@tonic-gate 	ddi_map_debug(" Seg <%s>\n",
9730Sstevel@tonic-gate 	    seg->s_ops == &segdev_ops ? "segdev" :
9740Sstevel@tonic-gate 	    seg == &kvseg ? "segkmem" : "NONE!");
9750Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9760Sstevel@tonic-gate 
9770Sstevel@tonic-gate 	/*
9780Sstevel@tonic-gate 	 * This is all terribly broken, but it is a start
9790Sstevel@tonic-gate 	 *
9800Sstevel@tonic-gate 	 * XXX	Note that this test means that segdev_ops
9810Sstevel@tonic-gate 	 *	must be exported from seg_dev.c.
9820Sstevel@tonic-gate 	 * XXX	What about devices with their own segment drivers?
9830Sstevel@tonic-gate 	 */
9840Sstevel@tonic-gate 	if (seg->s_ops == &segdev_ops) {
9855084Sjohnlev 		struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
9860Sstevel@tonic-gate 
9870Sstevel@tonic-gate 		if (hat == NULL) {
9880Sstevel@tonic-gate 			/*
9890Sstevel@tonic-gate 			 * This is one plausible interpretation of
9900Sstevel@tonic-gate 			 * a null hat i.e. use the first hat on the
9910Sstevel@tonic-gate 			 * address space hat list which by convention is
9920Sstevel@tonic-gate 			 * the hat of the system MMU.  An alternative
9930Sstevel@tonic-gate 			 * would be to panic .. this might well be better ..
9940Sstevel@tonic-gate 			 */
9950Sstevel@tonic-gate 			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
9960Sstevel@tonic-gate 			hat = seg->s_as->a_hat;
9970Sstevel@tonic-gate 			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
9980Sstevel@tonic-gate 		}
9990Sstevel@tonic-gate 		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
10000Sstevel@tonic-gate 		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
10010Sstevel@tonic-gate 	} else if (seg == &kvseg && dp == NULL) {
10020Sstevel@tonic-gate 		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
10030Sstevel@tonic-gate 		    HAT_LOAD_LOCK);
10040Sstevel@tonic-gate 	} else
10050Sstevel@tonic-gate 		return (DDI_FAILURE);
10060Sstevel@tonic-gate 	return (DDI_SUCCESS);
10070Sstevel@tonic-gate }
10080Sstevel@tonic-gate 
10090Sstevel@tonic-gate 
10100Sstevel@tonic-gate /*
1011509Smrj  * rootnex_map_regspec()
1012509Smrj  *     we don't support mapping of I/O cards above 4Gb
10130Sstevel@tonic-gate  */
1014509Smrj static int
1015509Smrj rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1016509Smrj {
10175084Sjohnlev 	rootnex_addr_t rbase;
1018509Smrj 	void *cvaddr;
1019509Smrj 	uint_t npages, pgoffset;
1020509Smrj 	struct regspec *rp;
1021509Smrj 	ddi_acc_hdl_t *hp;
1022509Smrj 	ddi_acc_impl_t *ap;
1023509Smrj 	uint_t	hat_acc_flags;
10245084Sjohnlev 	paddr_t pbase;
1025509Smrj 
1026509Smrj 	rp = mp->map_obj.rp;
1027509Smrj 	hp = mp->map_handlep;
1028509Smrj 
1029509Smrj #ifdef	DDI_MAP_DEBUG
1030509Smrj 	ddi_map_debug(
1031509Smrj 	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1032509Smrj 	    rp->regspec_bustype, rp->regspec_addr,
1033509Smrj 	    rp->regspec_size, mp->map_handlep);
1034509Smrj #endif	/* DDI_MAP_DEBUG */
1035509Smrj 
1036509Smrj 	/*
1037509Smrj 	 * I/O or memory mapping
1038509Smrj 	 *
1039509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1040509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1041509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1042509Smrj 	 */
1043509Smrj 
1044509Smrj 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
1045509Smrj 		cmn_err(CE_WARN, "rootnex: invalid register spec"
1046509Smrj 		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
1047509Smrj 		    rp->regspec_addr, rp->regspec_size);
1048509Smrj 		return (DDI_FAILURE);
1049509Smrj 	}
1050509Smrj 
1051509Smrj 	if (rp->regspec_bustype != 0) {
1052509Smrj 		/*
1053509Smrj 		 * I/O space - needs a handle.
1054509Smrj 		 */
1055509Smrj 		if (hp == NULL) {
1056509Smrj 			return (DDI_FAILURE);
1057509Smrj 		}
1058509Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1059509Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
1060509Smrj 		impl_acc_hdl_init(hp);
1061509Smrj 
1062509Smrj 		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1063509Smrj #ifdef  DDI_MAP_DEBUG
10645084Sjohnlev 			ddi_map_debug("rootnex_map_regspec: mmap() "
10655084Sjohnlev 			    "to I/O space is not supported.\n");
1066509Smrj #endif  /* DDI_MAP_DEBUG */
1067509Smrj 			return (DDI_ME_INVAL);
1068509Smrj 		} else {
1069509Smrj 			/*
1070509Smrj 			 * 1275-compliant vs. compatibility i/o mapping
1071509Smrj 			 */
1072509Smrj 			*vaddrp =
1073509Smrj 			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
10745084Sjohnlev 			    ((caddr_t)(uintptr_t)rp->regspec_bustype) :
10755084Sjohnlev 			    ((caddr_t)(uintptr_t)rp->regspec_addr);
10765084Sjohnlev #ifdef __xpv
10775084Sjohnlev 			if (DOMAIN_IS_INITDOMAIN(xen_info)) {
10785084Sjohnlev 				hp->ah_pfn = xen_assign_pfn(
10795084Sjohnlev 				    mmu_btop((ulong_t)rp->regspec_addr &
10805084Sjohnlev 				    MMU_PAGEMASK));
10815084Sjohnlev 			} else {
10825084Sjohnlev 				hp->ah_pfn = mmu_btop(
10835084Sjohnlev 				    (ulong_t)rp->regspec_addr & MMU_PAGEMASK);
10845084Sjohnlev 			}
10855084Sjohnlev #else
10861865Sdilpreet 			hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr &
10875084Sjohnlev 			    MMU_PAGEMASK);
10885084Sjohnlev #endif
10891865Sdilpreet 			hp->ah_pnum = mmu_btopr(rp->regspec_size +
10901865Sdilpreet 			    (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET);
1091509Smrj 		}
1092509Smrj 
1093509Smrj #ifdef	DDI_MAP_DEBUG
1094509Smrj 		ddi_map_debug(
1095509Smrj 	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1096509Smrj 		    rp->regspec_size, *vaddrp);
1097509Smrj #endif	/* DDI_MAP_DEBUG */
1098509Smrj 		return (DDI_SUCCESS);
1099509Smrj 	}
1100509Smrj 
1101509Smrj 	/*
1102509Smrj 	 * Memory space
1103509Smrj 	 */
1104509Smrj 
1105509Smrj 	if (hp != NULL) {
1106509Smrj 		/*
1107509Smrj 		 * hat layer ignores
1108509Smrj 		 * hp->ah_acc.devacc_attr_endian_flags.
1109509Smrj 		 */
1110509Smrj 		switch (hp->ah_acc.devacc_attr_dataorder) {
1111509Smrj 		case DDI_STRICTORDER_ACC:
1112509Smrj 			hat_acc_flags = HAT_STRICTORDER;
1113509Smrj 			break;
1114509Smrj 		case DDI_UNORDERED_OK_ACC:
1115509Smrj 			hat_acc_flags = HAT_UNORDERED_OK;
1116509Smrj 			break;
1117509Smrj 		case DDI_MERGING_OK_ACC:
1118509Smrj 			hat_acc_flags = HAT_MERGING_OK;
1119509Smrj 			break;
1120509Smrj 		case DDI_LOADCACHING_OK_ACC:
1121509Smrj 			hat_acc_flags = HAT_LOADCACHING_OK;
1122509Smrj 			break;
1123509Smrj 		case DDI_STORECACHING_OK_ACC:
1124509Smrj 			hat_acc_flags = HAT_STORECACHING_OK;
1125509Smrj 			break;
1126509Smrj 		}
1127509Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1128509Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
1129509Smrj 		impl_acc_hdl_init(hp);
1130509Smrj 		hp->ah_hat_flags = hat_acc_flags;
1131509Smrj 	} else {
1132509Smrj 		hat_acc_flags = HAT_STRICTORDER;
1133509Smrj 	}
1134509Smrj 
11355084Sjohnlev 	rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK);
11365084Sjohnlev #ifdef __xpv
11375084Sjohnlev 	/*
11385084Sjohnlev 	 * If we're dom0, we're using a real device so we need to translate
11395084Sjohnlev 	 * the MA to a PA.
11405084Sjohnlev 	 */
11415084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
11425084Sjohnlev 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase)));
11435084Sjohnlev 	} else {
11445084Sjohnlev 		pbase = rbase;
11455084Sjohnlev 	}
11465084Sjohnlev #else
11475084Sjohnlev 	pbase = rbase;
11485084Sjohnlev #endif
11495084Sjohnlev 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1150509Smrj 
1151509Smrj 	if (rp->regspec_size == 0) {
1152509Smrj #ifdef  DDI_MAP_DEBUG
1153509Smrj 		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1154509Smrj #endif  /* DDI_MAP_DEBUG */
1155509Smrj 		return (DDI_ME_INVAL);
1156509Smrj 	}
1157509Smrj 
1158509Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
11595084Sjohnlev 		/* extra cast to make gcc happy */
11605084Sjohnlev 		*vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase));
1161509Smrj 	} else {
1162509Smrj 		npages = mmu_btopr(rp->regspec_size + pgoffset);
1163509Smrj 
1164509Smrj #ifdef	DDI_MAP_DEBUG
11655084Sjohnlev 		ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
11665084Sjohnlev 		    "physical %llx", npages, pbase);
1167509Smrj #endif	/* DDI_MAP_DEBUG */
1168509Smrj 
1169509Smrj 		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
1170509Smrj 		if (cvaddr == NULL)
1171509Smrj 			return (DDI_ME_NORESOURCES);
1172509Smrj 
1173509Smrj 		/*
1174509Smrj 		 * Now map in the pages we've allocated...
1175509Smrj 		 */
11765084Sjohnlev 		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages),
11775084Sjohnlev 		    mmu_btop(pbase), mp->map_prot | hat_acc_flags,
11785084Sjohnlev 		    HAT_LOAD_LOCK);
1179509Smrj 		*vaddrp = (caddr_t)cvaddr + pgoffset;
11801865Sdilpreet 
11811865Sdilpreet 		/* save away pfn and npages for FMA */
11821865Sdilpreet 		hp = mp->map_handlep;
11831865Sdilpreet 		if (hp) {
11845084Sjohnlev 			hp->ah_pfn = mmu_btop(pbase);
11851865Sdilpreet 			hp->ah_pnum = npages;
11861865Sdilpreet 		}
1187509Smrj 	}
1188509Smrj 
1189509Smrj #ifdef	DDI_MAP_DEBUG
1190509Smrj 	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
1191509Smrj #endif	/* DDI_MAP_DEBUG */
1192509Smrj 	return (DDI_SUCCESS);
1193509Smrj }
1194509Smrj 
11950Sstevel@tonic-gate 
11960Sstevel@tonic-gate /*
1197509Smrj  * rootnex_unmap_regspec()
1198509Smrj  *
1199509Smrj  */
1200509Smrj static int
1201509Smrj rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1202509Smrj {
1203509Smrj 	caddr_t addr = (caddr_t)*vaddrp;
1204509Smrj 	uint_t npages, pgoffset;
1205509Smrj 	struct regspec *rp;
1206509Smrj 
1207509Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
1208509Smrj 		return (0);
1209509Smrj 
1210509Smrj 	rp = mp->map_obj.rp;
1211509Smrj 
1212509Smrj 	if (rp->regspec_size == 0) {
1213509Smrj #ifdef  DDI_MAP_DEBUG
1214509Smrj 		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1215509Smrj #endif  /* DDI_MAP_DEBUG */
1216509Smrj 		return (DDI_ME_INVAL);
1217509Smrj 	}
1218509Smrj 
1219509Smrj 	/*
1220509Smrj 	 * I/O or memory mapping:
1221509Smrj 	 *
1222509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1223509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1224509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1225509Smrj 	 */
1226509Smrj 	if (rp->regspec_bustype != 0) {
1227509Smrj 		/*
1228509Smrj 		 * This is I/O space, which requires no particular
1229509Smrj 		 * processing on unmap since it isn't mapped in the
1230509Smrj 		 * first place.
1231509Smrj 		 */
1232509Smrj 		return (DDI_SUCCESS);
1233509Smrj 	}
1234509Smrj 
1235509Smrj 	/*
1236509Smrj 	 * Memory space
1237509Smrj 	 */
1238509Smrj 	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
1239509Smrj 	npages = mmu_btopr(rp->regspec_size + pgoffset);
1240509Smrj 	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
1241509Smrj 	device_arena_free(addr - pgoffset, ptob(npages));
1242509Smrj 
1243509Smrj 	/*
1244509Smrj 	 * Destroy the pointer - the mapping has logically gone
1245509Smrj 	 */
1246509Smrj 	*vaddrp = NULL;
1247509Smrj 
1248509Smrj 	return (DDI_SUCCESS);
1249509Smrj }
1250509Smrj 
1251509Smrj 
1252509Smrj /*
1253509Smrj  * rootnex_map_handle()
1254509Smrj  *
12550Sstevel@tonic-gate  */
1256509Smrj static int
1257509Smrj rootnex_map_handle(ddi_map_req_t *mp)
1258509Smrj {
12595084Sjohnlev 	rootnex_addr_t rbase;
1260509Smrj 	ddi_acc_hdl_t *hp;
1261509Smrj 	uint_t pgoffset;
1262509Smrj 	struct regspec *rp;
12635084Sjohnlev 	paddr_t pbase;
1264509Smrj 
1265509Smrj 	rp = mp->map_obj.rp;
1266509Smrj 
1267509Smrj #ifdef	DDI_MAP_DEBUG
1268509Smrj 	ddi_map_debug(
1269509Smrj 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1270509Smrj 	    rp->regspec_bustype, rp->regspec_addr,
1271509Smrj 	    rp->regspec_size, mp->map_handlep);
1272509Smrj #endif	/* DDI_MAP_DEBUG */
1273509Smrj 
1274509Smrj 	/*
1275509Smrj 	 * I/O or memory mapping:
1276509Smrj 	 *
1277509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1278509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1279509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1280509Smrj 	 */
1281509Smrj 	if (rp->regspec_bustype != 0) {
1282509Smrj 		/*
1283509Smrj 		 * This refers to I/O space, and we don't support "mapping"
1284509Smrj 		 * I/O space to a user.
1285509Smrj 		 */
1286509Smrj 		return (DDI_FAILURE);
1287509Smrj 	}
1288509Smrj 
1289509Smrj 	/*
1290509Smrj 	 * Set up the hat_flags for the mapping.
1291509Smrj 	 */
1292509Smrj 	hp = mp->map_handlep;
1293509Smrj 
1294509Smrj 	switch (hp->ah_acc.devacc_attr_endian_flags) {
1295509Smrj 	case DDI_NEVERSWAP_ACC:
1296509Smrj 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
1297509Smrj 		break;
1298509Smrj 	case DDI_STRUCTURE_LE_ACC:
1299509Smrj 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
1300509Smrj 		break;
1301509Smrj 	case DDI_STRUCTURE_BE_ACC:
1302509Smrj 		return (DDI_FAILURE);
1303509Smrj 	default:
1304509Smrj 		return (DDI_REGS_ACC_CONFLICT);
1305509Smrj 	}
1306509Smrj 
1307509Smrj 	switch (hp->ah_acc.devacc_attr_dataorder) {
1308509Smrj 	case DDI_STRICTORDER_ACC:
1309509Smrj 		break;
1310509Smrj 	case DDI_UNORDERED_OK_ACC:
1311509Smrj 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
1312509Smrj 		break;
1313509Smrj 	case DDI_MERGING_OK_ACC:
1314509Smrj 		hp->ah_hat_flags |= HAT_MERGING_OK;
1315509Smrj 		break;
1316509Smrj 	case DDI_LOADCACHING_OK_ACC:
1317509Smrj 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
1318509Smrj 		break;
1319509Smrj 	case DDI_STORECACHING_OK_ACC:
1320509Smrj 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
1321509Smrj 		break;
1322509Smrj 	default:
1323509Smrj 		return (DDI_FAILURE);
1324509Smrj 	}
1325509Smrj 
13265084Sjohnlev 	rbase = (rootnex_addr_t)rp->regspec_addr &
13275084Sjohnlev 	    (~(rootnex_addr_t)MMU_PAGEOFFSET);
13285084Sjohnlev 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1329509Smrj 
1330509Smrj 	if (rp->regspec_size == 0)
1331509Smrj 		return (DDI_ME_INVAL);
1332509Smrj 
13335084Sjohnlev #ifdef __xpv
13345084Sjohnlev 	/*
13355084Sjohnlev 	 * If we're dom0, we're using a real device so we need to translate
13365084Sjohnlev 	 * the MA to a PA.
13375084Sjohnlev 	 */
13385084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
13395084Sjohnlev 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) |
13405084Sjohnlev 		    (rbase & MMU_PAGEOFFSET);
13415084Sjohnlev 	} else {
13425084Sjohnlev 		pbase = rbase;
13435084Sjohnlev 	}
13445084Sjohnlev #else
13455084Sjohnlev 	pbase = rbase;
13465084Sjohnlev #endif
13475084Sjohnlev 
13485084Sjohnlev 	hp->ah_pfn = mmu_btop(pbase);
1349509Smrj 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
1350509Smrj 
1351509Smrj 	return (DDI_SUCCESS);
1352509Smrj }
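/*
 * A worked example for rootnex_map_handle() above (illustrative values,
 * assuming 4K pages): a regspec of <bustype=0, addr=0xFED00010, len=0x100>
 * gives pgoffset = 0x10 and rbase = 0xFED00000; on a non-__xpv kernel
 * pbase = rbase, so the access handle ends up with ah_pfn = 0xFED00 and
 * ah_pnum = mmu_btopr(0x110) = 1.
 */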
13530Sstevel@tonic-gate 
13540Sstevel@tonic-gate 
13550Sstevel@tonic-gate 
13560Sstevel@tonic-gate /*
1357509Smrj  * ************************
1358509Smrj  *  interrupt related code
1359509Smrj  * ************************
13600Sstevel@tonic-gate  */
13610Sstevel@tonic-gate 
13620Sstevel@tonic-gate /*
1363509Smrj  * rootnex_intr_ops()
13640Sstevel@tonic-gate  *	bus_intr_op() function for interrupt support
13650Sstevel@tonic-gate  */
13660Sstevel@tonic-gate /* ARGSUSED */
13670Sstevel@tonic-gate static int
13680Sstevel@tonic-gate rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
13690Sstevel@tonic-gate     ddi_intr_handle_impl_t *hdlp, void *result)
13700Sstevel@tonic-gate {
13710Sstevel@tonic-gate 	struct intrspec			*ispec;
13720Sstevel@tonic-gate 
13730Sstevel@tonic-gate 	DDI_INTR_NEXDBG((CE_CONT,
13740Sstevel@tonic-gate 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
13750Sstevel@tonic-gate 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
13760Sstevel@tonic-gate 
13770Sstevel@tonic-gate 	/* Process the interrupt operation */
13780Sstevel@tonic-gate 	switch (intr_op) {
13790Sstevel@tonic-gate 	case DDI_INTROP_GETCAP:
13800Sstevel@tonic-gate 		/* First check with pcplusmp */
13810Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13820Sstevel@tonic-gate 			return (DDI_FAILURE);
13830Sstevel@tonic-gate 
13840Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
13850Sstevel@tonic-gate 			*(int *)result = 0;
13860Sstevel@tonic-gate 			return (DDI_FAILURE);
13870Sstevel@tonic-gate 		}
13880Sstevel@tonic-gate 		break;
13890Sstevel@tonic-gate 	case DDI_INTROP_SETCAP:
13900Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13910Sstevel@tonic-gate 			return (DDI_FAILURE);
13920Sstevel@tonic-gate 
13930Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
13940Sstevel@tonic-gate 			return (DDI_FAILURE);
13950Sstevel@tonic-gate 		break;
13960Sstevel@tonic-gate 	case DDI_INTROP_ALLOC:
139712683SJimmy.Vetayases@oracle.com 		ASSERT(hdlp->ih_type == DDI_INTR_TYPE_FIXED);
139812683SJimmy.Vetayases@oracle.com 		return (rootnex_alloc_intr_fixed(rdip, hdlp, result));
13990Sstevel@tonic-gate 	case DDI_INTROP_FREE:
140012683SJimmy.Vetayases@oracle.com 		ASSERT(hdlp->ih_type == DDI_INTR_TYPE_FIXED);
140112683SJimmy.Vetayases@oracle.com 		return (rootnex_free_intr_fixed(rdip, hdlp));
14020Sstevel@tonic-gate 	case DDI_INTROP_GETPRI:
14030Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14040Sstevel@tonic-gate 			return (DDI_FAILURE);
14050Sstevel@tonic-gate 		*(int *)result = ispec->intrspec_pri;
14060Sstevel@tonic-gate 		break;
14070Sstevel@tonic-gate 	case DDI_INTROP_SETPRI:
14080Sstevel@tonic-gate 		/* Validate the interrupt priority passed to us */
14090Sstevel@tonic-gate 		if (*(int *)result > LOCK_LEVEL)
14100Sstevel@tonic-gate 			return (DDI_FAILURE);
14110Sstevel@tonic-gate 
14120Sstevel@tonic-gate 		/* Ensure that the PSM is initialized and the ispec is ok */
14130Sstevel@tonic-gate 		if ((psm_intr_ops == NULL) ||
14140Sstevel@tonic-gate 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
14150Sstevel@tonic-gate 			return (DDI_FAILURE);
14160Sstevel@tonic-gate 
14170Sstevel@tonic-gate 		/* Change the priority */
14180Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
14190Sstevel@tonic-gate 		    PSM_FAILURE)
14200Sstevel@tonic-gate 			return (DDI_FAILURE);
14210Sstevel@tonic-gate 
14220Sstevel@tonic-gate 		/* update the ispec with the new priority */
14230Sstevel@tonic-gate 		ispec->intrspec_pri =  *(int *)result;
14240Sstevel@tonic-gate 		break;
14250Sstevel@tonic-gate 	case DDI_INTROP_ADDISR:
14260Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14270Sstevel@tonic-gate 			return (DDI_FAILURE);
14280Sstevel@tonic-gate 		ispec->intrspec_func = hdlp->ih_cb_func;
14290Sstevel@tonic-gate 		break;
14300Sstevel@tonic-gate 	case DDI_INTROP_REMISR:
14310Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14320Sstevel@tonic-gate 			return (DDI_FAILURE);
14330Sstevel@tonic-gate 		ispec->intrspec_func = (uint_t (*)()) 0;
14340Sstevel@tonic-gate 		break;
14350Sstevel@tonic-gate 	case DDI_INTROP_ENABLE:
14360Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14370Sstevel@tonic-gate 			return (DDI_FAILURE);
14380Sstevel@tonic-gate 
14390Sstevel@tonic-gate 		/* Call psm_intr_ops() to translate the irq for this dip */
14400Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14410Sstevel@tonic-gate 			return (DDI_FAILURE);
14420Sstevel@tonic-gate 
1443916Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
144411465SKerry.Shu@Sun.COM 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
144511465SKerry.Shu@Sun.COM 		    (int *)&hdlp->ih_vector) == PSM_FAILURE)
144611465SKerry.Shu@Sun.COM 			return (DDI_FAILURE);
14470Sstevel@tonic-gate 
14480Sstevel@tonic-gate 		/* Add the interrupt handler */
14490Sstevel@tonic-gate 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
14500Sstevel@tonic-gate 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
1451916Sschwartz 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
14520Sstevel@tonic-gate 			return (DDI_FAILURE);
14530Sstevel@tonic-gate 		break;
14540Sstevel@tonic-gate 	case DDI_INTROP_DISABLE:
14550Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14560Sstevel@tonic-gate 			return (DDI_FAILURE);
14570Sstevel@tonic-gate 
14580Sstevel@tonic-gate 		/* Call psm_intr_ops() to translate the irq for this dip */
14590Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14600Sstevel@tonic-gate 			return (DDI_FAILURE);
14610Sstevel@tonic-gate 
1462916Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
14630Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp,
14640Sstevel@tonic-gate 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
14650Sstevel@tonic-gate 
14660Sstevel@tonic-gate 		/* Remove the interrupt handler */
14670Sstevel@tonic-gate 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
14680Sstevel@tonic-gate 		    hdlp->ih_cb_func, hdlp->ih_vector);
14690Sstevel@tonic-gate 		break;
14700Sstevel@tonic-gate 	case DDI_INTROP_SETMASK:
14710Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14720Sstevel@tonic-gate 			return (DDI_FAILURE);
14730Sstevel@tonic-gate 
14740Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
14750Sstevel@tonic-gate 			return (DDI_FAILURE);
14760Sstevel@tonic-gate 		break;
14770Sstevel@tonic-gate 	case DDI_INTROP_CLRMASK:
14780Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14790Sstevel@tonic-gate 			return (DDI_FAILURE);
14800Sstevel@tonic-gate 
14810Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
14820Sstevel@tonic-gate 			return (DDI_FAILURE);
14830Sstevel@tonic-gate 		break;
14840Sstevel@tonic-gate 	case DDI_INTROP_GETPENDING:
14850Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14860Sstevel@tonic-gate 			return (DDI_FAILURE);
14870Sstevel@tonic-gate 
14880Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
14890Sstevel@tonic-gate 		    result)) {
14900Sstevel@tonic-gate 			*(int *)result = 0;
14910Sstevel@tonic-gate 			return (DDI_FAILURE);
14920Sstevel@tonic-gate 		}
14930Sstevel@tonic-gate 		break;
14942580Sanish 	case DDI_INTROP_NAVAIL:
14950Sstevel@tonic-gate 	case DDI_INTROP_NINTRS:
14962580Sanish 		*(int *)result = i_ddi_get_intx_nintrs(rdip);
14972580Sanish 		if (*(int *)result == 0) {
14980Sstevel@tonic-gate 			/*
14990Sstevel@tonic-gate 			 * Special case for the 'pcic' driver only. This driver
15000Sstevel@tonic-gate 			 * is a child of the 'isa' and 'rootnex' drivers.
15010Sstevel@tonic-gate 			 *
15020Sstevel@tonic-gate 			 * See detailed comments on this in the function
15030Sstevel@tonic-gate 			 * rootnex_get_ispec().
15040Sstevel@tonic-gate 			 *
15050Sstevel@tonic-gate 			 * Children of 'pcic' send the 'NINTRS' request all the
15060Sstevel@tonic-gate 			 * way to the rootnex driver, but the 'pdp->par_nintr'
15070Sstevel@tonic-gate 			 * field may not be initialized. So, we fake it here
15080Sstevel@tonic-gate 			 * to return 1 (a la what the PCMCIA nexus does).
15090Sstevel@tonic-gate 			 */
15100Sstevel@tonic-gate 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
15110Sstevel@tonic-gate 				*(int *)result = 1;
15122580Sanish 			else
15132580Sanish 				return (DDI_FAILURE);
15140Sstevel@tonic-gate 		}
15150Sstevel@tonic-gate 		break;
15160Sstevel@tonic-gate 	case DDI_INTROP_SUPPORTED_TYPES:
15172580Sanish 		*(int *)result = DDI_INTR_TYPE_FIXED;	/* Always ... */
15180Sstevel@tonic-gate 		break;
15190Sstevel@tonic-gate 	default:
15200Sstevel@tonic-gate 		return (DDI_FAILURE);
15210Sstevel@tonic-gate 	}
15220Sstevel@tonic-gate 
15230Sstevel@tonic-gate 	return (DDI_SUCCESS);
15240Sstevel@tonic-gate }
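/*
 * For reference, the DDI_INTROP_* cases above are normally reached through
 * the generic DDI interrupt interfaces in a leaf driver, e.g. (a sketch with
 * error handling elided; my_isr/arg1/arg2 are the driver's own handler and
 * arguments):
 *
 *	ddi_intr_handle_t hdl[1];
 *	int actual;
 *
 *	(void) ddi_intr_alloc(dip, hdl, DDI_INTR_TYPE_FIXED, 0, 1,
 *	    &actual, DDI_INTR_ALLOC_NORMAL);
 *	(void) ddi_intr_add_handler(hdl[0], my_isr, arg1, arg2);
 *	(void) ddi_intr_enable(hdl[0]);
 *
 * which the framework routes up the devinfo tree as bus_intr_op() requests
 * (roughly ALLOC, ADDISR and ENABLE) that land in rootnex_intr_ops().
 */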
15250Sstevel@tonic-gate 
15260Sstevel@tonic-gate 
15270Sstevel@tonic-gate /*
1528509Smrj  * rootnex_get_ispec()
1529509Smrj  *	convert an interrupt number to an interrupt specification.
1530509Smrj  *	The interrupt number determines which interrupt spec will be
1531509Smrj  *	returned if more than one exists.
1532509Smrj  *
1533509Smrj  *	Look into the parent private data area of the 'rdip' to find out
1534509Smrj  *	the interrupt specification.  First check to make sure there is
1535509Smrj  *	one that matches "inumber" and then return a pointer to it.
1536509Smrj  *
1537509Smrj  *	Return NULL if one could not be found.
1538509Smrj  *
1539509Smrj  *	NOTE: This is needed for rootnex_intr_ops()
1540509Smrj  */
1541509Smrj static struct intrspec *
1542509Smrj rootnex_get_ispec(dev_info_t *rdip, int inum)
1543509Smrj {
1544509Smrj 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
1545509Smrj 
1546509Smrj 	/*
1547509Smrj 	 * Special case handling for drivers that provide their own
1548509Smrj 	 * intrspec structures instead of relying on the DDI framework.
1549509Smrj 	 *
1550509Smrj 	 * A driver for broken hardware in ON could potentially provide its
1551509Smrj 	 * own intrspec structure, instead of relying on the DDI framework.
1552509Smrj 	 * If these drivers are children of 'rootnex' then we need to
1553509Smrj 	 * continue to provide backward compatibility to them here.
1554509Smrj 	 *
1555509Smrj 	 * The following check is a special case for the 'pcic' driver, which
1556509Smrj 	 * was found to have broken hardware and provides its own intrspec.
1557509Smrj 	 *
1558509Smrj 	 * Verbatim comments from this driver are shown here:
1559509Smrj 	 * "Don't use the ddi_add_intr since we don't have a
1560509Smrj 	 * default intrspec in all cases."
1561509Smrj 	 *
1562509Smrj 	 * Since an 'ispec' may not always be created for it,
1563509Smrj 	 * check for that and create one if needed.
1564509Smrj 	 *
1565509Smrj 	 * NOTE: Currently 'pcic' is the only driver found to do this.
1566509Smrj 	 */
1567509Smrj 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1568509Smrj 		pdp->par_nintr = 1;
1569509Smrj 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
1570509Smrj 		    pdp->par_nintr, KM_SLEEP);
1571509Smrj 	}
1572509Smrj 
1573509Smrj 	/* Validate the interrupt number */
1574509Smrj 	if (inum >= pdp->par_nintr)
1575509Smrj 		return (NULL);
1576509Smrj 
1577509Smrj 	/* Get the interrupt structure pointer and return that */
1578509Smrj 	return ((struct intrspec *)&pdp->par_intr[inum]);
1579509Smrj }
1580509Smrj 
158112683SJimmy.Vetayases@oracle.com /*
158212683SJimmy.Vetayases@oracle.com  * Allocate interrupt vector for FIXED (legacy) type.
158312683SJimmy.Vetayases@oracle.com  */
158412683SJimmy.Vetayases@oracle.com static int
158512683SJimmy.Vetayases@oracle.com rootnex_alloc_intr_fixed(dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp,
158612683SJimmy.Vetayases@oracle.com     void *result)
158712683SJimmy.Vetayases@oracle.com {
158812683SJimmy.Vetayases@oracle.com 	struct intrspec		*ispec;
158912683SJimmy.Vetayases@oracle.com 	ddi_intr_handle_impl_t	info_hdl;
159012683SJimmy.Vetayases@oracle.com 	int			ret;
159112683SJimmy.Vetayases@oracle.com 	int			free_phdl = 0;
159212683SJimmy.Vetayases@oracle.com 	apic_get_type_t		type_info;
159312683SJimmy.Vetayases@oracle.com 
159412683SJimmy.Vetayases@oracle.com 	if (psm_intr_ops == NULL)
159512683SJimmy.Vetayases@oracle.com 		return (DDI_FAILURE);
159612683SJimmy.Vetayases@oracle.com 
159712683SJimmy.Vetayases@oracle.com 	if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
159812683SJimmy.Vetayases@oracle.com 		return (DDI_FAILURE);
159912683SJimmy.Vetayases@oracle.com 
160012683SJimmy.Vetayases@oracle.com 	/*
160112683SJimmy.Vetayases@oracle.com 	 * If the PSM module is "APIX", pass the request on to it
160212683SJimmy.Vetayases@oracle.com 	 * to allocate the vector now.
160312683SJimmy.Vetayases@oracle.com 	 */
160412683SJimmy.Vetayases@oracle.com 	bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t));
160512683SJimmy.Vetayases@oracle.com 	info_hdl.ih_private = &type_info;
160612683SJimmy.Vetayases@oracle.com 	if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) ==
160712683SJimmy.Vetayases@oracle.com 	    PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) {
160812683SJimmy.Vetayases@oracle.com 		if (hdlp->ih_private == NULL) { /* allocate phdl structure */
160912683SJimmy.Vetayases@oracle.com 			free_phdl = 1;
161012683SJimmy.Vetayases@oracle.com 			i_ddi_alloc_intr_phdl(hdlp);
161112683SJimmy.Vetayases@oracle.com 		}
161212683SJimmy.Vetayases@oracle.com 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
161312683SJimmy.Vetayases@oracle.com 		ret = (*psm_intr_ops)(rdip, hdlp,
161412683SJimmy.Vetayases@oracle.com 		    PSM_INTR_OP_ALLOC_VECTORS, result);
161512683SJimmy.Vetayases@oracle.com 		if (free_phdl) { /* free up the phdl structure */
161612683SJimmy.Vetayases@oracle.com 			free_phdl = 0;
161712683SJimmy.Vetayases@oracle.com 			i_ddi_free_intr_phdl(hdlp);
161812683SJimmy.Vetayases@oracle.com 			hdlp->ih_private = NULL;
161912683SJimmy.Vetayases@oracle.com 		}
162012683SJimmy.Vetayases@oracle.com 	} else {
162112683SJimmy.Vetayases@oracle.com 		/*
162212683SJimmy.Vetayases@oracle.com 		 * No APIX module; fall back to the old scheme where the
162312683SJimmy.Vetayases@oracle.com 		 * interrupt vector is allocated during the ddi_enable_intr() call.
162412683SJimmy.Vetayases@oracle.com 		 */
162512683SJimmy.Vetayases@oracle.com 		hdlp->ih_pri = ispec->intrspec_pri;
162612683SJimmy.Vetayases@oracle.com 		*(int *)result = hdlp->ih_scratch1;
162712683SJimmy.Vetayases@oracle.com 		ret = DDI_SUCCESS;
162812683SJimmy.Vetayases@oracle.com 	}
162912683SJimmy.Vetayases@oracle.com 
163012683SJimmy.Vetayases@oracle.com 	return (ret);
163112683SJimmy.Vetayases@oracle.com }
163212683SJimmy.Vetayases@oracle.com 
163312683SJimmy.Vetayases@oracle.com /*
163412683SJimmy.Vetayases@oracle.com  * Free up interrupt vector for FIXED (legacy) type.
163512683SJimmy.Vetayases@oracle.com  */
163612683SJimmy.Vetayases@oracle.com static int
163712683SJimmy.Vetayases@oracle.com rootnex_free_intr_fixed(dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
163812683SJimmy.Vetayases@oracle.com {
163912683SJimmy.Vetayases@oracle.com 	struct intrspec			*ispec;
164012683SJimmy.Vetayases@oracle.com 	struct ddi_parent_private_data	*pdp;
164112683SJimmy.Vetayases@oracle.com 	ddi_intr_handle_impl_t		info_hdl;
164212683SJimmy.Vetayases@oracle.com 	int				ret;
164312683SJimmy.Vetayases@oracle.com 	apic_get_type_t			type_info;
164412683SJimmy.Vetayases@oracle.com 
164512683SJimmy.Vetayases@oracle.com 	if (psm_intr_ops == NULL)
164612683SJimmy.Vetayases@oracle.com 		return (DDI_FAILURE);
164712683SJimmy.Vetayases@oracle.com 
164812683SJimmy.Vetayases@oracle.com 	/*
164912683SJimmy.Vetayases@oracle.com 	 * If the PSM module is "APIX", pass the request on to it
165012683SJimmy.Vetayases@oracle.com 	 * to free up the vector now.
165112683SJimmy.Vetayases@oracle.com 	 */
165212683SJimmy.Vetayases@oracle.com 	bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t));
165312683SJimmy.Vetayases@oracle.com 	info_hdl.ih_private = &type_info;
165412683SJimmy.Vetayases@oracle.com 	if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) ==
165512683SJimmy.Vetayases@oracle.com 	    PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) {
165612683SJimmy.Vetayases@oracle.com 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
165712683SJimmy.Vetayases@oracle.com 			return (DDI_FAILURE);
165812683SJimmy.Vetayases@oracle.com 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
165912683SJimmy.Vetayases@oracle.com 		ret = (*psm_intr_ops)(rdip, hdlp,
166012683SJimmy.Vetayases@oracle.com 		    PSM_INTR_OP_FREE_VECTORS, NULL);
166112683SJimmy.Vetayases@oracle.com 	} else {
166212683SJimmy.Vetayases@oracle.com 		/*
166312683SJimmy.Vetayases@oracle.com 		 * No APIX module; fall back to the old scheme where
166412683SJimmy.Vetayases@oracle.com 		 * the interrupt vector was already freed during
166512683SJimmy.Vetayases@oracle.com 		 * the ddi_disable_intr() call.
166612683SJimmy.Vetayases@oracle.com 		 */
166712683SJimmy.Vetayases@oracle.com 		ret = DDI_SUCCESS;
166812683SJimmy.Vetayases@oracle.com 	}
166912683SJimmy.Vetayases@oracle.com 
167012683SJimmy.Vetayases@oracle.com 	pdp = ddi_get_parent_data(rdip);
167112683SJimmy.Vetayases@oracle.com 
167212683SJimmy.Vetayases@oracle.com 	/*
167312683SJimmy.Vetayases@oracle.com 	 * Special case for the 'pcic' driver only.
167412683SJimmy.Vetayases@oracle.com 	 * If an intrspec was created for it, clean it up here.
167512683SJimmy.Vetayases@oracle.com 	 * See the detailed comments on this in the function
167612683SJimmy.Vetayases@oracle.com 	 * rootnex_get_ispec().
167712683SJimmy.Vetayases@oracle.com 	 */
167812683SJimmy.Vetayases@oracle.com 	if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
167912683SJimmy.Vetayases@oracle.com 		kmem_free(pdp->par_intr, sizeof (struct intrspec) *
168012683SJimmy.Vetayases@oracle.com 		    pdp->par_nintr);
168112683SJimmy.Vetayases@oracle.com 		/*
168212683SJimmy.Vetayases@oracle.com 		 * Set it to zero; so that
168312683SJimmy.Vetayases@oracle.com 		 * Clear the fields so that the
168412683SJimmy.Vetayases@oracle.com 		 * DDI framework doesn't free them again
168512683SJimmy.Vetayases@oracle.com 		pdp->par_intr = NULL;
168612683SJimmy.Vetayases@oracle.com 		pdp->par_nintr = 0;
168712683SJimmy.Vetayases@oracle.com 	}
168812683SJimmy.Vetayases@oracle.com 
168912683SJimmy.Vetayases@oracle.com 	return (ret);
169012683SJimmy.Vetayases@oracle.com }
169112683SJimmy.Vetayases@oracle.com 
1692509Smrj 
1693509Smrj /*
1694509Smrj  * ******************
1695509Smrj  *  dma related code
1696509Smrj  * ******************
1697509Smrj  */
1698509Smrj 
1699509Smrj /*ARGSUSED*/
1700509Smrj static int
17017613SVikram.Hegde@Sun.COM rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
17027613SVikram.Hegde@Sun.COM     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
17037613SVikram.Hegde@Sun.COM     ddi_dma_handle_t *handlep)
1704509Smrj {
1705509Smrj 	uint64_t maxsegmentsize_ll;
1706509Smrj 	uint_t maxsegmentsize;
1707509Smrj 	ddi_dma_impl_t *hp;
1708509Smrj 	rootnex_dma_t *dma;
1709509Smrj 	uint64_t count_max;
1710509Smrj 	uint64_t seg;
1711509Smrj 	int kmflag;
1712509Smrj 	int e;
1713509Smrj 
1714509Smrj 
1715509Smrj 	/* convert our sleep flags */
1716509Smrj 	if (waitfp == DDI_DMA_SLEEP) {
1717509Smrj 		kmflag = KM_SLEEP;
1718509Smrj 	} else {
1719509Smrj 		kmflag = KM_NOSLEEP;
1720509Smrj 	}
1721509Smrj 
1722509Smrj 	/*
1723509Smrj 	 * We try to do only one memory allocation here. We'll do a little
1724509Smrj 	 * pointer manipulation later. If the bind ends up taking more than
1725509Smrj 	 * our prealloc's space, we'll have to allocate more memory in the
1726509Smrj 	 * bind operation. Not great, but much better than before and the
1727509Smrj 	 * best we can do with the current bind interfaces.
1728509Smrj 	 */
1729509Smrj 	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
1730509Smrj 	if (hp == NULL) {
1731509Smrj 		if (waitfp != DDI_DMA_DONTWAIT) {
1732509Smrj 			ddi_set_callback(waitfp, arg,
1733509Smrj 			    &rootnex_state->r_dvma_call_list_id);
1734509Smrj 		}
1735509Smrj 		return (DDI_DMA_NORESOURCES);
1736509Smrj 	}
1737509Smrj 
1738509Smrj 	/* Do our pointer manipulation now, align the structures */
1739509Smrj 	hp->dmai_private = (void *)(((uintptr_t)hp +
1740509Smrj 	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
1741509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1742509Smrj 	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
1743509Smrj 	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);
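	/*
	 * The single allocation from r_dmahdl_cache is thus carved up as
	 * (conceptually, with 8-byte alignment padding where needed):
	 *
	 *	+----------------+---------------+-------------------------+
	 *	| ddi_dma_impl_t | rootnex_dma_t | prealloc cookie buffer  |
	 *	+----------------+---------------+-------------------------+
	 *	hp               hp->dmai_private dma->dp_prealloc_buffer
	 */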
1744509Smrj 
1745509Smrj 	/* setup the handle */
1746509Smrj 	rootnex_clean_dmahdl(hp);
174712027SStephen.Hanson@Sun.COM 	hp->dmai_error.err_fep = NULL;
174812027SStephen.Hanson@Sun.COM 	hp->dmai_error.err_cf = NULL;
1749509Smrj 	dma->dp_dip = rdip;
1750509Smrj 	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
1751509Smrj 	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
1752509Smrj 	hp->dmai_minxfer = attr->dma_attr_minxfer;
1753509Smrj 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
1754509Smrj 	hp->dmai_rdip = rdip;
1755509Smrj 	hp->dmai_attr = *attr;
1756509Smrj 
1757509Smrj 	/* we don't need to worry about the SPL since we do a tryenter */
1758509Smrj 	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
1759509Smrj 
1760509Smrj 	/*
1761509Smrj 	 * Figure out our maximum segment size. If the segment size is greater
1762509Smrj 	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1763509Smrj 	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1764509Smrj 	 * dma_attr_count_max are size-1 type values.
1765509Smrj 	 *
1766509Smrj 	 * Maximum segment size is the largest physically contiguous chunk of
1767509Smrj 	 * memory that we can return from a bind (i.e. the maximum size of a
1768509Smrj 	 * single cookie).
1769509Smrj 	 */
1770509Smrj 
1771509Smrj 	/* handle the rollover cases */
1772509Smrj 	seg = attr->dma_attr_seg + 1;
1773509Smrj 	if (seg < attr->dma_attr_seg) {
1774509Smrj 		seg = attr->dma_attr_seg;
1775509Smrj 	}
1776509Smrj 	count_max = attr->dma_attr_count_max + 1;
1777509Smrj 	if (count_max < attr->dma_attr_count_max) {
1778509Smrj 		count_max = attr->dma_attr_count_max;
1779509Smrj 	}
1780509Smrj 
1781509Smrj 	/*
1782509Smrj 	 * granularity may or may not be a power of two. If it isn't, we can't
1783509Smrj 	 * use a simple mask.
1784509Smrj 	 */
1785509Smrj 	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
1786509Smrj 		dma->dp_granularity_power_2 = B_FALSE;
1787509Smrj 	} else {
1788509Smrj 		dma->dp_granularity_power_2 = B_TRUE;
1789509Smrj 	}
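	/*
	 * e.g. dma_attr_granular == 0x200 gives 0x200 & 0x1FF == 0 (a power
	 * of two, so simple masking works), while 0x180 gives 0x180 & 0x17F
	 * != 0 (not a power of two, so we fall back to modulo arithmetic
	 * below). Values are illustrative.
	 */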
1790509Smrj 
1791509Smrj 	/*
1792509Smrj 	 * maxxfer should be a whole multiple of granularity. If we're going to
1793509Smrj 	 * break up a window because we're greater than maxxfer, we might as
1794509Smrj 	 * well make sure maxxfer is a whole multiple so we don't have to
1795509Smrj 	 * worry about trimming the window later on for this case.
1796509Smrj 	 */
1797509Smrj 	if (attr->dma_attr_granular > 1) {
1798509Smrj 		if (dma->dp_granularity_power_2) {
1799509Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1800509Smrj 			    (attr->dma_attr_maxxfer &
1801509Smrj 			    (attr->dma_attr_granular - 1));
1802509Smrj 		} else {
1803509Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1804509Smrj 			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
1805509Smrj 		}
1806509Smrj 	} else {
1807509Smrj 		dma->dp_maxxfer = attr->dma_attr_maxxfer;
1808509Smrj 	}
1809509Smrj 
1810509Smrj 	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
1811509Smrj 	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
1812509Smrj 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
1813509Smrj 		maxsegmentsize = 0xFFFFFFFF;
1814509Smrj 	} else {
1815509Smrj 		maxsegmentsize = maxsegmentsize_ll;
1816509Smrj 	}
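	/*
	 * Worked example (illustrative attributes): with dma_attr_seg ==
	 * 0xFFFF, dma_attr_count_max == 0xFFFF, dma_attr_maxxfer == 0x10000
	 * and dma_attr_granular == 1, we get seg == count_max == 0x10000 and
	 * dp_maxxfer == 0x10000, so maxsegmentsize == 0x10000 (64K); no
	 * single cookie handed back for such a device will exceed 64K.
	 */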
1817509Smrj 	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
1818509Smrj 	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
181911793SMark.Johnson@Sun.COM 	dma->dp_sglinfo.si_flags = attr->dma_attr_flags;
1820509Smrj 
1821509Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1822509Smrj 	if (rootnex_alloc_check_parms) {
1823509Smrj 		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
1824509Smrj 		if (e != DDI_SUCCESS) {
1825*12837Sfrank.van.der.linden@oracle.com 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
1826509Smrj 			(void) rootnex_dma_freehdl(dip, rdip,
1827509Smrj 			    (ddi_dma_handle_t)hp);
1828509Smrj 			return (e);
1829509Smrj 		}
1830509Smrj 	}
1831509Smrj 
1832509Smrj 	*handlep = (ddi_dma_handle_t)hp;
1833509Smrj 
183410902SMark.Johnson@Sun.COM 	ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
183510902SMark.Johnson@Sun.COM 	ROOTNEX_DPROBE1(rootnex__alloc__handle, uint64_t,
1836509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1837509Smrj 
1838509Smrj 	return (DDI_SUCCESS);
1839509Smrj }
1840509Smrj 
1841509Smrj 
1842509Smrj /*
18437613SVikram.Hegde@Sun.COM  * rootnex_dma_allochdl()
18447613SVikram.Hegde@Sun.COM  *    called from ddi_dma_alloc_handle().
1845509Smrj  */
18467613SVikram.Hegde@Sun.COM static int
18477613SVikram.Hegde@Sun.COM rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
18487613SVikram.Hegde@Sun.COM     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
18497613SVikram.Hegde@Sun.COM {
185012027SStephen.Hanson@Sun.COM 	int retval;
185111600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
18527613SVikram.Hegde@Sun.COM 	uint_t error = ENOTSUP;
18537613SVikram.Hegde@Sun.COM 
18547613SVikram.Hegde@Sun.COM 	retval = iommulib_nex_open(rdip, &error);
18557613SVikram.Hegde@Sun.COM 
18567613SVikram.Hegde@Sun.COM 	if (retval != DDI_SUCCESS && error == ENOTSUP) {
18577613SVikram.Hegde@Sun.COM 		/* No IOMMU */
18587613SVikram.Hegde@Sun.COM 		return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
18597613SVikram.Hegde@Sun.COM 		    handlep));
18607613SVikram.Hegde@Sun.COM 	} else if (retval != DDI_SUCCESS) {
18617613SVikram.Hegde@Sun.COM 		return (DDI_FAILURE);
18627613SVikram.Hegde@Sun.COM 	}
18637613SVikram.Hegde@Sun.COM 
186410216SVikram.Hegde@Sun.COM 	ASSERT(IOMMU_USED(rdip));
18657613SVikram.Hegde@Sun.COM 
18667613SVikram.Hegde@Sun.COM 	/* has an IOMMU */
186712027SStephen.Hanson@Sun.COM 	retval = iommulib_nexdma_allochdl(dip, rdip, attr,
186812027SStephen.Hanson@Sun.COM 	    waitfp, arg, handlep);
18697613SVikram.Hegde@Sun.COM #else
187012027SStephen.Hanson@Sun.COM 	retval = rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
187112027SStephen.Hanson@Sun.COM 	    handlep);
18727613SVikram.Hegde@Sun.COM #endif
187312027SStephen.Hanson@Sun.COM 	if (retval == DDI_SUCCESS)
187412027SStephen.Hanson@Sun.COM 		ndi_fmc_insert(rdip, DMA_HANDLE, *handlep, NULL);
187512027SStephen.Hanson@Sun.COM 	return (retval);
18767613SVikram.Hegde@Sun.COM }
18777613SVikram.Hegde@Sun.COM 
1878509Smrj /*ARGSUSED*/
1879509Smrj static int
18807613SVikram.Hegde@Sun.COM rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
18817613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle)
1882509Smrj {
1883509Smrj 	ddi_dma_impl_t *hp;
1884509Smrj 	rootnex_dma_t *dma;
1885509Smrj 
1886509Smrj 
1887509Smrj 	hp = (ddi_dma_impl_t *)handle;
1888509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1889509Smrj 
1890509Smrj 	/* unbind should have been called first */
1891509Smrj 	ASSERT(!dma->dp_inuse);
1892509Smrj 
1893509Smrj 	mutex_destroy(&dma->dp_mutex);
1894509Smrj 	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
1895509Smrj 
189610902SMark.Johnson@Sun.COM 	ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
189710902SMark.Johnson@Sun.COM 	ROOTNEX_DPROBE1(rootnex__free__handle, uint64_t,
1898509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1899509Smrj 
1900509Smrj 	if (rootnex_state->r_dvma_call_list_id)
1901509Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
1902509Smrj 
1903509Smrj 	return (DDI_SUCCESS);
1904509Smrj }
1905509Smrj 
1906509Smrj /*
19077613SVikram.Hegde@Sun.COM  * rootnex_dma_freehdl()
19087613SVikram.Hegde@Sun.COM  *    called from ddi_dma_free_handle().
1909509Smrj  */
19107613SVikram.Hegde@Sun.COM static int
19117613SVikram.Hegde@Sun.COM rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
19127613SVikram.Hegde@Sun.COM {
191312027SStephen.Hanson@Sun.COM 	ndi_fmc_remove(rdip, DMA_HANDLE, handle);
191411600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
191510216SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
19167613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_freehdl(dip, rdip, handle));
19177613SVikram.Hegde@Sun.COM 	}
19187613SVikram.Hegde@Sun.COM #endif
19197613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_freehdl(dip, rdip, handle));
19207613SVikram.Hegde@Sun.COM }
19217613SVikram.Hegde@Sun.COM 
1922509Smrj /*ARGSUSED*/
1923509Smrj static int
19247613SVikram.Hegde@Sun.COM rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
19257613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
19267613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
19270Sstevel@tonic-gate {
1928509Smrj 	rootnex_sglinfo_t *sinfo;
1929509Smrj 	ddi_dma_attr_t *attr;
1930509Smrj 	ddi_dma_impl_t *hp;
1931509Smrj 	rootnex_dma_t *dma;
1932509Smrj 	int kmflag;
1933509Smrj 	int e;
1934509Smrj 
1935509Smrj 	hp = (ddi_dma_impl_t *)handle;
1936509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1937509Smrj 	sinfo = &dma->dp_sglinfo;
1938509Smrj 	attr = &hp->dmai_attr;
1939509Smrj 
19408215SVikram.Hegde@Sun.COM 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
19418215SVikram.Hegde@Sun.COM 		dma->dp_sleep_flags = KM_SLEEP;
19428215SVikram.Hegde@Sun.COM 	} else {
19438215SVikram.Hegde@Sun.COM 		dma->dp_sleep_flags = KM_NOSLEEP;
19448215SVikram.Hegde@Sun.COM 	}
19458215SVikram.Hegde@Sun.COM 
1946509Smrj 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1947509Smrj 
1948509Smrj 	/*
1949509Smrj 	 * This is useful for debugging a driver. Not as useful in a production
1950509Smrj 	 * system. The only time this will fail is if you have a driver bug.
1951509Smrj 	 */
1952509Smrj 	if (rootnex_bind_check_inuse) {
1953509Smrj 		/*
1954509Smrj 		 * No one else should ever have this lock unless someone else
1955509Smrj 		 * is trying to use this handle. So contention on the lock
1956509Smrj 		 * is the same as inuse being set.
1957509Smrj 		 */
1958509Smrj 		e = mutex_tryenter(&dma->dp_mutex);
1959509Smrj 		if (e == 0) {
1960*12837Sfrank.van.der.linden@oracle.com 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1961509Smrj 			return (DDI_DMA_INUSE);
1962509Smrj 		}
1963509Smrj 		if (dma->dp_inuse) {
1964509Smrj 			mutex_exit(&dma->dp_mutex);
1965*12837Sfrank.van.der.linden@oracle.com 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1966509Smrj 			return (DDI_DMA_INUSE);
1967509Smrj 		}
1968509Smrj 		dma->dp_inuse = B_TRUE;
1969509Smrj 		mutex_exit(&dma->dp_mutex);
1970509Smrj 	}
1971509Smrj 
1972509Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1973509Smrj 	if (rootnex_bind_check_parms) {
1974509Smrj 		e = rootnex_valid_bind_parms(dmareq, attr);
1975509Smrj 		if (e != DDI_SUCCESS) {
1976*12837Sfrank.van.der.linden@oracle.com 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1977509Smrj 			rootnex_clean_dmahdl(hp);
1978509Smrj 			return (e);
1979509Smrj 		}
1980509Smrj 	}
1981509Smrj 
1982509Smrj 	/* save away the original bind info */
1983509Smrj 	dma->dp_dma = dmareq->dmar_object;
1984509Smrj 
198511600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
198611600SVikram.Hegde@Sun.COM 	e = immu_map_sgl(hp, dmareq, rootnex_prealloc_cookies, rdip);
198711600SVikram.Hegde@Sun.COM 	switch (e) {
198811600SVikram.Hegde@Sun.COM 	case DDI_DMA_MAPPED:
198911600SVikram.Hegde@Sun.COM 		goto out;
199011600SVikram.Hegde@Sun.COM 	case DDI_DMA_USE_PHYSICAL:
199111600SVikram.Hegde@Sun.COM 		break;
199211600SVikram.Hegde@Sun.COM 	case DDI_DMA_PARTIAL:
199311600SVikram.Hegde@Sun.COM 		ddi_err(DER_PANIC, rdip, "Partial DVMA map");
199411600SVikram.Hegde@Sun.COM 		e = DDI_DMA_NORESOURCES;
199511600SVikram.Hegde@Sun.COM 		/*FALLTHROUGH*/
199611600SVikram.Hegde@Sun.COM 	default:
199711600SVikram.Hegde@Sun.COM 		ddi_err(DER_MODE, rdip, "DVMA map failed");
1998*12837Sfrank.van.der.linden@oracle.com 		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
199911600SVikram.Hegde@Sun.COM 		rootnex_clean_dmahdl(hp);
200011600SVikram.Hegde@Sun.COM 		return (e);
20017589SVikram.Hegde@Sun.COM 	}
20027613SVikram.Hegde@Sun.COM #endif
20037589SVikram.Hegde@Sun.COM 
2004509Smrj 	/*
2005509Smrj 	 * Figure out a rough estimate of the maximum number of pages this
2006509Smrj 	 * buffer could use (a high estimate of course).
2007509Smrj 	 */
2008509Smrj 	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
2009509Smrj 
2010509Smrj 	/*
2011509Smrj 	 * We'll use the pre-allocated cookies for any bind that will *always*
2012509Smrj 	 * fit (more important to be consistent, we don't want to create
2013509Smrj 	 * additional degenerate cases).
2014509Smrj 	 */
2015509Smrj 	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
2016509Smrj 		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
2017509Smrj 		dma->dp_need_to_free_cookie = B_FALSE;
2018*12837Sfrank.van.der.linden@oracle.com 		ROOTNEX_DPROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
2019509Smrj 		    uint_t, sinfo->si_max_pages);
2020509Smrj 
2021509Smrj 	/*
2022509Smrj 	 * For anything larger than that, we'll go ahead and allocate the
2023509Smrj 	 * maximum number of pages we expect to see. Hopefully, we won't be
2024509Smrj 	 * seeing this path in the fast path for high performance devices very
2025509Smrj 	 * frequently.
2026509Smrj 	 *
2027509Smrj 	 * A DDI bind interface that allowed the driver to provide storage to
2028509Smrj 	 * the bind interface would speed this case up.
2029509Smrj 	 */
2030509Smrj 	} else {
2031509Smrj 		/* convert the sleep flags */
2032509Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
2033509Smrj 			kmflag =  KM_SLEEP;
2034509Smrj 		} else {
2035509Smrj 			kmflag =  KM_NOSLEEP;
2036509Smrj 		}
2037509Smrj 
2038509Smrj 		/*
2039509Smrj 		 * Save away how much memory we allocated. If we're doing a
2040509Smrj 		 * nosleep, the alloc could fail...
2041509Smrj 		 */
2042509Smrj 		dma->dp_cookie_size = sinfo->si_max_pages *
2043509Smrj 		    sizeof (ddi_dma_cookie_t);
2044509Smrj 		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
2045509Smrj 		if (dma->dp_cookies == NULL) {
2046*12837Sfrank.van.der.linden@oracle.com 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2047509Smrj 			rootnex_clean_dmahdl(hp);
2048509Smrj 			return (DDI_DMA_NORESOURCES);
2049509Smrj 		}
2050509Smrj 		dma->dp_need_to_free_cookie = B_TRUE;
2051*12837Sfrank.van.der.linden@oracle.com 		ROOTNEX_DPROBE2(rootnex__bind__alloc, dev_info_t *, rdip,
2052*12837Sfrank.van.der.linden@oracle.com 		    uint_t, sinfo->si_max_pages);
2053509Smrj 	}
2054509Smrj 	hp->dmai_cookie = dma->dp_cookies;
2055509Smrj 
2056509Smrj 	/*
2057509Smrj 	 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
205811600SVikram.Hegde@Sun.COM 	 * looking at the constraints in the dma structure. It will then put
205911600SVikram.Hegde@Sun.COM 	 * some additional state about the sgl in the dma struct (i.e. is
206011600SVikram.Hegde@Sun.COM 	 * the sgl clean, or do we need to do some munging; how many pages
206111600SVikram.Hegde@Sun.COM 	 * need to be copied, etc.)
2062509Smrj 	 */
2063509Smrj 	rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies,
2064509Smrj 	    &dma->dp_sglinfo);
20657589SVikram.Hegde@Sun.COM 
206611600SVikram.Hegde@Sun.COM out:
2067509Smrj 	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
2068509Smrj 	/* if we don't need a copy buffer, we don't need to sync */
2069509Smrj 	if (sinfo->si_copybuf_req == 0) {
2070509Smrj 		hp->dmai_rflags |= DMP_NOSYNC;
2071509Smrj 	}
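	/*
	 * (A copy buffer is only required when rootnex_get_sgl() found pages
	 * the device cannot address directly, e.g. a 32-bit-limited device
	 * handed memory above 4G; those binds must be ddi_dma_sync()ed so the
	 * data is bounced through the copy buffer. Example is illustrative.)
	 */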
2072509Smrj 
2073509Smrj 	/*
2074509Smrj 	 * if we don't need the copybuf and we don't need to do a partial,  we
2075509Smrj 	 * hit the fast path. All the high performance devices should be trying
2076509Smrj 	 * to hit this path. To hit this path, a device should be able to reach
2077509Smrj 	 * all of memory, shouldn't try to bind more than it can transfer, and
2078509Smrj 	 * the buffer shouldn't require more cookies than the driver/device can
2079509Smrj 	 * handle [sgllen].
2080509Smrj 	 */
2081509Smrj 	if ((sinfo->si_copybuf_req == 0) &&
2082509Smrj 	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
2083509Smrj 	    (dma->dp_dma.dmao_size < dma->dp_maxxfer)) {
2084509Smrj 		/*
20855591Sstephh 		 * If the driver supports FMA, insert the handle in the FMA DMA
20865591Sstephh 		 * handle cache.
20875591Sstephh 		 */
208812027SStephen.Hanson@Sun.COM 		if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
20895591Sstephh 			hp->dmai_error.err_cf = rootnex_dma_check;
20905591Sstephh 
20915591Sstephh 		/*
2092509Smrj 		 * copy out the first cookie and ccountp, set the cookie
2093509Smrj 		 * pointer to the second cookie. The first cookie is passed
2094509Smrj 		 * back on the stack. Additional cookies are accessed via
2095509Smrj 		 * ddi_dma_nextcookie()
2096509Smrj 		 */
2097509Smrj 		*cookiep = dma->dp_cookies[0];
2098509Smrj 		*ccountp = sinfo->si_sgl_size;
2099509Smrj 		hp->dmai_cookie++;
2100509Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2101*12837Sfrank.van.der.linden@oracle.com 		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2102*12837Sfrank.van.der.linden@oracle.com 		ROOTNEX_DPROBE3(rootnex__bind__fast, dev_info_t *, rdip,
210311600SVikram.Hegde@Sun.COM 		    uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS],
210411600SVikram.Hegde@Sun.COM 		    uint_t, dma->dp_dma.dmao_size);
210511600SVikram.Hegde@Sun.COM 
210611600SVikram.Hegde@Sun.COM 
2107509Smrj 		return (DDI_DMA_MAPPED);
2108509Smrj 	}
2109509Smrj 
2110509Smrj 	/*
2111509Smrj 	 * go to the slow path, we may need to alloc more memory, create
2112509Smrj 	 * multiple windows, and munge up a sgl to make the device happy.
2113509Smrj 	 */
2114509Smrj 	e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag);
2115509Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
2116509Smrj 		if (dma->dp_need_to_free_cookie) {
2117509Smrj 			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2118509Smrj 		}
2119*12837Sfrank.van.der.linden@oracle.com 		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2120509Smrj 		rootnex_clean_dmahdl(hp); /* must be after free cookie */
2121509Smrj 		return (e);
2122509Smrj 	}
2123509Smrj 
21245591Sstephh 	/*
21255591Sstephh 	 * If the driver supports FMA, insert the handle in the FMA DMA handle
21265591Sstephh 	 * cache.
21275591Sstephh 	 */
212812027SStephen.Hanson@Sun.COM 	if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
21295591Sstephh 		hp->dmai_error.err_cf = rootnex_dma_check;
21305591Sstephh 
2131509Smrj 	/* if the first window uses the copy buffer, sync it for the device */
2132509Smrj 	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
2133509Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
21348215SVikram.Hegde@Sun.COM 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
2135509Smrj 		    DDI_DMA_SYNC_FORDEV);
2136509Smrj 	}
2137509Smrj 
2138509Smrj 	/*
2139509Smrj 	 * copy out the first cookie and ccountp, set the cookie pointer to the
2140509Smrj 	 * second cookie. Make sure the partial flag is set/cleared correctly.
2141509Smrj 	 * If we have a partial map (i.e. multiple windows), the number of
2142509Smrj 	 * cookies we return is the number of cookies in the first window.
2143509Smrj 	 */
2144509Smrj 	if (e == DDI_DMA_MAPPED) {
2145509Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2146509Smrj 		*ccountp = sinfo->si_sgl_size;
214711600SVikram.Hegde@Sun.COM 		hp->dmai_nwin = 1;
2148509Smrj 	} else {
2149509Smrj 		hp->dmai_rflags |= DDI_DMA_PARTIAL;
2150509Smrj 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
2151509Smrj 		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
2152509Smrj 	}
2153509Smrj 	*cookiep = dma->dp_cookies[0];
2154509Smrj 	hp->dmai_cookie++;
2155509Smrj 
215610902SMark.Johnson@Sun.COM 	ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
215710902SMark.Johnson@Sun.COM 	ROOTNEX_DPROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
2158509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
2159509Smrj 	    dma->dp_dma.dmao_size);
2160509Smrj 	return (e);
2161509Smrj }
2162509Smrj 
2163509Smrj /*
21647613SVikram.Hegde@Sun.COM  * rootnex_dma_bindhdl()
21657613SVikram.Hegde@Sun.COM  *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
2166509Smrj  */
21677613SVikram.Hegde@Sun.COM static int
21687613SVikram.Hegde@Sun.COM rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
21697613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
21707613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
21717613SVikram.Hegde@Sun.COM {
217211600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
217310216SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
21747613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq,
21757613SVikram.Hegde@Sun.COM 		    cookiep, ccountp));
21767613SVikram.Hegde@Sun.COM 	}
21777613SVikram.Hegde@Sun.COM #endif
21787613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_bindhdl(dip, rdip, handle, dmareq,
21797613SVikram.Hegde@Sun.COM 	    cookiep, ccountp));
21807613SVikram.Hegde@Sun.COM }
21817613SVikram.Hegde@Sun.COM 
218211600SVikram.Hegde@Sun.COM 
218311600SVikram.Hegde@Sun.COM 
2184509Smrj /*ARGSUSED*/
2185509Smrj static int
21867613SVikram.Hegde@Sun.COM rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2187509Smrj     ddi_dma_handle_t handle)
2188509Smrj {
2189509Smrj 	ddi_dma_impl_t *hp;
2190509Smrj 	rootnex_dma_t *dma;
2191509Smrj 	int e;
2192509Smrj 
2193509Smrj 	hp = (ddi_dma_impl_t *)handle;
2194509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
2195509Smrj 
2196509Smrj 	/* make sure the buffer wasn't free'd before calling unbind */
2197509Smrj 	if (rootnex_unbind_verify_buffer) {
2198509Smrj 		e = rootnex_verify_buffer(dma);
2199509Smrj 		if (e != DDI_SUCCESS) {
2200509Smrj 			ASSERT(0);
2201509Smrj 			return (DDI_FAILURE);
2202509Smrj 		}
2203509Smrj 	}
2204509Smrj 
2205509Smrj 	/* sync the current window before unbinding the buffer */
2206509Smrj 	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
2207509Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
22088215SVikram.Hegde@Sun.COM 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
2209509Smrj 		    DDI_DMA_SYNC_FORCPU);
2210509Smrj 	}
2211509Smrj 
2212509Smrj 	/*
2213509Smrj 	 * Clean up copy buffer or window state. If we didn't use the copy
2214509Smrj 	 * buffer or windows, there won't be much to do :-)
2215509Smrj 	 */
2216509Smrj 	rootnex_teardown_copybuf(dma);
2217509Smrj 	rootnex_teardown_windows(dma);
2218509Smrj 
221911600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
2220509Smrj 	/*
222111600SVikram.Hegde@Sun.COM 	 * Clean up the page tables and free the dvma
22227589SVikram.Hegde@Sun.COM 	 */
222311600SVikram.Hegde@Sun.COM 	e = immu_unmap_sgl(hp, rdip);
222411600SVikram.Hegde@Sun.COM 	if (e != DDI_DMA_USE_PHYSICAL && e != DDI_SUCCESS) {
222511600SVikram.Hegde@Sun.COM 		return (e);
22267589SVikram.Hegde@Sun.COM 	}
22277613SVikram.Hegde@Sun.COM #endif
22287589SVikram.Hegde@Sun.COM 
22297589SVikram.Hegde@Sun.COM 	/*
2230509Smrj 	 * If we had to allocate space for the worst-case sgl (it didn't
2231509Smrj 	 * fit into our pre-allocated buffer), free that up now
2232509Smrj 	 */
2233509Smrj 	if (dma->dp_need_to_free_cookie) {
2234509Smrj 		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2235509Smrj 	}
2236509Smrj 
2237509Smrj 	/*
2238509Smrj 	 * clean up the handle so it's ready for the next bind (i.e. if the
2239509Smrj 	 * handle is reused).
2240509Smrj 	 */
2241509Smrj 	rootnex_clean_dmahdl(hp);
224212027SStephen.Hanson@Sun.COM 	hp->dmai_error.err_cf = NULL;
2243509Smrj 
2244509Smrj 	if (rootnex_state->r_dvma_call_list_id)
2245509Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
2246509Smrj 
224710902SMark.Johnson@Sun.COM 	ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
224810902SMark.Johnson@Sun.COM 	ROOTNEX_DPROBE1(rootnex__unbind, uint64_t,
2249509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2250509Smrj 
2251509Smrj 	return (DDI_SUCCESS);
2252509Smrj }
2253509Smrj 
22547613SVikram.Hegde@Sun.COM /*
22557613SVikram.Hegde@Sun.COM  * rootnex_dma_unbindhdl()
22567613SVikram.Hegde@Sun.COM  *    called from ddi_dma_unbind_handle()
22577613SVikram.Hegde@Sun.COM  */
22587613SVikram.Hegde@Sun.COM /*ARGSUSED*/
22597613SVikram.Hegde@Sun.COM static int
22607613SVikram.Hegde@Sun.COM rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
22617613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle)
22627613SVikram.Hegde@Sun.COM {
226311600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
226410216SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
22657613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_unbindhdl(dip, rdip, handle));
22667613SVikram.Hegde@Sun.COM 	}
22677613SVikram.Hegde@Sun.COM #endif
22687613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_unbindhdl(dip, rdip, handle));
22697613SVikram.Hegde@Sun.COM }
22707613SVikram.Hegde@Sun.COM 
227111600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
22728215SVikram.Hegde@Sun.COM 
22738215SVikram.Hegde@Sun.COM static int
22748215SVikram.Hegde@Sun.COM rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle)
22758215SVikram.Hegde@Sun.COM {
22768215SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
22778215SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
22788215SVikram.Hegde@Sun.COM 
22798215SVikram.Hegde@Sun.COM 	if (dma->dp_sleep_flags != KM_SLEEP &&
22808215SVikram.Hegde@Sun.COM 	    dma->dp_sleep_flags != KM_NOSLEEP)
22818215SVikram.Hegde@Sun.COM 		cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle");
22828215SVikram.Hegde@Sun.COM 	return (dma->dp_sleep_flags);
22838215SVikram.Hegde@Sun.COM }
22847613SVikram.Hegde@Sun.COM /*ARGSUSED*/
22857613SVikram.Hegde@Sun.COM static void
22867613SVikram.Hegde@Sun.COM rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
22877613SVikram.Hegde@Sun.COM {
22887613SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
22897613SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
22908215SVikram.Hegde@Sun.COM 	rootnex_window_t *window;
22918215SVikram.Hegde@Sun.COM 
22928215SVikram.Hegde@Sun.COM 	if (dma->dp_window) {
22938215SVikram.Hegde@Sun.COM 		window = &dma->dp_window[dma->dp_current_win];
22948215SVikram.Hegde@Sun.COM 		hp->dmai_cookie = window->wd_first_cookie;
22958215SVikram.Hegde@Sun.COM 	} else {
22968215SVikram.Hegde@Sun.COM 		hp->dmai_cookie = dma->dp_cookies;
22978215SVikram.Hegde@Sun.COM 	}
22987613SVikram.Hegde@Sun.COM 	hp->dmai_cookie++;
22997613SVikram.Hegde@Sun.COM }
23007613SVikram.Hegde@Sun.COM 
23017613SVikram.Hegde@Sun.COM /*ARGSUSED*/
23027613SVikram.Hegde@Sun.COM static int
23037613SVikram.Hegde@Sun.COM rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
23048215SVikram.Hegde@Sun.COM     ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
23058215SVikram.Hegde@Sun.COM {
23068215SVikram.Hegde@Sun.COM 	int i;
23078215SVikram.Hegde@Sun.COM 	int km_flags;
23088215SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
23098215SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
23108215SVikram.Hegde@Sun.COM 	rootnex_window_t *window;
23118215SVikram.Hegde@Sun.COM 	ddi_dma_cookie_t *cp;
23128215SVikram.Hegde@Sun.COM 	ddi_dma_cookie_t *cookie;
23138215SVikram.Hegde@Sun.COM 
23148215SVikram.Hegde@Sun.COM 	ASSERT(*cookiepp == NULL);
23158215SVikram.Hegde@Sun.COM 	ASSERT(*ccountp == 0);
23168215SVikram.Hegde@Sun.COM 
23178215SVikram.Hegde@Sun.COM 	if (dma->dp_window) {
23188215SVikram.Hegde@Sun.COM 		window = &dma->dp_window[dma->dp_current_win];
23198215SVikram.Hegde@Sun.COM 		cp = window->wd_first_cookie;
23208215SVikram.Hegde@Sun.COM 		*ccountp = window->wd_cookie_cnt;
23218215SVikram.Hegde@Sun.COM 	} else {
23228215SVikram.Hegde@Sun.COM 		cp = dma->dp_cookies;
23238215SVikram.Hegde@Sun.COM 		*ccountp = dma->dp_sglinfo.si_sgl_size;
23248215SVikram.Hegde@Sun.COM 	}
23258215SVikram.Hegde@Sun.COM 
23268215SVikram.Hegde@Sun.COM 	km_flags = rootnex_coredma_get_sleep_flags(handle);
23278215SVikram.Hegde@Sun.COM 	cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags);
23288215SVikram.Hegde@Sun.COM 	if (cookie == NULL) {
23298215SVikram.Hegde@Sun.COM 		return (DDI_DMA_NORESOURCES);
23308215SVikram.Hegde@Sun.COM 	}
23318215SVikram.Hegde@Sun.COM 
23328215SVikram.Hegde@Sun.COM 	for (i = 0; i < *ccountp; i++) {
23338215SVikram.Hegde@Sun.COM 		cookie[i].dmac_notused = cp[i].dmac_notused;
23348215SVikram.Hegde@Sun.COM 		cookie[i].dmac_type = cp[i].dmac_type;
23358215SVikram.Hegde@Sun.COM 		cookie[i].dmac_address = cp[i].dmac_address;
23368215SVikram.Hegde@Sun.COM 		cookie[i].dmac_size = cp[i].dmac_size;
23378215SVikram.Hegde@Sun.COM 	}
23388215SVikram.Hegde@Sun.COM 
23398215SVikram.Hegde@Sun.COM 	*cookiepp = cookie;
23408215SVikram.Hegde@Sun.COM 
23418215SVikram.Hegde@Sun.COM 	return (DDI_SUCCESS);
23428215SVikram.Hegde@Sun.COM }
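/*
 * The cookie get/set/clear hooks here are compiled only on amd64 non-xpv
 * kernels; they allow the IOMMU support code (iommulib/immu) to swap in a
 * DVMA-translated cookie array for the physical one and later restore it
 * (see dp_saved_cookies and dp_need_to_switch_cookies).
 */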
23438215SVikram.Hegde@Sun.COM 
23448215SVikram.Hegde@Sun.COM /*ARGSUSED*/
23458215SVikram.Hegde@Sun.COM static int
23468215SVikram.Hegde@Sun.COM rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
23478215SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t ccount)
23487613SVikram.Hegde@Sun.COM {
23497613SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
23507613SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
23518215SVikram.Hegde@Sun.COM 	rootnex_window_t *window;
23528215SVikram.Hegde@Sun.COM 	ddi_dma_cookie_t *cur_cookiep;
23538215SVikram.Hegde@Sun.COM 
23548215SVikram.Hegde@Sun.COM 	ASSERT(cookiep);
23558215SVikram.Hegde@Sun.COM 	ASSERT(ccount != 0);
23568215SVikram.Hegde@Sun.COM 	ASSERT(dma->dp_need_to_switch_cookies == B_FALSE);
23578215SVikram.Hegde@Sun.COM 
23588215SVikram.Hegde@Sun.COM 	if (dma->dp_window) {
23598215SVikram.Hegde@Sun.COM 		window = &dma->dp_window[dma->dp_current_win];
23608215SVikram.Hegde@Sun.COM 		dma->dp_saved_cookies = window->wd_first_cookie;
23618215SVikram.Hegde@Sun.COM 		window->wd_first_cookie = cookiep;
23628215SVikram.Hegde@Sun.COM 		ASSERT(ccount == window->wd_cookie_cnt);
23638215SVikram.Hegde@Sun.COM 		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
23648215SVikram.Hegde@Sun.COM 		    + window->wd_first_cookie;
23657613SVikram.Hegde@Sun.COM 	} else {
23668215SVikram.Hegde@Sun.COM 		dma->dp_saved_cookies = dma->dp_cookies;
23678215SVikram.Hegde@Sun.COM 		dma->dp_cookies = cookiep;
23688215SVikram.Hegde@Sun.COM 		ASSERT(ccount == dma->dp_sglinfo.si_sgl_size);
23698215SVikram.Hegde@Sun.COM 		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
23708215SVikram.Hegde@Sun.COM 		    + dma->dp_cookies;
23717613SVikram.Hegde@Sun.COM 	}
23728215SVikram.Hegde@Sun.COM 
23738215SVikram.Hegde@Sun.COM 	dma->dp_need_to_switch_cookies = B_TRUE;
23748215SVikram.Hegde@Sun.COM 	hp->dmai_cookie = cur_cookiep;
23757613SVikram.Hegde@Sun.COM 
23767613SVikram.Hegde@Sun.COM 	return (DDI_SUCCESS);
23777613SVikram.Hegde@Sun.COM }
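/*
 * Note on the pointer arithmetic above: (hp->dmai_cookie - old array) is the
 * caller's current offset into the cookie array, so re-basing that offset
 * onto the replacement array preserves the caller's position. For example
 * (illustrative), if hp->dmai_cookie pointed at element 3 of the old array,
 * it points at cookiep[3] after the switch.
 */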
23788215SVikram.Hegde@Sun.COM 
23798215SVikram.Hegde@Sun.COM /*ARGSUSED*/
23808215SVikram.Hegde@Sun.COM static int
23818215SVikram.Hegde@Sun.COM rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
23828215SVikram.Hegde@Sun.COM {
23838215SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
23848215SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
23858215SVikram.Hegde@Sun.COM 	rootnex_window_t *window;
23868215SVikram.Hegde@Sun.COM 	ddi_dma_cookie_t *cur_cookiep;
23878215SVikram.Hegde@Sun.COM 	ddi_dma_cookie_t *cookie_array;
23888215SVikram.Hegde@Sun.COM 	uint_t ccount;
23898215SVikram.Hegde@Sun.COM 
23908215SVikram.Hegde@Sun.COM 	/* check if cookies have not been switched */
23918215SVikram.Hegde@Sun.COM 	if (dma->dp_need_to_switch_cookies == B_FALSE)
23928215SVikram.Hegde@Sun.COM 		return (DDI_SUCCESS);
23938215SVikram.Hegde@Sun.COM 
23948215SVikram.Hegde@Sun.COM 	ASSERT(dma->dp_saved_cookies);
23958215SVikram.Hegde@Sun.COM 
23968215SVikram.Hegde@Sun.COM 	if (dma->dp_window) {
23978215SVikram.Hegde@Sun.COM 		window = &dma->dp_window[dma->dp_current_win];
23988215SVikram.Hegde@Sun.COM 		cookie_array = window->wd_first_cookie;
23998215SVikram.Hegde@Sun.COM 		window->wd_first_cookie = dma->dp_saved_cookies;
24008215SVikram.Hegde@Sun.COM 		dma->dp_saved_cookies = NULL;
24018215SVikram.Hegde@Sun.COM 		ccount = window->wd_cookie_cnt;
24028215SVikram.Hegde@Sun.COM 		cur_cookiep = (hp->dmai_cookie - cookie_array)
24038215SVikram.Hegde@Sun.COM 		    + window->wd_first_cookie;
24048215SVikram.Hegde@Sun.COM 	} else {
24058215SVikram.Hegde@Sun.COM 		cookie_array = dma->dp_cookies;
24068215SVikram.Hegde@Sun.COM 		dma->dp_cookies = dma->dp_saved_cookies;
24078215SVikram.Hegde@Sun.COM 		dma->dp_saved_cookies = NULL;
24088215SVikram.Hegde@Sun.COM 		ccount = dma->dp_sglinfo.si_sgl_size;
24098215SVikram.Hegde@Sun.COM 		cur_cookiep = (hp->dmai_cookie - cookie_array)
24108215SVikram.Hegde@Sun.COM 		    + dma->dp_cookies;
24118215SVikram.Hegde@Sun.COM 	}
24128215SVikram.Hegde@Sun.COM 
24138215SVikram.Hegde@Sun.COM 	kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
24148215SVikram.Hegde@Sun.COM 
24158215SVikram.Hegde@Sun.COM 	hp->dmai_cookie = cur_cookiep;
24168215SVikram.Hegde@Sun.COM 
24178215SVikram.Hegde@Sun.COM 	dma->dp_need_to_switch_cookies = B_FALSE;
24188215SVikram.Hegde@Sun.COM 
24198215SVikram.Hegde@Sun.COM 	return (DDI_SUCCESS);
24208215SVikram.Hegde@Sun.COM }
24218215SVikram.Hegde@Sun.COM 
24227617SVikram.Hegde@Sun.COM #endif
2423509Smrj 
2424509Smrj /*
2425509Smrj  * rootnex_verify_buffer()
2426509Smrj  *   verify buffer wasn't free'd
2427509Smrj  */
2428509Smrj static int
2429509Smrj rootnex_verify_buffer(rootnex_dma_t *dma)
2430509Smrj {
2431509Smrj 	page_t **pplist;
2432509Smrj 	caddr_t vaddr;
2433509Smrj 	uint_t pcnt;
2434509Smrj 	uint_t poff;
2435509Smrj 	page_t *pp;
24361865Sdilpreet 	char b;
2437509Smrj 	int i;
2438509Smrj 
2439509Smrj 	/* Figure out how many pages this buffer occupies */
2440509Smrj 	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
2441509Smrj 		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
2442509Smrj 	} else {
2443509Smrj 		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
2444509Smrj 		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2445509Smrj 	}
2446509Smrj 	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
2447509Smrj 
2448509Smrj 	switch (dma->dp_dma.dmao_type) {
24490Sstevel@tonic-gate 	case DMA_OTYP_PAGES:
2450509Smrj 		/*
2451509Smrj 		 * for a linked list of pp's walk through them to make sure
2452509Smrj 		 * they're locked and not free.
2453509Smrj 		 */
2454509Smrj 		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
2455509Smrj 		for (i = 0; i < pcnt; i++) {
2456509Smrj 			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
2457509Smrj 				return (DDI_FAILURE);
24580Sstevel@tonic-gate 			}
2459509Smrj 			pp = pp->p_next;
24600Sstevel@tonic-gate 		}
24610Sstevel@tonic-gate 		break;
2462509Smrj 
24630Sstevel@tonic-gate 	case DMA_OTYP_VADDR:
24640Sstevel@tonic-gate 	case DMA_OTYP_BUFVADDR:
2465509Smrj 		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
2466509Smrj 		/*
2467509Smrj 		 * for an array of pp's walk through them to make sure they're
2468509Smrj 		 * not free. It's possible that they may not be locked.
2469509Smrj 		 */
2470509Smrj 		if (pplist) {
2471509Smrj 			for (i = 0; i < pcnt; i++) {
2472509Smrj 				if (PP_ISFREE(pplist[i])) {
2473509Smrj 					return (DDI_FAILURE);
2474509Smrj 				}
2475509Smrj 			}
2476509Smrj 
2477509Smrj 		/* For a virtual address, try to peek at each page */
2478509Smrj 		} else {
2479509Smrj 			if (dma->dp_sglinfo.si_asp == &kas) {
2480509Smrj 				for (i = 0; i < pcnt; i++) {
24811865Sdilpreet 					if (ddi_peek8(NULL, vaddr, &b) ==
24821865Sdilpreet 					    DDI_FAILURE)
2483509Smrj 						return (DDI_FAILURE);
24841865Sdilpreet 					vaddr += MMU_PAGESIZE;
2485509Smrj 				}
2486509Smrj 			}
2487509Smrj 		}
2488509Smrj 		break;
2489509Smrj 
2490509Smrj 	default:
2491509Smrj 		ASSERT(0);
2492509Smrj 		break;
2493509Smrj 	}
2494509Smrj 
2495509Smrj 	return (DDI_SUCCESS);
2496509Smrj }
2497509Smrj 
2498509Smrj 
2499509Smrj /*
2500509Smrj  * rootnex_clean_dmahdl()
2501509Smrj  *    Clean the dma handle. This should be called on handle alloc and on
2502509Smrj  *    handle unbind. Sets the handle state to the default settings.
2503509Smrj  */
2504509Smrj static void
2505509Smrj rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
2506509Smrj {
2507509Smrj 	rootnex_dma_t *dma;
2508509Smrj 
2509509Smrj 
2510509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
2511509Smrj 
2512509Smrj 	hp->dmai_nwin = 0;
2513509Smrj 	dma->dp_current_cookie = 0;
2514509Smrj 	dma->dp_copybuf_size = 0;
2515509Smrj 	dma->dp_window = NULL;
2516509Smrj 	dma->dp_cbaddr = NULL;
2517509Smrj 	dma->dp_inuse = B_FALSE;
2518509Smrj 	dma->dp_need_to_free_cookie = B_FALSE;
25198215SVikram.Hegde@Sun.COM 	dma->dp_need_to_switch_cookies = B_FALSE;
25208215SVikram.Hegde@Sun.COM 	dma->dp_saved_cookies = NULL;
25218215SVikram.Hegde@Sun.COM 	dma->dp_sleep_flags = KM_PANIC;
2522509Smrj 	dma->dp_need_to_free_window = B_FALSE;
2523509Smrj 	dma->dp_partial_required = B_FALSE;
2524509Smrj 	dma->dp_trim_required = B_FALSE;
2525509Smrj 	dma->dp_sglinfo.si_copybuf_req = 0;
2526509Smrj #if !defined(__amd64)
2527509Smrj 	dma->dp_cb_remaping = B_FALSE;
2528509Smrj 	dma->dp_kva = NULL;
2529509Smrj #endif
2530509Smrj 
2531509Smrj 	/* FMA related initialization */
2532509Smrj 	hp->dmai_fault = 0;
2533509Smrj 	hp->dmai_fault_check = NULL;
2534509Smrj 	hp->dmai_fault_notify = NULL;
2535509Smrj 	hp->dmai_error.err_ena = 0;
2536509Smrj 	hp->dmai_error.err_status = DDI_FM_OK;
2537509Smrj 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
2538509Smrj 	hp->dmai_error.err_ontrap = NULL;
2539509Smrj }
2540509Smrj 
2541509Smrj 
2542509Smrj /*
2543509Smrj  * rootnex_valid_alloc_parms()
2544509Smrj  *    Called in ddi_dma_alloc_handle path to validate its parameters.
2545509Smrj  */
2546509Smrj static int
2547509Smrj rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
2548509Smrj {
2549509Smrj 	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
2550509Smrj 	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
2551509Smrj 	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
2552509Smrj 	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
2553509Smrj 		return (DDI_DMA_BADATTR);
2554509Smrj 	}
2555509Smrj 
2556509Smrj 	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
2557509Smrj 		return (DDI_DMA_BADATTR);
2558509Smrj 	}
2559509Smrj 
2560509Smrj 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
2561509Smrj 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
2562509Smrj 	    attr->dma_attr_sgllen <= 0) {
2563509Smrj 		return (DDI_DMA_BADATTR);
2564509Smrj 	}
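	/*
	 * Illustrative note (not from the original source): with 4K pages,
	 * MMU_PAGEOFFSET is 0xFFF, so a dma_attr_seg of 0xFFFF (64KB - 1)
	 * passes the check above (0xFFFF & 0xFFF == 0xFFF), while a
	 * dma_attr_seg of 0x10000 would be rejected with DDI_DMA_BADATTR
	 * since its low bits are not all ones.
	 */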
2565509Smrj 
2566509Smrj 	/* We should be able to DMA into every byte offset in a page */
2567509Smrj 	if (maxsegmentsize < MMU_PAGESIZE) {
2568509Smrj 		return (DDI_DMA_BADATTR);
2569509Smrj 	}
2570509Smrj 
257111793SMark.Johnson@Sun.COM 	/* if we're bouncing on seg, seg must be <= addr_hi */
257211793SMark.Johnson@Sun.COM 	if ((attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG) &&
257311793SMark.Johnson@Sun.COM 	    (attr->dma_attr_seg > attr->dma_attr_addr_hi)) {
257411793SMark.Johnson@Sun.COM 		return (DDI_DMA_BADATTR);
257511793SMark.Johnson@Sun.COM 	}
2576509Smrj 	return (DDI_SUCCESS);
2577509Smrj }
2578509Smrj 
2579509Smrj /*
2580509Smrj  * rootnex_valid_bind_parms()
2581509Smrj  *    Called in ddi_dma_*_bind_handle path to validate its parameters.
2582509Smrj  */
2583509Smrj /* ARGSUSED */
2584509Smrj static int
2585509Smrj rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
2586509Smrj {
2587509Smrj #if !defined(__amd64)
2588509Smrj 	/*
2589509Smrj 	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
2590509Smrj 	 * we can track the offset for the obsoleted interfaces.
2591509Smrj 	 */
2592509Smrj 	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
2593509Smrj 		return (DDI_DMA_TOOBIG);
2594509Smrj 	}
2595509Smrj #endif
2596509Smrj 
2597509Smrj 	return (DDI_SUCCESS);
2598509Smrj }
2599509Smrj 
2600509Smrj 
2601509Smrj /*
260211793SMark.Johnson@Sun.COM  * rootnex_need_bounce_seg()
260311793SMark.Johnson@Sun.COM  *    check to see if the buffer lives on both sides of the seg.
260411793SMark.Johnson@Sun.COM  */
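/*
 * Illustrative example (hypothetical values, not from the original source):
 * with si_segmask of 0xFFFFFF (16MB - 1), a buffer with one page mapped just
 * below 16MB and another page above 16MB lives on both sides of the seg, so
 * this routine returns B_TRUE and the pages above the seg get bounced.
 */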
260511793SMark.Johnson@Sun.COM static boolean_t
260611793SMark.Johnson@Sun.COM rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, rootnex_sglinfo_t *sglinfo)
260711793SMark.Johnson@Sun.COM {
260811793SMark.Johnson@Sun.COM 	ddi_dma_atyp_t buftype;
260911793SMark.Johnson@Sun.COM 	rootnex_addr_t raddr;
261011793SMark.Johnson@Sun.COM 	boolean_t lower_addr;
261111793SMark.Johnson@Sun.COM 	boolean_t upper_addr;
261211793SMark.Johnson@Sun.COM 	uint64_t offset;
261311793SMark.Johnson@Sun.COM 	page_t **pplist;
261411793SMark.Johnson@Sun.COM 	uint64_t paddr;
261511793SMark.Johnson@Sun.COM 	uint32_t psize;
261611793SMark.Johnson@Sun.COM 	uint32_t size;
261711793SMark.Johnson@Sun.COM 	caddr_t vaddr;
261811793SMark.Johnson@Sun.COM 	uint_t pcnt;
261911793SMark.Johnson@Sun.COM 	page_t *pp;
262011793SMark.Johnson@Sun.COM 
262111793SMark.Johnson@Sun.COM 
262211793SMark.Johnson@Sun.COM 	/* shortcuts */
262311793SMark.Johnson@Sun.COM 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
262411793SMark.Johnson@Sun.COM 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
262511793SMark.Johnson@Sun.COM 	buftype = dmar_object->dmao_type;
262611793SMark.Johnson@Sun.COM 	size = dmar_object->dmao_size;
262711793SMark.Johnson@Sun.COM 
262811793SMark.Johnson@Sun.COM 	lower_addr = B_FALSE;
262911793SMark.Johnson@Sun.COM 	upper_addr = B_FALSE;
263011793SMark.Johnson@Sun.COM 	pcnt = 0;
263111793SMark.Johnson@Sun.COM 
263211793SMark.Johnson@Sun.COM 	/*
263311793SMark.Johnson@Sun.COM 	 * Process the first page to handle the initial offset of the buffer.
263411793SMark.Johnson@Sun.COM 	 * We'll use the base address we get later when we loop through all
263511793SMark.Johnson@Sun.COM 	 * the pages.
263611793SMark.Johnson@Sun.COM 	 */
263711793SMark.Johnson@Sun.COM 	if (buftype == DMA_OTYP_PAGES) {
263811793SMark.Johnson@Sun.COM 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
263911793SMark.Johnson@Sun.COM 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
264011793SMark.Johnson@Sun.COM 		    MMU_PAGEOFFSET;
264111793SMark.Johnson@Sun.COM 		paddr = pfn_to_pa(pp->p_pagenum) + offset;
264211793SMark.Johnson@Sun.COM 		psize = MIN(size, (MMU_PAGESIZE - offset));
264311793SMark.Johnson@Sun.COM 		pp = pp->p_next;
264411793SMark.Johnson@Sun.COM 		sglinfo->si_asp = NULL;
264511793SMark.Johnson@Sun.COM 	} else if (pplist != NULL) {
264611793SMark.Johnson@Sun.COM 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
264711793SMark.Johnson@Sun.COM 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
264811793SMark.Johnson@Sun.COM 		if (sglinfo->si_asp == NULL) {
264911793SMark.Johnson@Sun.COM 			sglinfo->si_asp = &kas;
265011793SMark.Johnson@Sun.COM 		}
265111793SMark.Johnson@Sun.COM 		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
265211793SMark.Johnson@Sun.COM 		paddr += offset;
265311793SMark.Johnson@Sun.COM 		psize = MIN(size, (MMU_PAGESIZE - offset));
265411793SMark.Johnson@Sun.COM 		pcnt++;
265511793SMark.Johnson@Sun.COM 	} else {
265611793SMark.Johnson@Sun.COM 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
265711793SMark.Johnson@Sun.COM 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
265811793SMark.Johnson@Sun.COM 		if (sglinfo->si_asp == NULL) {
265911793SMark.Johnson@Sun.COM 			sglinfo->si_asp = &kas;
266011793SMark.Johnson@Sun.COM 		}
266111793SMark.Johnson@Sun.COM 		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
266211793SMark.Johnson@Sun.COM 		paddr += offset;
266311793SMark.Johnson@Sun.COM 		psize = MIN(size, (MMU_PAGESIZE - offset));
266411793SMark.Johnson@Sun.COM 		vaddr += psize;
266511793SMark.Johnson@Sun.COM 	}
266611793SMark.Johnson@Sun.COM 
266711793SMark.Johnson@Sun.COM #ifdef __xpv
266811793SMark.Johnson@Sun.COM 	/*
266911793SMark.Johnson@Sun.COM 	 * If we're dom0, we're using a real device so we need to load
267011793SMark.Johnson@Sun.COM 	 * the cookies with MFNs instead of PFNs.
267111793SMark.Johnson@Sun.COM 	 */
267211793SMark.Johnson@Sun.COM 	raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
267311793SMark.Johnson@Sun.COM #else
267411793SMark.Johnson@Sun.COM 	raddr = paddr;
267511793SMark.Johnson@Sun.COM #endif
267611793SMark.Johnson@Sun.COM 
267711793SMark.Johnson@Sun.COM 	if ((raddr + psize) > sglinfo->si_segmask) {
267811793SMark.Johnson@Sun.COM 		upper_addr = B_TRUE;
267911793SMark.Johnson@Sun.COM 	} else {
268011793SMark.Johnson@Sun.COM 		lower_addr = B_TRUE;
268111793SMark.Johnson@Sun.COM 	}
268211793SMark.Johnson@Sun.COM 	size -= psize;
268311793SMark.Johnson@Sun.COM 
268411793SMark.Johnson@Sun.COM 	/*
268511793SMark.Johnson@Sun.COM 	 * Walk through the rest of the pages in the buffer. Track to see
268611793SMark.Johnson@Sun.COM 	 * if we have pages on both sides of the segment boundary.
268711793SMark.Johnson@Sun.COM 	 */
268811793SMark.Johnson@Sun.COM 	while (size > 0) {
268911793SMark.Johnson@Sun.COM 		/* partial or full page */
269011793SMark.Johnson@Sun.COM 		psize = MIN(size, MMU_PAGESIZE);
269111793SMark.Johnson@Sun.COM 
269211793SMark.Johnson@Sun.COM 		if (buftype == DMA_OTYP_PAGES) {
269311793SMark.Johnson@Sun.COM 			/* get the paddr from the page_t */
269411793SMark.Johnson@Sun.COM 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
269511793SMark.Johnson@Sun.COM 			paddr = pfn_to_pa(pp->p_pagenum);
269611793SMark.Johnson@Sun.COM 			pp = pp->p_next;
269711793SMark.Johnson@Sun.COM 		} else if (pplist != NULL) {
269811793SMark.Johnson@Sun.COM 			/* index into the array of page_t's to get the paddr */
269911793SMark.Johnson@Sun.COM 			ASSERT(!PP_ISFREE(pplist[pcnt]));
270011793SMark.Johnson@Sun.COM 			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
270111793SMark.Johnson@Sun.COM 			pcnt++;
270211793SMark.Johnson@Sun.COM 		} else {
270311793SMark.Johnson@Sun.COM 			/* call into the VM to get the paddr */
270411793SMark.Johnson@Sun.COM 			paddr =  pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
270511793SMark.Johnson@Sun.COM 			    vaddr));
270611793SMark.Johnson@Sun.COM 			vaddr += psize;
270711793SMark.Johnson@Sun.COM 		}
270811793SMark.Johnson@Sun.COM 
270911793SMark.Johnson@Sun.COM #ifdef __xpv
271011793SMark.Johnson@Sun.COM 		/*
271111793SMark.Johnson@Sun.COM 		 * If we're dom0, we're using a real device so we need to load
271211793SMark.Johnson@Sun.COM 		 * the cookies with MFNs instead of PFNs.
271311793SMark.Johnson@Sun.COM 		 */
271411793SMark.Johnson@Sun.COM 		raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
271511793SMark.Johnson@Sun.COM #else
271611793SMark.Johnson@Sun.COM 		raddr = paddr;
271711793SMark.Johnson@Sun.COM #endif
271811793SMark.Johnson@Sun.COM 
271911793SMark.Johnson@Sun.COM 		if ((raddr + psize) > sglinfo->si_segmask) {
272011793SMark.Johnson@Sun.COM 			upper_addr = B_TRUE;
272111793SMark.Johnson@Sun.COM 		} else {
272211793SMark.Johnson@Sun.COM 			lower_addr = B_TRUE;
272311793SMark.Johnson@Sun.COM 		}
272411793SMark.Johnson@Sun.COM 		/*
272511793SMark.Johnson@Sun.COM 		 * if the buffer lives both above and below the segment
272611793SMark.Johnson@Sun.COM 		 * boundary, or the current page is the page immediately
272711793SMark.Johnson@Sun.COM 		 * after the segment, we will use a copy/bounce buffer for
272811793SMark.Johnson@Sun.COM 		 * all pages > seg.
272911793SMark.Johnson@Sun.COM 		 */
273011793SMark.Johnson@Sun.COM 		if ((lower_addr && upper_addr) ||
273111793SMark.Johnson@Sun.COM 		    (raddr == (sglinfo->si_segmask + 1))) {
273211793SMark.Johnson@Sun.COM 			return (B_TRUE);
273311793SMark.Johnson@Sun.COM 		}
273411793SMark.Johnson@Sun.COM 
273511793SMark.Johnson@Sun.COM 		size -= psize;
273611793SMark.Johnson@Sun.COM 	}
273711793SMark.Johnson@Sun.COM 
273811793SMark.Johnson@Sun.COM 	return (B_FALSE);
273911793SMark.Johnson@Sun.COM }
274011793SMark.Johnson@Sun.COM 
274111793SMark.Johnson@Sun.COM 
274211793SMark.Johnson@Sun.COM /*
2743509Smrj  * rootnex_get_sgl()
2744509Smrj  *    Called in bind fastpath to get the sgl. Most of this will be replaced
2745509Smrj  *    with a call to the vm layer when vm2.0 comes around...
2746509Smrj  */
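/*
 * Illustrative sketch (hypothetical layout, not from the original source):
 * binding a 12KB buffer whose three 4K pages are physically contiguous and
 * within the DMA engine's reach produces a single 12KB cookie; if the middle
 * page were outside the engine's address range, the buffer would instead be
 * split into three cookies, with the middle cookie flagged
 * ROOTNEX_USES_COPYBUF.
 */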
2747509Smrj static void
2748509Smrj rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
2749509Smrj     rootnex_sglinfo_t *sglinfo)
2750509Smrj {
2751509Smrj 	ddi_dma_atyp_t buftype;
27525084Sjohnlev 	rootnex_addr_t raddr;
2753509Smrj 	uint64_t last_page;
2754509Smrj 	uint64_t offset;
2755509Smrj 	uint64_t addrhi;
2756509Smrj 	uint64_t addrlo;
2757509Smrj 	uint64_t maxseg;
2758509Smrj 	page_t **pplist;
2759509Smrj 	uint64_t paddr;
2760509Smrj 	uint32_t psize;
2761509Smrj 	uint32_t size;
2762509Smrj 	caddr_t vaddr;
2763509Smrj 	uint_t pcnt;
2764509Smrj 	page_t *pp;
2765509Smrj 	uint_t cnt;
2766509Smrj 
2767509Smrj 
2768509Smrj 	/* shortcuts */
2769509Smrj 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
2770509Smrj 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
2771509Smrj 	maxseg = sglinfo->si_max_cookie_size;
2772509Smrj 	buftype = dmar_object->dmao_type;
2773509Smrj 	addrhi = sglinfo->si_max_addr;
2774509Smrj 	addrlo = sglinfo->si_min_addr;
2775509Smrj 	size = dmar_object->dmao_size;
2776509Smrj 
2777509Smrj 	pcnt = 0;
2778509Smrj 	cnt = 0;
2779509Smrj 
278011793SMark.Johnson@Sun.COM 
278111793SMark.Johnson@Sun.COM 	/*
278211793SMark.Johnson@Sun.COM 	 * check to see if we need to use the copy buffer for pages over
278311793SMark.Johnson@Sun.COM 	 * the segment attr.
278411793SMark.Johnson@Sun.COM 	 */
278511793SMark.Johnson@Sun.COM 	sglinfo->si_bounce_on_seg = B_FALSE;
278611793SMark.Johnson@Sun.COM 	if (sglinfo->si_flags & _DDI_DMA_BOUNCE_ON_SEG) {
278711793SMark.Johnson@Sun.COM 		sglinfo->si_bounce_on_seg = rootnex_need_bounce_seg(
278811793SMark.Johnson@Sun.COM 		    dmar_object, sglinfo);
278911793SMark.Johnson@Sun.COM 	}
279011793SMark.Johnson@Sun.COM 
2791509Smrj 	/*
2792509Smrj 	 * if we were passed down a linked list of pages, i.e. pointer to
2793509Smrj 	 * page_t, use this to get our physical address and buf offset.
2794509Smrj 	 */
2795509Smrj 	if (buftype == DMA_OTYP_PAGES) {
2796509Smrj 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
2797509Smrj 		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2798509Smrj 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
2799509Smrj 		    MMU_PAGEOFFSET;
28005084Sjohnlev 		paddr = pfn_to_pa(pp->p_pagenum) + offset;
2801509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2802509Smrj 		pp = pp->p_next;
2803509Smrj 		sglinfo->si_asp = NULL;
2804509Smrj 
2805509Smrj 	/*
2806509Smrj 	 * We weren't passed down a linked list of pages, but if we were passed
2807509Smrj 	 * down an array of pages, use this to get our physical address and buf
2808509Smrj 	 * offset.
2809509Smrj 	 */
2810509Smrj 	} else if (pplist != NULL) {
2811509Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2812509Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
2813509Smrj 
2814509Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2815509Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2816509Smrj 		if (sglinfo->si_asp == NULL) {
2817509Smrj 			sglinfo->si_asp = &kas;
2818509Smrj 		}
2819509Smrj 
2820509Smrj 		ASSERT(!PP_ISFREE(pplist[pcnt]));
28215084Sjohnlev 		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2822509Smrj 		paddr += offset;
2823509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2824509Smrj 		pcnt++;
2825509Smrj 
2826509Smrj 	/*
2827509Smrj 	 * All we have is a virtual address, we'll need to call into the VM
2828509Smrj 	 * to get the physical address.
2829509Smrj 	 */
2830509Smrj 	} else {
2831509Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2832509Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
2833509Smrj 
2834509Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2835509Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2836509Smrj 		if (sglinfo->si_asp == NULL) {
2837509Smrj 			sglinfo->si_asp = &kas;
2838509Smrj 		}
2839509Smrj 
28405084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
2841509Smrj 		paddr += offset;
2842509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2843509Smrj 		vaddr += psize;
2844509Smrj 	}
2845509Smrj 
28465084Sjohnlev #ifdef __xpv
28475084Sjohnlev 	/*
28485084Sjohnlev 	 * If we're dom0, we're using a real device so we need to load
28495084Sjohnlev 	 * the cookies with MFNs instead of PFNs.
28505084Sjohnlev 	 */
28515084Sjohnlev 	raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
28525084Sjohnlev #else
28535084Sjohnlev 	raddr = paddr;
28545084Sjohnlev #endif
28555084Sjohnlev 
2856509Smrj 	/*
2857509Smrj 	 * Set up the first cookie with the physical address of the page and the
2858509Smrj 	 * size of the page (which takes into account the initial offset into
2859509Smrj 	 * the page).
2860509Smrj 	 */
28615084Sjohnlev 	sgl[cnt].dmac_laddress = raddr;
2862509Smrj 	sgl[cnt].dmac_size = psize;
2863509Smrj 	sgl[cnt].dmac_type = 0;
2864509Smrj 
2865509Smrj 	/*
2866509Smrj 	 * Save away the buffer offset into the page. We'll need this later in
2867509Smrj 	 * the copy buffer code to help figure out the page index within the
2868509Smrj 	 * buffer and the offset into the current page.
2869509Smrj 	 */
2870509Smrj 	sglinfo->si_buf_offset = offset;
2871509Smrj 
2872509Smrj 	/*
287311793SMark.Johnson@Sun.COM 	 * Use the copy buffer for this page if we are bouncing on the
287411793SMark.Johnson@Sun.COM 	 * segment boundary and this page is over the segment boundary,
287511793SMark.Johnson@Sun.COM 	 *   OR
287611793SMark.Johnson@Sun.COM 	 * if the DMA engine can't reach the physical address.
2877509Smrj 	 */
287811793SMark.Johnson@Sun.COM 	if (((sglinfo->si_bounce_on_seg) &&
287911793SMark.Johnson@Sun.COM 	    ((raddr + psize) > sglinfo->si_segmask)) ||
288011793SMark.Johnson@Sun.COM 	    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {
288111793SMark.Johnson@Sun.COM 		/*
288211793SMark.Johnson@Sun.COM 		 * Increase how much copy buffer we use. We always increase by
288311793SMark.Johnson@Sun.COM 		 * pagesize so we don't have to worry about converting offsets.
288411793SMark.Johnson@Sun.COM 		 * Set a flag in the cookie's dmac_type to indicate that it uses
288511793SMark.Johnson@Sun.COM 		 * the copy buffer. If this isn't the last cookie, go to the
288611793SMark.Johnson@Sun.COM 		 * next cookie (since we separate each page which uses the copy
288711793SMark.Johnson@Sun.COM 		 * buffer in case the copy buffer is not physically contiguous).
288811793SMark.Johnson@Sun.COM 		 */
2889509Smrj 		sglinfo->si_copybuf_req += MMU_PAGESIZE;
2890509Smrj 		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2891509Smrj 		if ((cnt + 1) < sglinfo->si_max_pages) {
2892509Smrj 			cnt++;
2893509Smrj 			sgl[cnt].dmac_laddress = 0;
2894509Smrj 			sgl[cnt].dmac_size = 0;
2895509Smrj 			sgl[cnt].dmac_type = 0;
2896509Smrj 		}
2897509Smrj 	}
2898509Smrj 
2899509Smrj 	/*
2900509Smrj 	 * save this page's physical address so we can figure out if the next
2901509Smrj 	 * page is physically contiguous. Keep decrementing size until we are
2902509Smrj 	 * done with the buffer.
2903509Smrj 	 */
29045084Sjohnlev 	last_page = raddr & MMU_PAGEMASK;
2905509Smrj 	size -= psize;
2906509Smrj 
2907509Smrj 	while (size > 0) {
2908509Smrj 		/* Get the size for this page (i.e. partial or full page) */
2909509Smrj 		psize = MIN(size, MMU_PAGESIZE);
2910509Smrj 
2911509Smrj 		if (buftype == DMA_OTYP_PAGES) {
2912509Smrj 			/* get the paddr from the page_t */
2913509Smrj 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
29145084Sjohnlev 			paddr = pfn_to_pa(pp->p_pagenum);
2915509Smrj 			pp = pp->p_next;
2916509Smrj 		} else if (pplist != NULL) {
2917509Smrj 			/* index into the array of page_t's to get the paddr */
2918509Smrj 			ASSERT(!PP_ISFREE(pplist[pcnt]));
29195084Sjohnlev 			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2920509Smrj 			pcnt++;
29210Sstevel@tonic-gate 		} else {
2922509Smrj 			/* call into the VM to get the paddr */
29235084Sjohnlev 			paddr =  pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
2924509Smrj 			    vaddr));
2925509Smrj 			vaddr += psize;
2926509Smrj 		}
2927509Smrj 
29285084Sjohnlev #ifdef __xpv
29295084Sjohnlev 		/*
29305084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
29315084Sjohnlev 		 * the cookies with MFNs instead of PFNs.
29325084Sjohnlev 		 */
29335084Sjohnlev 		raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
29345084Sjohnlev #else
29355084Sjohnlev 		raddr = paddr;
29365084Sjohnlev #endif
293711793SMark.Johnson@Sun.COM 
293811793SMark.Johnson@Sun.COM 		/*
293911793SMark.Johnson@Sun.COM 		 * Use the copy buffer for this page if we are bouncing on
294011793SMark.Johnson@Sun.COM 		 * the segment boundary and this page is over the segment
294111793SMark.Johnson@Sun.COM 		 * boundary,
294211793SMark.Johnson@Sun.COM 		 *   OR
294311793SMark.Johnson@Sun.COM 		 * if the DMA engine can't reach the physical address.
294411793SMark.Johnson@Sun.COM 		 */
294511793SMark.Johnson@Sun.COM 		if (((sglinfo->si_bounce_on_seg) &&
294611793SMark.Johnson@Sun.COM 		    ((raddr + psize) > sglinfo->si_segmask)) ||
294711793SMark.Johnson@Sun.COM 		    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {
294811793SMark.Johnson@Sun.COM 
2949509Smrj 			sglinfo->si_copybuf_req += MMU_PAGESIZE;
2950509Smrj 
29510Sstevel@tonic-gate 			/*
2952509Smrj 			 * if there is something in the current cookie, go to
2953509Smrj 			 * the next one. We only want one page in a cookie which
2954509Smrj 			 * uses the copybuf since the copybuf doesn't have to
2955509Smrj 			 * be physically contiguous.
2956509Smrj 			 */
2957509Smrj 			if (sgl[cnt].dmac_size != 0) {
2958509Smrj 				cnt++;
2959509Smrj 			}
29605084Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
2961509Smrj 			sgl[cnt].dmac_size = psize;
2962509Smrj #if defined(__amd64)
2963509Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2964509Smrj #else
2965509Smrj 			/*
2966509Smrj 			 * save the buf offset for 32-bit kernel. used in the
2967509Smrj 			 * obsoleted interfaces.
2968509Smrj 			 */
2969509Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
2970509Smrj 			    (dmar_object->dmao_size - size);
2971509Smrj #endif
2972509Smrj 			/* if this isn't the last cookie, go to the next one */
2973509Smrj 			if ((cnt + 1) < sglinfo->si_max_pages) {
2974509Smrj 				cnt++;
2975509Smrj 				sgl[cnt].dmac_laddress = 0;
2976509Smrj 				sgl[cnt].dmac_size = 0;
2977509Smrj 				sgl[cnt].dmac_type = 0;
2978509Smrj 			}
2979509Smrj 
2980509Smrj 		/*
2981509Smrj 		 * This page didn't need the copy buffer. Start a new cookie if
2982509Smrj 		 * it's not physically contiguous with the last page, or it would
2983509Smrj 		 * put us over a segment boundary, or it puts us over the max
2984509Smrj 		 * cookie size, or the current sgl doesn't have anything in it.
2985509Smrj 		 */
29865084Sjohnlev 		} else if (((last_page + MMU_PAGESIZE) != raddr) ||
29875084Sjohnlev 		    !(raddr & sglinfo->si_segmask) ||
2988509Smrj 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
2989509Smrj 		    (sgl[cnt].dmac_size == 0)) {
2990509Smrj 			/*
2991509Smrj 			 * if we're not already in a new cookie, go to the next
2992509Smrj 			 * cookie.
2993509Smrj 			 */
2994509Smrj 			if (sgl[cnt].dmac_size != 0) {
2995509Smrj 				cnt++;
2996509Smrj 			}
2997509Smrj 
2998509Smrj 			/* save the cookie information */
29995084Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
3000509Smrj 			sgl[cnt].dmac_size = psize;
3001509Smrj #if defined(__amd64)
3002509Smrj 			sgl[cnt].dmac_type = 0;
3003509Smrj #else
3004509Smrj 			/*
3005509Smrj 			 * save the buf offset for 32-bit kernel. used in the
3006509Smrj 			 * obsoleted interfaces.
3007509Smrj 			 */
3008509Smrj 			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
3009509Smrj #endif
3010509Smrj 
3011509Smrj 		/*
3012509Smrj 		 * this page didn't need the copy buffer, it is physically
3013509Smrj 		 * contiguous with the last page, and it's <= the max cookie
3014509Smrj 		 * size.
3015509Smrj 		 */
3016509Smrj 		} else {
3017509Smrj 			sgl[cnt].dmac_size += psize;
3018509Smrj 
3019509Smrj 			/*
3020509Smrj 			 * if this exactly ==  the maximum cookie size, and
3021509Smrj 			 * it isn't the last cookie, go to the next cookie.
3022509Smrj 			 */
3023509Smrj 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
3024509Smrj 			    ((cnt + 1) < sglinfo->si_max_pages)) {
3025509Smrj 				cnt++;
3026509Smrj 				sgl[cnt].dmac_laddress = 0;
3027509Smrj 				sgl[cnt].dmac_size = 0;
3028509Smrj 				sgl[cnt].dmac_type = 0;
3029509Smrj 			}
3030509Smrj 		}
3031509Smrj 
3032509Smrj 		/*
3033509Smrj 		 * save this page's physical address so we can figure out if the
3034509Smrj 		 * next page is physically contiguous. Keep decrementing size
3035509Smrj 		 * until we are done with the buffer.
3036509Smrj 		 */
30375084Sjohnlev 		last_page = raddr;
3038509Smrj 		size -= psize;
3039509Smrj 	}
3040509Smrj 
3041509Smrj 	/* we're done, save away how many cookies the sgl has */
3042509Smrj 	if (sgl[cnt].dmac_size == 0) {
3043509Smrj 		ASSERT(cnt < sglinfo->si_max_pages);
3044509Smrj 		sglinfo->si_sgl_size = cnt;
3045509Smrj 	} else {
3046509Smrj 		sglinfo->si_sgl_size = cnt + 1;
3047509Smrj 	}
3048509Smrj }
3049509Smrj 
3050509Smrj /*
3051509Smrj  * rootnex_bind_slowpath()
3052509Smrj  *    Call in the bind path if the calling driver can't use the sgl without
3053509Smrj  *    modifying it. We either need to use the copy buffer and/or we will end up
3054509Smrj  *    with a partial bind.
3055509Smrj  */
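/*
 * Illustrative example (not from the original source): a bind whose buffer
 * needs the copy buffer, or whose cookie count exceeds the driver's
 * dma_attr_sgllen, comes through this path instead of the fastpath.
 */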
3056509Smrj static int
3057509Smrj rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
3058509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag)
3059509Smrj {
3060509Smrj 	rootnex_sglinfo_t *sinfo;
3061509Smrj 	rootnex_window_t *window;
3062509Smrj 	ddi_dma_cookie_t *cookie;
3063509Smrj 	size_t copybuf_used;
3064509Smrj 	size_t dmac_size;
3065509Smrj 	boolean_t partial;
3066509Smrj 	off_t cur_offset;
3067509Smrj 	page_t *cur_pp;
3068509Smrj 	major_t mnum;
3069509Smrj 	int e;
3070509Smrj 	int i;
3071509Smrj 
3072509Smrj 
3073509Smrj 	sinfo = &dma->dp_sglinfo;
3074509Smrj 	copybuf_used = 0;
3075509Smrj 	partial = B_FALSE;
3076509Smrj 
3077509Smrj 	/*
3078509Smrj 	 * If we're using the copybuf, set the copybuf state in dma struct.
3079509Smrj 	 * Needs to be first since it sets the copy buffer size.
3080509Smrj 	 */
3081509Smrj 	if (sinfo->si_copybuf_req != 0) {
3082509Smrj 		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
3083509Smrj 		if (e != DDI_SUCCESS) {
3084509Smrj 			return (e);
3085509Smrj 		}
3086509Smrj 	} else {
3087509Smrj 		dma->dp_copybuf_size = 0;
3088509Smrj 	}
3089509Smrj 
3090509Smrj 	/*
3091509Smrj 	 * Figure out if we need to do a partial mapping. If so, figure out
3092509Smrj 	 * if we need to trim the buffers when we munge the sgl.
3093509Smrj 	 */
3094509Smrj 	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
3095509Smrj 	    (dma->dp_dma.dmao_size > dma->dp_maxxfer) ||
3096509Smrj 	    (attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
3097509Smrj 		dma->dp_partial_required = B_TRUE;
3098509Smrj 		if (attr->dma_attr_granular != 1) {
3099509Smrj 			dma->dp_trim_required = B_TRUE;
3100509Smrj 		}
3101509Smrj 	} else {
3102509Smrj 		dma->dp_partial_required = B_FALSE;
3103509Smrj 		dma->dp_trim_required = B_FALSE;
3104509Smrj 	}
3105509Smrj 
3106509Smrj 	/* If we need to do a partial bind, make sure the driver supports it */
3107509Smrj 	if (dma->dp_partial_required &&
3108509Smrj 	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
3109509Smrj 
3110509Smrj 		mnum = ddi_driver_major(dma->dp_dip);
3111509Smrj 		/*
3112509Smrj 		 * rootnex_bind_warn is patchable, which allows us to print one
3113509Smrj 		 * warning per major number.
3114509Smrj 		 */
3115509Smrj 		if ((rootnex_bind_warn) &&
3116509Smrj 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
3117509Smrj 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
3118509Smrj 			cmn_err(CE_WARN, "!%s: coding error detected, the "
3119509Smrj 			    "driver is using ddi_dma_attr(9S) incorrectly. "
3120509Smrj 			    "There is a small risk of data corruption in "
3121509Smrj 			    "particular with large I/Os. The driver should be "
3122509Smrj 			    "replaced with a corrected version for proper "
3123509Smrj 			    "system operation. To disable this warning, add "
3124509Smrj 			    "'set rootnex:rootnex_bind_warn=0' to "
3125509Smrj 			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
3126509Smrj 		}
3127509Smrj 		return (DDI_DMA_TOOBIG);
3128509Smrj 	}
3129509Smrj 
3130509Smrj 	/*
3131509Smrj 	 * we might need multiple windows; set up state to handle them. In this
3132509Smrj 	 * code path, we will have at least one window.
3133509Smrj 	 */
3134509Smrj 	e = rootnex_setup_windows(hp, dma, attr, kmflag);
3135509Smrj 	if (e != DDI_SUCCESS) {
3136509Smrj 		rootnex_teardown_copybuf(dma);
3137509Smrj 		return (e);
3138509Smrj 	}
3139509Smrj 
3140509Smrj 	window = &dma->dp_window[0];
3141509Smrj 	cookie = &dma->dp_cookies[0];
3142509Smrj 	cur_offset = 0;
3143509Smrj 	rootnex_init_win(hp, dma, window, cookie, cur_offset);
3144509Smrj 	if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) {
3145509Smrj 		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
3146509Smrj 	}
3147509Smrj 
3148509Smrj 	/* loop through all the cookies we got back from get_sgl() */
3149509Smrj 	for (i = 0; i < sinfo->si_sgl_size; i++) {
3150509Smrj 		/*
3151509Smrj 		 * If we're using the copy buffer, check this cookie and setup
3152509Smrj 		 * its associated copy buffer state. If this cookie uses the
3153509Smrj 		 * copy buffer, make sure we sync this window during dma_sync.
3154509Smrj 		 */
3155509Smrj 		if (dma->dp_copybuf_size > 0) {
3156509Smrj 			rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie,
3157509Smrj 			    cur_offset, &copybuf_used, &cur_pp);
3158509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3159509Smrj 				window->wd_dosync = B_TRUE;
3160509Smrj 			}
3161509Smrj 		}
3162509Smrj 
3163509Smrj 		/*
3164509Smrj 		 * save away the cookie size, since it could be modified in
3165509Smrj 		 * the windowing code.
3166509Smrj 		 */
3167509Smrj 		dmac_size = cookie->dmac_size;
3168509Smrj 
3169509Smrj 		/* if we went over max copybuf size */
3170509Smrj 		if (dma->dp_copybuf_size &&
3171509Smrj 		    (copybuf_used > dma->dp_copybuf_size)) {
3172509Smrj 			partial = B_TRUE;
3173509Smrj 			e = rootnex_copybuf_window_boundary(hp, dma, &window,
3174509Smrj 			    cookie, cur_offset, &copybuf_used);
3175509Smrj 			if (e != DDI_SUCCESS) {
3176509Smrj 				rootnex_teardown_copybuf(dma);
3177509Smrj 				rootnex_teardown_windows(dma);
3178509Smrj 				return (e);
3179509Smrj 			}
3180509Smrj 
3181509Smrj 			/*
3182509Smrj 			 * if the cookie uses the copy buffer, make sure the
3183509Smrj 			 * new window we just moved to is set to sync.
3184509Smrj 			 */
3185509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3186509Smrj 				window->wd_dosync = B_TRUE;
3187509Smrj 			}
3188*12837Sfrank.van.der.linden@oracle.com 			ROOTNEX_DPROBE1(rootnex__copybuf__window, dev_info_t *,
3189509Smrj 			    dma->dp_dip);
3190509Smrj 
3191509Smrj 		/* if the cookie cnt == max sgllen, move to the next window */
3192509Smrj 		} else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) {
3193509Smrj 			partial = B_TRUE;
3194509Smrj 			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
3195509Smrj 			e = rootnex_sgllen_window_boundary(hp, dma, &window,
3196509Smrj 			    cookie, attr, cur_offset);
3197509Smrj 			if (e != DDI_SUCCESS) {
3198509Smrj 				rootnex_teardown_copybuf(dma);
3199509Smrj 				rootnex_teardown_windows(dma);
3200509Smrj 				return (e);
3201509Smrj 			}
3202509Smrj 
3203509Smrj 			/*
3204509Smrj 			 * if the cookie uses the copy buffer, make sure the
3205509Smrj 			 * new window we just moved to is set to sync.
3206509Smrj 			 */
3207509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3208509Smrj 				window->wd_dosync = B_TRUE;
3209509Smrj 			}
3210*12837Sfrank.van.der.linden@oracle.com 			ROOTNEX_DPROBE1(rootnex__sgllen__window, dev_info_t *,
3211509Smrj 			    dma->dp_dip);
3212509Smrj 
3213509Smrj 		/* else if we will be over maxxfer */
3214509Smrj 		} else if ((window->wd_size + dmac_size) >
3215509Smrj 		    dma->dp_maxxfer) {
3216509Smrj 			partial = B_TRUE;
3217509Smrj 			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
3218509Smrj 			    cookie);
3219509Smrj 			if (e != DDI_SUCCESS) {
3220509Smrj 				rootnex_teardown_copybuf(dma);
3221509Smrj 				rootnex_teardown_windows(dma);
3222509Smrj 				return (e);
3223509Smrj 			}
3224509Smrj 
3225509Smrj 			/*
3226509Smrj 			 * if the cookie uses the copy buffer, make sure the
3227509Smrj 			 * new window we just moved to is set to sync.
32280Sstevel@tonic-gate 			 */
3229509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3230509Smrj 				window->wd_dosync = B_TRUE;
3231509Smrj 			}
3232*12837Sfrank.van.der.linden@oracle.com 			ROOTNEX_DPROBE1(rootnex__maxxfer__window, dev_info_t *,
3233509Smrj 			    dma->dp_dip);
3234509Smrj 
3235509Smrj 		/* else this cookie fits in the current window */
3236509Smrj 		} else {
3237509Smrj 			window->wd_cookie_cnt++;
3238509Smrj 			window->wd_size += dmac_size;
3239509Smrj 		}
3240509Smrj 
3241509Smrj 		/* track our offset into the buffer, go to the next cookie */
3242509Smrj 		ASSERT(dmac_size <= dma->dp_dma.dmao_size);
3243509Smrj 		ASSERT(cookie->dmac_size <= dmac_size);
3244509Smrj 		cur_offset += dmac_size;
3245509Smrj 		cookie++;
3246509Smrj 	}
3247509Smrj 
3248509Smrj 	/* if we ended up with a zero sized window in the end, clean it up */
3249509Smrj 	if (window->wd_size == 0) {
3250509Smrj 		hp->dmai_nwin--;
3251509Smrj 		window--;
3252509Smrj 	}
3253509Smrj 
3254509Smrj 	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
3255509Smrj 
3256509Smrj 	if (!partial) {
3257509Smrj 		return (DDI_DMA_MAPPED);
3258509Smrj 	}
3259509Smrj 
3260509Smrj 	ASSERT(dma->dp_partial_required);
3261509Smrj 	return (DDI_DMA_PARTIAL_MAP);
3262509Smrj }
3263509Smrj 
3264509Smrj 
3265509Smrj /*
3266509Smrj  * rootnex_setup_copybuf()
3267509Smrj  *    Called in bind slowpath. Figures out if we're going to use the copy
3268509Smrj  *    buffer, and if we do, sets up the basic state to handle it.
3269509Smrj  */
3270509Smrj static int
3271509Smrj rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
3272509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr)
3273509Smrj {
3274509Smrj 	rootnex_sglinfo_t *sinfo;
3275509Smrj 	ddi_dma_attr_t lattr;
3276509Smrj 	size_t max_copybuf;
3277509Smrj 	int cansleep;
3278509Smrj 	int e;
3279509Smrj #if !defined(__amd64)
3280509Smrj 	int vmflag;
3281509Smrj #endif
3282509Smrj 
3283509Smrj 
3284509Smrj 	sinfo = &dma->dp_sglinfo;
3285509Smrj 
32865251Smrj 	/* read this first so it's consistent through the routine  */
32875251Smrj 	max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;
3288509Smrj 
3289509Smrj 	/* We need to call into the rootnex on ddi_dma_sync() */
3290509Smrj 	hp->dmai_rflags &= ~DMP_NOSYNC;
3291509Smrj 
3292509Smrj 	/* make sure the copybuf size <= the max size */
3293509Smrj 	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
3294509Smrj 	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
3295509Smrj 
3296509Smrj #if !defined(__amd64)
3297509Smrj 	/*
3298509Smrj 	 * if we don't have kva space to copy to/from, allocate the KVA space
3299509Smrj 	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
3300509Smrj 	 * the 64-bit kernel.
3301509Smrj 	 */
3302509Smrj 	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
3303509Smrj 	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
3304509Smrj 
3305509Smrj 		/* convert the sleep flags */
3306509Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
3307509Smrj 			vmflag = VM_SLEEP;
3308509Smrj 		} else {
3309509Smrj 			vmflag = VM_NOSLEEP;
3310509Smrj 		}
3311509Smrj 
3312509Smrj 		/* allocate Kernel VA space that we can bcopy to/from */
3313509Smrj 		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
3314509Smrj 		    vmflag);
3315509Smrj 		if (dma->dp_kva == NULL) {
3316509Smrj 			return (DDI_DMA_NORESOURCES);
3317509Smrj 		}
3318509Smrj 	}
3319509Smrj #endif
3320509Smrj 
3321509Smrj 	/* convert the sleep flags */
3322509Smrj 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
3323509Smrj 		cansleep = 1;
3324509Smrj 	} else {
3325509Smrj 		cansleep = 0;
3326509Smrj 	}
3327509Smrj 
3328509Smrj 	/*
33297173Smrj 	 * Allocate the actual copy buffer. This needs to fit within the DMA
33307173Smrj 	 * engine limits, so we can't use kmem_alloc... We don't need
33317173Smrj 	 * contiguous memory (sgllen) since we will be forcing windows on
33327173Smrj 	 * sgllen anyway.
3333509Smrj 	 */
3334509Smrj 	lattr = *attr;
3335509Smrj 	lattr.dma_attr_align = MMU_PAGESIZE;
33367173Smrj 	/*
33377173Smrj 	 * this should be < 0 to indicate no limit, but due to a bug in
33387173Smrj 	 * the rootnex, we'll set it to the maximum positive int.
33397173Smrj 	 */
33407173Smrj 	lattr.dma_attr_sgllen = 0x7fffffff;
334111793SMark.Johnson@Sun.COM 	/*
334211793SMark.Johnson@Sun.COM 	 * if we're using the copy buffer because of seg, use that for our
334311793SMark.Johnson@Sun.COM 	 * upper address limit.
334411793SMark.Johnson@Sun.COM 	 */
334511793SMark.Johnson@Sun.COM 	if (sinfo->si_bounce_on_seg) {
334611793SMark.Johnson@Sun.COM 		lattr.dma_attr_addr_hi = lattr.dma_attr_seg;
334711793SMark.Johnson@Sun.COM 	}
3348509Smrj 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
3349509Smrj 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
3350509Smrj 	if (e != DDI_SUCCESS) {
3351509Smrj #if !defined(__amd64)
3352509Smrj 		if (dma->dp_kva != NULL) {
3353509Smrj 			vmem_free(heap_arena, dma->dp_kva,
3354509Smrj 			    dma->dp_copybuf_size);
3355509Smrj 		}
3356509Smrj #endif
3357509Smrj 		return (DDI_DMA_NORESOURCES);
3358509Smrj 	}
3359509Smrj 
3360*12837Sfrank.van.der.linden@oracle.com 	ROOTNEX_DPROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
3361509Smrj 	    size_t, dma->dp_copybuf_size);
3362509Smrj 
3363509Smrj 	return (DDI_SUCCESS);
3364509Smrj }
3365509Smrj 
3366509Smrj 
3367509Smrj /*
3368509Smrj  * rootnex_setup_windows()
3369509Smrj  *    Called in bind slowpath to set up the window state. We always have
3370509Smrj  *    windows in the slowpath, even if the window count is 1.
3371509Smrj  */
3372509Smrj static int
3373509Smrj rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3374509Smrj     ddi_dma_attr_t *attr, int kmflag)
3375509Smrj {
3376509Smrj 	rootnex_window_t *windowp;
3377509Smrj 	rootnex_sglinfo_t *sinfo;
3378509Smrj 	size_t copy_state_size;
3379509Smrj 	size_t win_state_size;
3380509Smrj 	size_t state_available;
3381509Smrj 	size_t space_needed;
3382509Smrj 	uint_t copybuf_win;
3383509Smrj 	uint_t maxxfer_win;
3384509Smrj 	size_t space_used;
3385509Smrj 	uint_t sglwin;
3386509Smrj 
3387509Smrj 
3388509Smrj 	sinfo = &dma->dp_sglinfo;
3389509Smrj 
3390509Smrj 	dma->dp_current_win = 0;
3391509Smrj 	hp->dmai_nwin = 0;
3392509Smrj 
3393509Smrj 	/* If we don't need to do a partial, we only have one window */
3394509Smrj 	if (!dma->dp_partial_required) {
3395509Smrj 		dma->dp_max_win = 1;
3396509Smrj 
3397509Smrj 	/*
3398509Smrj 	 * we need multiple windows, need to figure out the worst case number
3399509Smrj 	 * of windows.
3400509Smrj 	 */
3401509Smrj 	} else {
3402509Smrj 		/*
3403509Smrj 		 * if we need windows because we need more copy buffer than
3404509Smrj 		 * we allow, the worst case number of windows we could need
3405509Smrj 		 * here would be (copybuf space required / copybuf space that
3406509Smrj 		 * we have) plus one for remainder, and plus 2 to handle the
3407509Smrj 		 * extra pages on the trim for the first and last pages of the
3408509Smrj 		 * buffer (a page is the minimum window size so under the right
3409509Smrj 		 * attr settings, you could have a window for each page).
3410509Smrj 		 * The last page will only be hit here if the size is not a
3411509Smrj 		 * multiple of the granularity (which theoretically shouldn't
3412509Smrj 		 * be the case but never has been enforced, so we could have
3413509Smrj 		 * broken things without it).
3414509Smrj 		 */
3415509Smrj 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
3416509Smrj 			ASSERT(dma->dp_copybuf_size > 0);
3417509Smrj 			copybuf_win = (sinfo->si_copybuf_req /
3418509Smrj 			    dma->dp_copybuf_size) + 1 + 2;
3419509Smrj 		} else {
3420509Smrj 			copybuf_win = 0;
3421509Smrj 		}
3422509Smrj 
3423509Smrj 		/*
3424509Smrj 		 * if we need windows because we have more cookies than the H/W
3425509Smrj 		 * can handle, the number of windows we would need here would
342612118Smark.r.johnson@oracle.com 		 * be (cookie count / (cookie count H/W supports minus 1 [for
342712118Smark.r.johnson@oracle.com 		 * trim])) plus one for remainder.
3428509Smrj 		 */
3429509Smrj 		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
343012118Smark.r.johnson@oracle.com 			sglwin = (sinfo->si_sgl_size /
343112118Smark.r.johnson@oracle.com 			    (attr->dma_attr_sgllen - 1)) + 1;
3432509Smrj 		} else {
3433509Smrj 			sglwin = 0;
3434509Smrj 		}
3435509Smrj 
3436509Smrj 		/*
3437509Smrj 		 * if we need windows because we're binding more memory than the
3438509Smrj 		 * H/W can transfer at once, the number of windows we would need
3439509Smrj 		 * here would be (xfer count / max xfer H/W supports) plus one
3440509Smrj 		 * for remainder, and plus 2 to handle the extra pages on the
3441509Smrj 		 * trim (see above comment about trim)
3442509Smrj 		 */
3443509Smrj 		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
3444509Smrj 			maxxfer_win = (dma->dp_dma.dmao_size /
3445509Smrj 			    dma->dp_maxxfer) + 1 + 2;
3446509Smrj 		} else {
3447509Smrj 			maxxfer_win = 0;
3448509Smrj 		}
3449509Smrj 		dma->dp_max_win =  copybuf_win + sglwin + maxxfer_win;
3450509Smrj 		ASSERT(dma->dp_max_win > 0);
3451509Smrj 	}
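	/*
	 * Illustrative worst case arithmetic (hypothetical numbers, not from
	 * the original source): with si_copybuf_req of 40KB and a 16KB
	 * dp_copybuf_size, copybuf_win is (40960 / 16384) + 1 + 2 = 5; with
	 * 100 cookies and a dma_attr_sgllen of 17, sglwin is (100 / 16) + 1
	 * = 7; with a 10MB bind and a 4MB dp_maxxfer, maxxfer_win is
	 * 2 + 1 + 2 = 5, for a worst case dp_max_win of 17.
	 */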
3452509Smrj 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
3453509Smrj 
3454509Smrj 	/*
3455509Smrj 	 * Get space for window and potential copy buffer state. Before we
3456509Smrj 	 * go and allocate memory, see if we can get away with using what's
3457509Smrj 	 * left in the pre-allocated state or the dynamically allocated sgl.
3458509Smrj 	 */
3459509Smrj 	space_used = (uintptr_t)(sinfo->si_sgl_size *
3460509Smrj 	    sizeof (ddi_dma_cookie_t));
3461509Smrj 
3462509Smrj 	/* if we dynamically allocated space for the cookies */
3463509Smrj 	if (dma->dp_need_to_free_cookie) {
3464509Smrj 		/* if we have more space in the pre-allocated buffer, use it */
3465509Smrj 		ASSERT(space_used <= dma->dp_cookie_size);
3466509Smrj 		if ((dma->dp_cookie_size - space_used) <=
3467509Smrj 		    rootnex_state->r_prealloc_size) {
3468509Smrj 			state_available = rootnex_state->r_prealloc_size;
3469509Smrj 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
3470509Smrj 
3471509Smrj 		/*
3472509Smrj 		 * else, we have more free space in the dynamically allocated
3473509Smrj 		 * buffer, i.e. the buffer wasn't worst case fragmented so we
3474509Smrj 		 * didn't need a lot of cookies.
3475509Smrj 		 */
3476509Smrj 		} else {
3477509Smrj 			state_available = dma->dp_cookie_size - space_used;
3478509Smrj 			windowp = (rootnex_window_t *)
3479509Smrj 			    &dma->dp_cookies[sinfo->si_sgl_size];
3480509Smrj 		}
3481509Smrj 
3482509Smrj 	/* we used the pre-allocated buffer */
3483509Smrj 	} else {
3484509Smrj 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
3485509Smrj 		state_available = rootnex_state->r_prealloc_size - space_used;
3486509Smrj 		windowp = (rootnex_window_t *)
3487509Smrj 		    &dma->dp_cookies[sinfo->si_sgl_size];
3488509Smrj 	}
3489509Smrj 
3490509Smrj 	/*
3491509Smrj 	 * figure out how much state we need to track the copy buffer. Add an
3492509Smrj 	 * additional 8 bytes for pointer alignment later.
3493509Smrj 	 */
3494509Smrj 	if (dma->dp_copybuf_size > 0) {
3495509Smrj 		copy_state_size = sinfo->si_max_pages *
3496509Smrj 		    sizeof (rootnex_pgmap_t);
3497509Smrj 	} else {
3498509Smrj 		copy_state_size = 0;
3499509Smrj 	}
3500509Smrj 	/* add an additional 8 bytes for pointer alignment */
3501509Smrj 	space_needed = win_state_size + copy_state_size + 0x8;
3502509Smrj 
3503509Smrj 	/* if we have enough space already, use it */
3504509Smrj 	if (state_available >= space_needed) {
3505509Smrj 		dma->dp_window = windowp;
3506509Smrj 		dma->dp_need_to_free_window = B_FALSE;
3507509Smrj 
3508509Smrj 	/* not enough space, need to allocate more. */
3509509Smrj 	} else {
3510509Smrj 		dma->dp_window = kmem_alloc(space_needed, kmflag);
3511509Smrj 		if (dma->dp_window == NULL) {
3512509Smrj 			return (DDI_DMA_NORESOURCES);
3513509Smrj 		}
3514509Smrj 		dma->dp_need_to_free_window = B_TRUE;
3515509Smrj 		dma->dp_window_size = space_needed;
3516*12837Sfrank.van.der.linden@oracle.com 		ROOTNEX_DPROBE2(rootnex__bind__sp__alloc, dev_info_t *,
3517509Smrj 		    dma->dp_dip, size_t, space_needed);
3518509Smrj 	}
3519509Smrj 
3520509Smrj 	/*
3521509Smrj 	 * we allocate copy buffer state and window state at the same time.
3522509Smrj 	 * Set up our copy buffer state pointers. Make sure it's aligned.
3523509Smrj 	 */
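	/*
	 * Hypothetical example of the alignment fixup below: a dp_pgmap
	 * pointer that would land on an address ending in 0x1c is rounded up
	 * to the next 8-byte boundary, since (0x1c + 0x7) & ~0x7 == 0x20.
	 */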
3524509Smrj 	if (dma->dp_copybuf_size > 0) {
3525509Smrj 		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
3526509Smrj 		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
3527509Smrj 
3528509Smrj #if !defined(__amd64)
3529509Smrj 		/*
3530509Smrj 		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
3531509Smrj 		 * false/NULL. Should be quicker to bzero vs loop and set.
3532509Smrj 		 */
3533509Smrj 		bzero(dma->dp_pgmap, copy_state_size);
3534509Smrj #endif
3535509Smrj 	} else {
3536509Smrj 		dma->dp_pgmap = NULL;
3537509Smrj 	}
3538509Smrj 
3539509Smrj 	return (DDI_SUCCESS);
3540509Smrj }
3541509Smrj 
3542509Smrj 
3543509Smrj /*
3544509Smrj  * rootnex_teardown_copybuf()
3545509Smrj  *    cleans up after rootnex_setup_copybuf()
3546509Smrj  */
3547509Smrj static void
3548509Smrj rootnex_teardown_copybuf(rootnex_dma_t *dma)
3549509Smrj {
3550509Smrj #if !defined(__amd64)
3551509Smrj 	int i;
3552509Smrj 
3553509Smrj 	/*
3554509Smrj 	 * if we allocated kernel heap VMEM space, go through all the pages and
3555509Smrj 	 * map out any of the ones that we're mapped into the kernel heap VMEM
3556509Smrj 	 * arena. Then free the VMEM space.
3557509Smrj 	 */
3558509Smrj 	if (dma->dp_kva != NULL) {
3559509Smrj 		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
3560509Smrj 			if (dma->dp_pgmap[i].pm_mapped) {
3561509Smrj 				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
3562509Smrj 				    MMU_PAGESIZE, HAT_UNLOAD);
3563509Smrj 				dma->dp_pgmap[i].pm_mapped = B_FALSE;
3564509Smrj 			}
3565509Smrj 		}
3566509Smrj 
3567509Smrj 		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
3568509Smrj 	}
3569509Smrj 
3570509Smrj #endif
3571509Smrj 
3572509Smrj 	/* if we allocated a copy buffer, free it */
3573509Smrj 	if (dma->dp_cbaddr != NULL) {
35741900Seota 		i_ddi_mem_free(dma->dp_cbaddr, NULL);
3575509Smrj 	}
3576509Smrj }
3577509Smrj 
3578509Smrj 
3579509Smrj /*
3580509Smrj  * rootnex_teardown_windows()
3581509Smrj  *    cleans up after rootnex_setup_windows()
3582509Smrj  */
3583509Smrj static void
3584509Smrj rootnex_teardown_windows(rootnex_dma_t *dma)
3585509Smrj {
3586509Smrj 	/*
3587509Smrj 	 * if we had to allocate window state on the last bind (because we
3588509Smrj 	 * didn't have enough pre-allocated space in the handle), free it.
3589509Smrj 	 */
3590509Smrj 	if (dma->dp_need_to_free_window) {
3591509Smrj 		kmem_free(dma->dp_window, dma->dp_window_size);
3592509Smrj 	}
3593509Smrj }
3594509Smrj 
3595509Smrj 
3596509Smrj /*
3597509Smrj  * rootnex_init_win()
3598509Smrj  *    Called in bind slow path during creation of a new window. Initializes
3599509Smrj  *    window state to default values.
3600509Smrj  */
3601509Smrj /*ARGSUSED*/
3602509Smrj static void
3603509Smrj rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3604509Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
3605509Smrj {
3606509Smrj 	hp->dmai_nwin++;
3607509Smrj 	window->wd_dosync = B_FALSE;
3608509Smrj 	window->wd_offset = cur_offset;
3609509Smrj 	window->wd_size = 0;
3610509Smrj 	window->wd_first_cookie = cookie;
3611509Smrj 	window->wd_cookie_cnt = 0;
3612509Smrj 	window->wd_trim.tr_trim_first = B_FALSE;
3613509Smrj 	window->wd_trim.tr_trim_last = B_FALSE;
3614509Smrj 	window->wd_trim.tr_first_copybuf_win = B_FALSE;
3615509Smrj 	window->wd_trim.tr_last_copybuf_win = B_FALSE;
3616509Smrj #if !defined(__amd64)
3617509Smrj 	window->wd_remap_copybuf = dma->dp_cb_remaping;
3618509Smrj #endif
3619509Smrj }
3620509Smrj 
3621509Smrj 
3622509Smrj /*
3623509Smrj  * rootnex_setup_cookie()
3624509Smrj  *    Called in the bind slow path when the sgl uses the copy buffer. If any of
3625509Smrj  *    the sgl uses the copy buffer, we need to go through each cookie, figure
3626509Smrj  *    out if it uses the copy buffer, and if it does, save away everything we'll
3627509Smrj  *    need during sync.
3628509Smrj  */
3629509Smrj static void
3630509Smrj rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
3631509Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
3632509Smrj     page_t **cur_pp)
3633509Smrj {
3634509Smrj 	boolean_t copybuf_sz_power_2;
3635509Smrj 	rootnex_sglinfo_t *sinfo;
36365084Sjohnlev 	paddr_t paddr;
3637509Smrj 	uint_t pidx;
3638509Smrj 	uint_t pcnt;
3639509Smrj 	off_t poff;
3640509Smrj #if defined(__amd64)
3641509Smrj 	pfn_t pfn;
3642509Smrj #else
3643509Smrj 	page_t **pplist;
3644509Smrj #endif
3645509Smrj 
3646509Smrj 	sinfo = &dma->dp_sglinfo;
3647509Smrj 
3648509Smrj 	/*
3649509Smrj 	 * Calculate the page index relative to the start of the buffer. The
3650509Smrj 	 * index to the current page for our buffer is the offset into the
3651509Smrj 	 * first page of the buffer plus our current offset into the buffer
3652509Smrj 	 * itself, shifted of course...
3653509Smrj 	 */
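	/*
	 * Hypothetical example (4K pages): with si_buf_offset of 0x200 and a
	 * cur_offset of 0x2000, the pidx below is (0x200 + 0x2000) >> 12 == 2,
	 * i.e. the third page of the buffer.
	 */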
3654509Smrj 	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
3655509Smrj 	ASSERT(pidx < sinfo->si_max_pages);
3656509Smrj 
3657509Smrj 	/* if this cookie uses the copy buffer */
3658509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3659509Smrj 		/*
3660509Smrj 		 * NOTE: we know that since this cookie uses the copy buffer, it
3661509Smrj 		 * is <= MMU_PAGESIZE.
3662509Smrj 		 */
3663509Smrj 
3664509Smrj 		/*
3665509Smrj 		 * get the offset into the page. For the 64-bit kernel, get the
3666509Smrj 		 * pfn which we'll use with seg kpm.
3667509Smrj 		 */
36685084Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
3669509Smrj #if defined(__amd64)
36705084Sjohnlev 		/* mfn_to_pfn() is a NOP on i86pc */
36715084Sjohnlev 		pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
36725084Sjohnlev #endif /* __amd64 */
3673509Smrj 
3674509Smrj 		/* figure out if the copybuf size is a power of 2 */
3675509Smrj 		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
3676509Smrj 			copybuf_sz_power_2 = B_FALSE;
3677509Smrj 		} else {
3678509Smrj 			copybuf_sz_power_2 = B_TRUE;
3679509Smrj 		}
3680509Smrj 
3681509Smrj 		/* This page uses the copy buffer */
3682509Smrj 		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
3683509Smrj 
3684509Smrj 		/*
3685509Smrj 		 * save the copy buffer KVA that we'll use with this page.
3686509Smrj 		 * if we still fit within the copybuf, it's a simple add.
3687509Smrj 		 * otherwise, we need to wrap over using & or % accordingly.
3688509Smrj 		 */
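		/*
		 * Hypothetical example: with a 16KB (0x4000) copy buffer and
		 * *copybuf_used at 0x5000, the power-of-2 wrap below gives a
		 * copy buffer offset of 0x5000 & 0x3fff == 0x1000.
		 */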
3689509Smrj 		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
3690509Smrj 			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
3691509Smrj 			    *copybuf_used;
3692509Smrj 		} else {
3693509Smrj 			if (copybuf_sz_power_2) {
3694509Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3695509Smrj 				    (uintptr_t)dma->dp_cbaddr +
3696509Smrj 				    (*copybuf_used &
3697509Smrj 				    (dma->dp_copybuf_size - 1)));
36980Sstevel@tonic-gate 			} else {
3699509Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3700509Smrj 				    (uintptr_t)dma->dp_cbaddr +
3701509Smrj 				    (*copybuf_used % dma->dp_copybuf_size));
37020Sstevel@tonic-gate 			}
3703509Smrj 		}
3704509Smrj 
3705509Smrj 		/*
3706509Smrj 		 * overwrite the cookie physical address with the physical
3707509Smrj 		 * address of the copy buffer page that we will
3708509Smrj 		 * use.
3709509Smrj 		 */
37105084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
3711509Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
3712509Smrj 
37135084Sjohnlev #ifdef __xpv
37145084Sjohnlev 		/*
37155084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
37165084Sjohnlev 		 * the cookies with MAs instead of PAs.
37175084Sjohnlev 		 */
37185084Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
37195084Sjohnlev #else
37205084Sjohnlev 		cookie->dmac_laddress = paddr;
37215084Sjohnlev #endif
37225084Sjohnlev 
3723509Smrj 		/* if we have a kernel VA, it's easy, just save that address */
3724509Smrj 		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
3725509Smrj 		    (sinfo->si_asp == &kas)) {
3726509Smrj 			/*
3727509Smrj 			 * save away the page aligned virtual address of the
3728509Smrj 			 * driver buffer. Offsets are handled in the sync code.
3729509Smrj 			 */
3730509Smrj 			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
3731509Smrj 			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
3732509Smrj 			    & MMU_PAGEMASK);
3733509Smrj #if !defined(__amd64)
3734509Smrj 			/*
3735509Smrj 			 * we didn't need to, and will never need to map this
3736509Smrj 			 * page.
3737509Smrj 			 */
3738509Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3739509Smrj #endif
3740509Smrj 
3741509Smrj 		/* we don't have a kernel VA. We need one for the bcopy. */
3742509Smrj 		} else {
3743509Smrj #if defined(__amd64)
3744509Smrj 			/*
3745509Smrj 			 * for the 64-bit kernel, it's easy. We use seg kpm to
3746509Smrj 			 * get a Kernel VA for the corresponding pfn.
3747509Smrj 			 */
3748509Smrj 			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
3749509Smrj #else
3750509Smrj 			/*
3751509Smrj 			 * for the 32-bit kernel, this is a pain. First we'll
3752509Smrj 			 * save away the page_t or user VA for this page. This
3753509Smrj 			 * is needed in rootnex_dma_win() when we switch to a
3754509Smrj 			 * new window which requires us to re-map the copy
3755509Smrj 			 * buffer.
3756509Smrj 			 */
3757509Smrj 			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
3758509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3759509Smrj 				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
3760509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3761509Smrj 			} else if (pplist != NULL) {
3762509Smrj 				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
3763509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3764509Smrj 			} else {
3765509Smrj 				dma->dp_pgmap[pidx].pm_pp = NULL;
3766509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
3767509Smrj 				    (((uintptr_t)
3768509Smrj 				    dmar_object->dmao_obj.virt_obj.v_addr +
3769509Smrj 				    cur_offset) & MMU_PAGEMASK);
3770509Smrj 			}
3771509Smrj 
3772509Smrj 			/*
3773509Smrj 			 * save away the page aligned virtual address which was
3774509Smrj 			 * allocated from the kernel heap arena (taking into
3775509Smrj 			 * account if we need more copy buffer than we alloced
3776509Smrj 			 * and use multiple windows to handle this, i.e. &,%).
3777509Smrj 			 * NOTE: there isn't any physical memory backing up this
3778509Smrj 			 * virtual address space currently.
3779509Smrj 			 */
3780509Smrj 			if ((*copybuf_used + MMU_PAGESIZE) <=
3781509Smrj 			    dma->dp_copybuf_size) {
3782509Smrj 				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3783509Smrj 				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
3784509Smrj 				    MMU_PAGEMASK);
3785509Smrj 			} else {
3786509Smrj 				if (copybuf_sz_power_2) {
3787509Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3788509Smrj 					    (((uintptr_t)dma->dp_kva +
3789509Smrj 					    (*copybuf_used &
3790509Smrj 					    (dma->dp_copybuf_size - 1))) &
3791509Smrj 					    MMU_PAGEMASK);
3792509Smrj 				} else {
3793509Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3794509Smrj 					    (((uintptr_t)dma->dp_kva +
3795509Smrj 					    (*copybuf_used %
3796509Smrj 					    dma->dp_copybuf_size)) &
3797509Smrj 					    MMU_PAGEMASK);
3798509Smrj 				}
3799509Smrj 			}
3800509Smrj 
3801509Smrj 			/*
3802509Smrj 			 * if we haven't used up the available copy buffer yet,
3803509Smrj 			 * map the kva to the physical page.
3804509Smrj 			 */
3805509Smrj 			if (!dma->dp_cb_remaping && ((*copybuf_used +
3806509Smrj 			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
3807509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
3808509Smrj 				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
3809509Smrj 					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
3810509Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
3811509Smrj 				} else {
3812509Smrj 					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
3813509Smrj 					    sinfo->si_asp,
3814509Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
3815509Smrj 				}
3816509Smrj 
3817509Smrj 			/*
3818509Smrj 			 * we've used up the available copy buffer, so this
3819509Smrj 			 * page will have to be mapped during rootnex_dma_win()
3820509Smrj 			 * when we switch to a new window, which requires us to
3821509Smrj 			 * re-map the copy buffer. (32-bit kernel only)
3822509Smrj 			 */
3823509Smrj 			} else {
3824509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3825509Smrj 			}
3826509Smrj #endif
3827509Smrj 			/* go to the next page_t */
3828509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3829509Smrj 				*cur_pp = (*cur_pp)->p_next;
3830509Smrj 			}
38310Sstevel@tonic-gate 		}
3832509Smrj 
3833509Smrj 		/* add to the copy buffer count */
3834509Smrj 		*copybuf_used += MMU_PAGESIZE;
3835509Smrj 
3836509Smrj 	/*
3837509Smrj 	 * This cookie doesn't use the copy buffer. Walk through the pages this
3838509Smrj 	 * cookie occupies to reflect this.
3839509Smrj 	 */
3840509Smrj 	} else {
3841509Smrj 		/*
3842509Smrj 		 * figure out how many pages the cookie occupies. We need to
3843509Smrj 		 * use the original page offset of the buffer and the cookies
3844509Smrj 		 * offset in the buffer to do this.
3845509Smrj 		 */
3846509Smrj 		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
3847509Smrj 		pcnt = mmu_btopr(cookie->dmac_size + poff);
3848509Smrj 
3849509Smrj 		while (pcnt > 0) {
3850509Smrj #if !defined(__amd64)
3851509Smrj 			/*
3852509Smrj 			 * the 32-bit kernel doesn't have seg kpm, so we need
3853509Smrj 			 * to map in the driver buffer (if it didn't come down
3854509Smrj 			 * with a kernel VA) on the fly. Since this page doesn't
3855509Smrj 			 * use the copy buffer, it doesn't, nor will it ever,
3856509Smrj 			 * have to be mapped in.
3857509Smrj 			 */
3858509Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3859509Smrj #endif
3860509Smrj 			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
3861509Smrj 
3862509Smrj 			/*
3863509Smrj 			 * we need to update pidx and cur_pp or we'll lose
3864509Smrj 			 * track of where we are.
3865509Smrj 			 */
3866509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3867509Smrj 				*cur_pp = (*cur_pp)->p_next;
3868509Smrj 			}
3869509Smrj 			pidx++;
3870509Smrj 			pcnt--;
3871509Smrj 		}
3872509Smrj 	}
3873509Smrj }
3874509Smrj 
3875509Smrj 
3876509Smrj /*
3877509Smrj  * rootnex_sgllen_window_boundary()
3878509Smrj  *    Called in the bind slow path when the next cookie causes us to exceed (in
3879509Smrj  *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
3880509Smrj  *    length supported by the DMA H/W.
3881509Smrj  */
3882509Smrj static int
3883509Smrj rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3884509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
3885509Smrj     off_t cur_offset)
3886509Smrj {
3887509Smrj 	off_t new_offset;
3888509Smrj 	size_t trim_sz;
3889509Smrj 	off_t coffset;
3890509Smrj 
3891509Smrj 
3892509Smrj 	/*
3893509Smrj 	 * if we know we'll never have to trim, it's pretty easy. Just move to
3894509Smrj 	 * the next window and init it. We're done.
3895509Smrj 	 */
3896509Smrj 	if (!dma->dp_trim_required) {
3897509Smrj 		(*windowp)++;
3898509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3899509Smrj 		(*windowp)->wd_cookie_cnt++;
3900509Smrj 		(*windowp)->wd_size = cookie->dmac_size;
3901509Smrj 		return (DDI_SUCCESS);
3902509Smrj 	}
3903509Smrj 
3904509Smrj 	/* figure out how much we need to trim from the window */
3905509Smrj 	ASSERT(attr->dma_attr_granular != 0);
3906509Smrj 	if (dma->dp_granularity_power_2) {
3907509Smrj 		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
3908509Smrj 	} else {
3909509Smrj 		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
3910509Smrj 	}
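	/*
	 * Illustrative example (editor's note, not part of the original
	 * source): with dma_attr_granular = 0x200 (a power of two) and a
	 * window size of 0x12034, the mask form above gives
	 *
	 *	trim_sz = 0x12034 & (0x200 - 1) = 0x34
	 *
	 * i.e. the window is 0x34 bytes past a whole multiple of the
	 * granularity. A non-power-of-two granularity such as 0x180 would
	 * take the modulo path instead: 0x12034 % 0x180 = 0x34.
	 */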
3911509Smrj 
3912509Smrj 	/* The window's a whole multiple of granularity. We're done */
3913509Smrj 	if (trim_sz == 0) {
3914509Smrj 		(*windowp)++;
3915509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3916509Smrj 		(*windowp)->wd_cookie_cnt++;
3917509Smrj 		(*windowp)->wd_size = cookie->dmac_size;
3918509Smrj 		return (DDI_SUCCESS);
3919509Smrj 	}
3920509Smrj 
3921509Smrj 	/*
3922509Smrj 	 * The window's not a whole multiple of granularity. Since we know this
3923509Smrj 	 * is due to the sgllen, we need to go back to the last cookie and trim
3924509Smrj 	 * that one, add the leftover part of the old cookie into the new
3925509Smrj 	 * window, and then add the new cookie into the new window.
3926509Smrj 	 */
3927509Smrj 
3928509Smrj 	/*
3929509Smrj 	 * make sure the driver isn't making us do something bad... Trimming and
3930509Smrj 	 * sgllen == 1 don't go together.
3931509Smrj 	 */
3932509Smrj 	if (attr->dma_attr_sgllen == 1) {
3933509Smrj 		return (DDI_DMA_NOMAPPING);
3934509Smrj 	}
3935509Smrj 
3936509Smrj 	/*
3937509Smrj 	 * first, setup the current window to account for the trim. Need to go
3938509Smrj 	 * back to the last cookie for this.
3939509Smrj 	 */
3940509Smrj 	cookie--;
3941509Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3942509Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
39435084Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3944509Smrj 	ASSERT(cookie->dmac_size > trim_sz);
3945509Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3946509Smrj 	(*windowp)->wd_size -= trim_sz;
3947509Smrj 
3948509Smrj 	/* save the buffer offsets for the next window */
3949509Smrj 	coffset = cookie->dmac_size - trim_sz;
3950509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3951509Smrj 
3952509Smrj 	/*
3953509Smrj 	 * set this now in case this is the first window. all other cases are
3954509Smrj 	 * set in dma_win()
3955509Smrj 	 */
3956509Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3957509Smrj 
3958509Smrj 	/*
3959509Smrj 	 * initialize the next window using what's left over in the previous
3960509Smrj 	 * cookie.
3961509Smrj 	 */
3962509Smrj 	(*windowp)++;
3963509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3964509Smrj 	(*windowp)->wd_cookie_cnt++;
3965509Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
39665084Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
3967509Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3968509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3969509Smrj 		(*windowp)->wd_dosync = B_TRUE;
3970509Smrj 	}
3971509Smrj 
3972509Smrj 	/*
3973509Smrj 	 * now go back to the current cookie and add it to the new window. set
3974509Smrj 	 * the new window size to what was left over from the previous
3975509Smrj 	 * cookie and what's in the current cookie.
3976509Smrj 	 */
3977509Smrj 	cookie++;
3978509Smrj 	(*windowp)->wd_cookie_cnt++;
3979509Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3980509Smrj 
3981509Smrj 	/*
3982509Smrj 	 * trim plus the next cookie could put us over maxxfer (a cookie can
3983509Smrj 	 * be at most maxxfer in size). Handle that case.
3984509Smrj 	 */
3985509Smrj 	if ((*windowp)->wd_size > dma->dp_maxxfer) {
3986509Smrj 		/*
3987509Smrj 		 * maxxfer is already a whole multiple of granularity, and this
3988509Smrj 		 * trim will be <= the previous trim (since a cookie can't be
3989509Smrj 		 * larger than maxxfer). Make things simple here.
3990509Smrj 		 */
3991509Smrj 		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
3992509Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3993509Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
39945084Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3995509Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3996509Smrj 		(*windowp)->wd_size -= trim_sz;
3997509Smrj 		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
3998509Smrj 
3999509Smrj 		/* save the buffer offsets for the next window */
4000509Smrj 		coffset = cookie->dmac_size - trim_sz;
4001509Smrj 		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4002509Smrj 
4003509Smrj 		/* setup the next window */
4004509Smrj 		(*windowp)++;
4005509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4006509Smrj 		(*windowp)->wd_cookie_cnt++;
4007509Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
40085084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
4009509Smrj 		    coffset;
4010509Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
4011509Smrj 	}
4012509Smrj 
4013509Smrj 	return (DDI_SUCCESS);
4014509Smrj }
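/*
 * Illustrative example of the trim above (editor's note, not part of the
 * original source): say dma_attr_granular is 0x200 and the window being
 * closed is 0x30 bytes past a whole multiple of that, so trim_sz = 0x30. If
 * the last cookie in that window is 0x1230 bytes, it keeps 0x1200 bytes in
 * the old window (tr_trim_last), and the new window starts with its final
 * 0x30 bytes (tr_trim_first) followed by the cookie that hit the sgllen
 * limit. If that sum would exceed maxxfer, the second trim at the end of the
 * routine splits the excess into yet another window.
 */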
4015509Smrj 
4016509Smrj 
4017509Smrj /*
4018509Smrj  * rootnex_copybuf_window_boundary()
4019509Smrj  *    Called in bind slowpath when we get to a window boundary because we used
4020509Smrj  *    up all the copy buffer that we have.
4021509Smrj  */
4022509Smrj static int
4023509Smrj rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
4024509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
4025509Smrj     size_t *copybuf_used)
4026509Smrj {
4027509Smrj 	rootnex_sglinfo_t *sinfo;
4028509Smrj 	off_t new_offset;
4029509Smrj 	size_t trim_sz;
40305084Sjohnlev 	paddr_t paddr;
4031509Smrj 	off_t coffset;
4032509Smrj 	uint_t pidx;
4033509Smrj 	off_t poff;
4034509Smrj 
4035509Smrj 
4036509Smrj 	sinfo = &dma->dp_sglinfo;
4037509Smrj 
4038509Smrj 	/*
4039509Smrj 	 * the copy buffer should be a whole multiple of page size. We know that
4040509Smrj 	 * this cookie is <= MMU_PAGESIZE.
4041509Smrj 	 */
4042509Smrj 	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
4043509Smrj 
4044509Smrj 	/*
4045509Smrj 	 * from now on, all new windows in this bind need to be re-mapped during
4046509Smrj 	 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out of copybuf
4047509Smrj 	 * space...
4048509Smrj 	 */
4049509Smrj #if !defined(__amd64)
4050509Smrj 	dma->dp_cb_remaping = B_TRUE;
4051509Smrj #endif
4052509Smrj 
4053509Smrj 	/* reset copybuf used */
4054509Smrj 	*copybuf_used = 0;
4055509Smrj 
4056509Smrj 	/*
4057509Smrj 	 * if we don't have to trim (since granularity is set to 1), go to the
4058509Smrj 	 * next window and add the current cookie to it. We know the current
4059509Smrj 	 * cookie uses the copy buffer since we're in this code path.
4060509Smrj 	 */
4061509Smrj 	if (!dma->dp_trim_required) {
4062509Smrj 		(*windowp)++;
4063509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
4064509Smrj 
4065509Smrj 		/* Add this cookie to the new window */
4066509Smrj 		(*windowp)->wd_cookie_cnt++;
4067509Smrj 		(*windowp)->wd_size += cookie->dmac_size;
4068509Smrj 		*copybuf_used += MMU_PAGESIZE;
4069509Smrj 		return (DDI_SUCCESS);
4070509Smrj 	}
4071509Smrj 
4072509Smrj 	/*
4073509Smrj 	 * *** may need to trim, figure it out.
4074509Smrj 	 */
4075509Smrj 
4076509Smrj 	/* figure out how much we need to trim from the window */
4077509Smrj 	if (dma->dp_granularity_power_2) {
4078509Smrj 		trim_sz = (*windowp)->wd_size &
4079509Smrj 		    (hp->dmai_attr.dma_attr_granular - 1);
4080509Smrj 	} else {
4081509Smrj 		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
4082509Smrj 	}
4083509Smrj 
4084509Smrj 	/*
4085509Smrj 	 * if the window's a whole multiple of granularity, go to the next
4086509Smrj 	 * window, init it, then add in the current cookie. We know the current
4087509Smrj 	 * cookie uses the copy buffer since we're in this code path.
4088509Smrj 	 */
4089509Smrj 	if (trim_sz == 0) {
4090509Smrj 		(*windowp)++;
4091509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
4092509Smrj 
4093509Smrj 		/* Add this cookie to the new window */
4094509Smrj 		(*windowp)->wd_cookie_cnt++;
4095509Smrj 		(*windowp)->wd_size += cookie->dmac_size;
4096509Smrj 		*copybuf_used += MMU_PAGESIZE;
4097509Smrj 		return (DDI_SUCCESS);
4098509Smrj 	}
4099509Smrj 
4100509Smrj 	/*
4101509Smrj 	 * *** We figured it out, we definitely need to trim
4102509Smrj 	 */
4103509Smrj 
4104509Smrj 	/*
4105509Smrj 	 * make sure the driver isn't making us do something bad...
4106509Smrj 	 * Trimming and sgllen == 1 don't go together.
4107509Smrj 	 */
4108509Smrj 	if (hp->dmai_attr.dma_attr_sgllen == 1) {
4109509Smrj 		return (DDI_DMA_NOMAPPING);
4110509Smrj 	}
4111509Smrj 
4112509Smrj 	/*
4113509Smrj 	 * first, setup the current window to account for the trim. Need to go
4114509Smrj 	 * back to the last cookie for this. Some of the last cookie will be in
4115509Smrj 	 * the current window, and some of the last cookie will be in the new
4116509Smrj 	 * window. All of the current cookie will be in the new window.
4117509Smrj 	 */
4118509Smrj 	cookie--;
4119509Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
4120509Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
41215084Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
4122509Smrj 	ASSERT(cookie->dmac_size > trim_sz);
4123509Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
4124509Smrj 	(*windowp)->wd_size -= trim_sz;
4125509Smrj 
4126509Smrj 	/*
4127509Smrj 	 * we're trimming the last cookie (not the current cookie). So that
4128509Smrj 	 * last cookie may or may not have been using the copy buffer (we
4129509Smrj 	 * know the cookie passed in uses the copy buffer since we're in
4130509Smrj 	 * this code path).
4131509Smrj 	 *
4132509Smrj 	 * If the last cookie doesn't use the copy buffer, there's nothing
4133509Smrj 	 * special to do. However, if it does use the copy buffer, it will be
4134509Smrj 	 * both the last page in the current window and the first page in the
4135509Smrj 	 * next window. Since we are reusing the copy buffer (and KVA space on
4136509Smrj 	 * the 32-bit kernel), this page will use the end of the copy buffer
4137509Smrj 	 * in the current window, and the start of the copy buffer in the next
4138509Smrj 	 * window. Track that info... The cookie physical address was already
4139509Smrj 	 * set to the copy buffer physical address in setup_cookie().
4140509Smrj 	 */
4141509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
4142509Smrj 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
4143509Smrj 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
4144509Smrj 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
4145509Smrj 		(*windowp)->wd_trim.tr_last_pidx = pidx;
4146509Smrj 		(*windowp)->wd_trim.tr_last_cbaddr =
4147509Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr;
4148509Smrj #if !defined(__amd64)
4149509Smrj 		(*windowp)->wd_trim.tr_last_kaddr =
4150509Smrj 		    dma->dp_pgmap[pidx].pm_kaddr;
4151509Smrj #endif
4152509Smrj 	}
4153509Smrj 
4154509Smrj 	/* save the buffer offsets for the next window */
4155509Smrj 	coffset = cookie->dmac_size - trim_sz;
4156509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4157509Smrj 
4158509Smrj 	/*
4159509Smrj 	 * set this now in case this is the first window. all other cases are
4160509Smrj 	 * set in dma_win()
4161509Smrj 	 */
4162509Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
4163509Smrj 
4164509Smrj 	/*
4165509Smrj 	 * initialize the next window using what's left over in the previous
4166509Smrj 	 * cookie.
4167509Smrj 	 */
4168509Smrj 	(*windowp)++;
4169509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4170509Smrj 	(*windowp)->wd_cookie_cnt++;
4171509Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
41725084Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
4173509Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
4174509Smrj 
4175509Smrj 	/*
4176509Smrj 	 * again, we're tracking if the last cookie uses the copy buffer.
4177509Smrj 	 * read the comment above for more info on why we need to track
4178509Smrj 	 * additional state.
4179509Smrj 	 *
4180509Smrj 	 * For the first cookie in the new window, we need to reset the
4181509Smrj 	 * physical address we DMA into to the start of the copy buffer plus
4182509Smrj 	 * any initial page offset which may be present.
4183509Smrj 	 */
4184509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
4185509Smrj 		(*windowp)->wd_dosync = B_TRUE;
4186509Smrj 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
4187509Smrj 		(*windowp)->wd_trim.tr_first_pidx = pidx;
4188509Smrj 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
4189509Smrj 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
41905084Sjohnlev 
41915084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
41925084Sjohnlev 		    poff;
41935084Sjohnlev #ifdef __xpv
41945084Sjohnlev 		/*
41955084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
41965084Sjohnlev 		 * the cookies with MAs instead of PAs.
41975084Sjohnlev 		 */
41985084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr =
41995084Sjohnlev 		    ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
42005084Sjohnlev #else
42015084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = paddr;
42025084Sjohnlev #endif
42035084Sjohnlev 
4204509Smrj #if !defined(__amd64)
4205509Smrj 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
4206509Smrj #endif
4207509Smrj 		/* account for the cookie copybuf usage in the new window */
4208509Smrj 		*copybuf_used += MMU_PAGESIZE;
4209509Smrj 
4210509Smrj 		/*
4211509Smrj 		 * every piece of code has to have a hack, and here is this
4212509Smrj 		 * one's :-)
4213509Smrj 		 *
4214509Smrj 		 * There is a complex interaction between setup_cookie and the
4215509Smrj 		 * copybuf window boundary. The complexity had to be in either
4216509Smrj 		 * the maxxfer window, or the copybuf window, and I chose the
4217509Smrj 		 * copybuf code.
4218509Smrj 		 *
4219509Smrj 		 * So in this code path, we have taken the last cookie,
4220509Smrj 		 * virtually broken it in half due to the trim, and it happens
4221509Smrj 		 * to use the copybuf, which further complicates life. At the
4222509Smrj 		 * same time, we have already set up the current cookie, which
4223509Smrj 		 * is now wrong. More background info: the current cookie uses
4224509Smrj 		 * the copybuf, so it is only a page long at most. So we need
4225509Smrj 		 * to fix the current cookie's copy buffer address, physical
4226509Smrj 		 * address, and kva for the 32-bit kernel. We do this by
4227509Smrj 		 * bumping them by page size (of course, we can't do this on
4228509Smrj 		 * the physical address since the copy buffer may not be
4229509Smrj 		 * physically contiguous).
4230509Smrj 		 */
4231509Smrj 		cookie++;
4232509Smrj 		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
42335084Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
42345084Sjohnlev 
42355084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
4236509Smrj 		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
42375084Sjohnlev #ifdef __xpv
42385084Sjohnlev 		/*
42395084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
42405084Sjohnlev 		 * the cookies with MAs instead of PAs.
42415084Sjohnlev 		 */
42425084Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
42435084Sjohnlev #else
42445084Sjohnlev 		cookie->dmac_laddress = paddr;
42455084Sjohnlev #endif
42465084Sjohnlev 
4247509Smrj #if !defined(__amd64)
4248509Smrj 		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
4249509Smrj 		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
4250509Smrj #endif
4251509Smrj 	} else {
4252509Smrj 		/* go back to the current cookie */
4253509Smrj 		cookie++;
4254509Smrj 	}
4255509Smrj 
4256509Smrj 	/*
4257509Smrj 	 * add the current cookie to the new window. set the new window size to
4258509Smrj 	 * what was left over from the previous cookie and what's in the
4259509Smrj 	 * current cookie.
4260509Smrj 	 */
4261509Smrj 	(*windowp)->wd_cookie_cnt++;
4262509Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
4263509Smrj 	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
4264509Smrj 
4265509Smrj 	/*
4266509Smrj 	 * we know that the cookie passed in always uses the copy buffer. We
4267509Smrj 	 * wouldn't be here if it didn't.
4268509Smrj 	 */
4269509Smrj 	*copybuf_used += MMU_PAGESIZE;
4270509Smrj 
4271509Smrj 	return (DDI_SUCCESS);
4272509Smrj }
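/*
 * Illustrative example (editor's note, not part of the original source): with
 * a two-page copy buffer, the third copybuf-bound page of a bind lands in
 * this routine. *copybuf_used is reset so the new window starts at dp_cbaddr
 * again, and when the trimmed last cookie also uses the copy buffer, that one
 * buffer page straddles the windows: it is recorded as the last copybuf page
 * of the old window (tr_last_*) and as the first copybuf page of the new
 * window (tr_first_*), which is why both sets of trim fields are filled in
 * above.
 */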
4273509Smrj 
4274509Smrj 
4275509Smrj /*
4276509Smrj  * rootnex_maxxfer_window_boundary()
4277509Smrj  *    Called in bind slowpath when we get to a window boundary because we will
4278509Smrj  *    go over maxxfer.
4279509Smrj  */
4280509Smrj static int
4281509Smrj rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
4282509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
4283509Smrj {
4284509Smrj 	size_t dmac_size;
4285509Smrj 	off_t new_offset;
4286509Smrj 	size_t trim_sz;
4287509Smrj 	off_t coffset;
4288509Smrj 
4289509Smrj 
4290509Smrj 	/*
4291509Smrj 	 * calculate how much we have to trim off of the current cookie to equal
4292509Smrj 	 * maxxfer. We don't have to account for granularity here since our
4293509Smrj 	 * maxxfer already takes that into account.
4294509Smrj 	 */
4295509Smrj 	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
4296509Smrj 	ASSERT(trim_sz <= cookie->dmac_size);
4297509Smrj 	ASSERT(trim_sz <= dma->dp_maxxfer);
4298509Smrj 
4299509Smrj 	/* save cookie size since we need it later and we might change it */
4300509Smrj 	dmac_size = cookie->dmac_size;
4301509Smrj 
4302509Smrj 	/*
4303509Smrj 	 * if we're not trimming the entire cookie, setup the current window to
4304509Smrj 	 * account for the trim.
4305509Smrj 	 */
4306509Smrj 	if (trim_sz < cookie->dmac_size) {
4307509Smrj 		(*windowp)->wd_cookie_cnt++;
4308509Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
4309509Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
43105084Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
4311509Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
4312509Smrj 		(*windowp)->wd_size = dma->dp_maxxfer;
4313509Smrj 
4314509Smrj 		/*
4315509Smrj 		 * set the adjusted cookie size now in case this is the first
4316509Smrj 		 * window. All other windows are taken care of in get win
4317509Smrj 		 */
4318509Smrj 		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
4319509Smrj 	}
4320509Smrj 
4321509Smrj 	/*
4322509Smrj 	 * coffset is the current offset within the cookie, new_offset is the
4323509Smrj 	 * current offset within the entire buffer.
4324509Smrj 	 */
4325509Smrj 	coffset = dmac_size - trim_sz;
4326509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4327509Smrj 
4328509Smrj 	/* initialize the next window */
4329509Smrj 	(*windowp)++;
4330509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4331509Smrj 	(*windowp)->wd_cookie_cnt++;
4332509Smrj 	(*windowp)->wd_size = trim_sz;
4333509Smrj 	if (trim_sz < dmac_size) {
4334509Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
43355084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
4336509Smrj 		    coffset;
4337509Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
4338509Smrj 	}
4339509Smrj 
4340509Smrj 	return (DDI_SUCCESS);
4341509Smrj }
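/*
 * Illustrative example (editor's note, not part of the original source): with
 * dp_maxxfer = 0x20000, a window that already holds 0x1f000 bytes, and a new
 * 0x3000 byte cookie:
 *
 *	trim_sz = (0x1f000 + 0x3000) - 0x20000 = 0x2000
 *
 * The current window keeps the first 0x1000 bytes of the cookie (bringing it
 * to exactly maxxfer), and the next window starts with the remaining 0x2000
 * bytes of the same cookie via tr_trim_first.
 */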
4342509Smrj 
4343509Smrj 
4344509Smrj /*ARGSUSED*/
4345509Smrj static int
43467613SVikram.Hegde@Sun.COM rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4347509Smrj     off_t off, size_t len, uint_t cache_flags)
4348509Smrj {
4349509Smrj 	rootnex_sglinfo_t *sinfo;
4350509Smrj 	rootnex_pgmap_t *cbpage;
4351509Smrj 	rootnex_window_t *win;
4352509Smrj 	ddi_dma_impl_t *hp;
4353509Smrj 	rootnex_dma_t *dma;
4354509Smrj 	caddr_t fromaddr;
4355509Smrj 	caddr_t toaddr;
4356509Smrj 	uint_t psize;
4357509Smrj 	off_t offset;
4358509Smrj 	uint_t pidx;
4359509Smrj 	size_t size;
4360509Smrj 	off_t poff;
4361509Smrj 	int e;
4362509Smrj 
4363509Smrj 
4364509Smrj 	hp = (ddi_dma_impl_t *)handle;
4365509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
4366509Smrj 	sinfo = &dma->dp_sglinfo;
4367509Smrj 
4368509Smrj 	/*
4369509Smrj 	 * if we don't have any windows, we don't need to sync. A copybuf
4370509Smrj 	 * will cause us to have at least one window.
4371509Smrj 	 */
4372509Smrj 	if (dma->dp_window == NULL) {
4373509Smrj 		return (DDI_SUCCESS);
4374509Smrj 	}
4375509Smrj 
4376509Smrj 	/* This window may not need to be sync'd */
4377509Smrj 	win = &dma->dp_window[dma->dp_current_win];
4378509Smrj 	if (!win->wd_dosync) {
4379509Smrj 		return (DDI_SUCCESS);
4380509Smrj 	}
4381509Smrj 
4382509Smrj 	/* handle off and len special cases */
4383509Smrj 	if ((off == 0) || (rootnex_sync_ignore_params)) {
4384509Smrj 		offset = win->wd_offset;
4385509Smrj 	} else {
4386509Smrj 		offset = off;
4387509Smrj 	}
4388509Smrj 	if ((len == 0) || (rootnex_sync_ignore_params)) {
4389509Smrj 		size = win->wd_size;
4390509Smrj 	} else {
4391509Smrj 		size = len;
4392509Smrj 	}
4393509Smrj 
4394509Smrj 	/* check the sync args to make sure they make a little sense */
4395509Smrj 	if (rootnex_sync_check_parms) {
4396509Smrj 		e = rootnex_valid_sync_parms(hp, win, offset, size,
4397509Smrj 		    cache_flags);
4398509Smrj 		if (e != DDI_SUCCESS) {
4399*12837Sfrank.van.der.linden@oracle.com 			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
4400509Smrj 			return (DDI_FAILURE);
4401509Smrj 		}
4402509Smrj 	}
4403509Smrj 
4404509Smrj 	/*
4405509Smrj 	 * special case the first page to handle the offset into the page. The
4406509Smrj 	 * offset to the current page for our buffer is the offset into the
4407509Smrj 	 * first page of the buffer plus our current offset into the buffer
4408509Smrj 	 * itself, masked of course.
4409509Smrj 	 */
4410509Smrj 	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
4411509Smrj 	psize = MIN((MMU_PAGESIZE - poff), size);
4412509Smrj 
4413509Smrj 	/* go through all the pages that we want to sync */
4414509Smrj 	while (size > 0) {
4415509Smrj 		/*
4416509Smrj 		 * Calculate the page index relative to the start of the buffer.
4417509Smrj 		 * The index to the current page for our buffer is the offset
4418509Smrj 		 * into the first page of the buffer plus our current offset
4419509Smrj 		 * into the buffer itself, shifted of course...
4420509Smrj 		 */
4421509Smrj 		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
4422509Smrj 		ASSERT(pidx < sinfo->si_max_pages);
4423509Smrj 
4424509Smrj 		/*
4425509Smrj 		 * if this page uses the copy buffer, we need to sync it,
4426509Smrj 		 * otherwise, go on to the next page.
4427509Smrj 		 */
4428509Smrj 		cbpage = &dma->dp_pgmap[pidx];
4429509Smrj 		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
4430509Smrj 		    (cbpage->pm_uses_copybuf == B_FALSE));
4431509Smrj 		if (cbpage->pm_uses_copybuf) {
4432509Smrj 			/* cbaddr and kaddr should be page aligned */
4433509Smrj 			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
4434509Smrj 			    MMU_PAGEOFFSET) == 0);
4435509Smrj 			ASSERT(((uintptr_t)cbpage->pm_kaddr &
4436509Smrj 			    MMU_PAGEOFFSET) == 0);
4437509Smrj 
4438509Smrj 			/*
4439509Smrj 			 * if we're copying for the device, we are going to
4440509Smrj 			 * copy from the driver's buffer to the rootnex
4441509Smrj 			 * allocated copy buffer.
4442509Smrj 			 */
4443509Smrj 			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
4444509Smrj 				fromaddr = cbpage->pm_kaddr + poff;
4445509Smrj 				toaddr = cbpage->pm_cbaddr + poff;
4446*12837Sfrank.van.der.linden@oracle.com 				ROOTNEX_DPROBE2(rootnex__sync__dev,
4447509Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
4448509Smrj 
4449509Smrj 			/*
4450509Smrj 			 * if we're copying for the cpu/kernel, we are going to
4451509Smrj 			 * copy from the rootnex allocated copy buffer to the
4452509Smrj 			 * driver's buffer.
4453509Smrj 			 */
4454509Smrj 			} else {
4455509Smrj 				fromaddr = cbpage->pm_cbaddr + poff;
4456509Smrj 				toaddr = cbpage->pm_kaddr + poff;
4457*12837Sfrank.van.der.linden@oracle.com 				ROOTNEX_DPROBE2(rootnex__sync__cpu,
4458509Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
4459509Smrj 			}
4460509Smrj 
4461509Smrj 			bcopy(fromaddr, toaddr, psize);
4462509Smrj 		}
4463509Smrj 
4464509Smrj 		/*
4465509Smrj 		 * decrement size until we're done, update our offset into the
4466509Smrj 		 * buffer, and get the next page size.
4467509Smrj 		 */
4468509Smrj 		size -= psize;
4469509Smrj 		offset += psize;
4470509Smrj 		psize = MIN(MMU_PAGESIZE, size);
4471509Smrj 
4472509Smrj 		/* page offset is zero for the rest of this loop */
4473509Smrj 		poff = 0;
4474509Smrj 	}
4475509Smrj 
4476509Smrj 	return (DDI_SUCCESS);
4477509Smrj }
4478509Smrj 
44797613SVikram.Hegde@Sun.COM /*
44807613SVikram.Hegde@Sun.COM  * rootnex_dma_sync()
44817613SVikram.Hegde@Sun.COM  *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
44827613SVikram.Hegde@Sun.COM  *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
44837613SVikram.Hegde@Sun.COM  *    is set, ddi_dma_sync() returns immediately passing back success.
44847613SVikram.Hegde@Sun.COM  */
44857613SVikram.Hegde@Sun.COM /*ARGSUSED*/
44867613SVikram.Hegde@Sun.COM static int
44877613SVikram.Hegde@Sun.COM rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
44887613SVikram.Hegde@Sun.COM     off_t off, size_t len, uint_t cache_flags)
44897613SVikram.Hegde@Sun.COM {
449011600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
449110216SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
44927613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_sync(dip, rdip, handle, off, len,
44937613SVikram.Hegde@Sun.COM 		    cache_flags));
44947613SVikram.Hegde@Sun.COM 	}
44957613SVikram.Hegde@Sun.COM #endif
44967613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_sync(dip, rdip, handle, off, len,
44977613SVikram.Hegde@Sun.COM 	    cache_flags));
44987613SVikram.Hegde@Sun.COM }
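/*
 * Editor's note -- a minimal usage sketch, not part of the original source:
 * a leaf driver bound above this nexus typically brackets copy-buffer DMA
 * with ddi_dma_sync() calls, which end up here unless DMP_NOSYNC short
 * circuits them. The handle name below is hypothetical; offset/length of 0
 * means the whole object.
 *
 *	// before the device reads the buffer (outbound DMA)
 *	(void) ddi_dma_sync(xx_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
 *	// ... start the transfer and wait for it to complete ...
 *	// before the CPU looks at data the device wrote (inbound DMA)
 *	if (ddi_dma_sync(xx_dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
 *	    DDI_SUCCESS) {
 *		// handle the sync failure
 *	}
 */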
4499509Smrj 
4500509Smrj /*
4501509Smrj  * rootnex_valid_sync_parms()
4502509Smrj  *    checks the parameters passed to sync to verify they are correct.
4503509Smrj  */
4504509Smrj static int
4505509Smrj rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
4506509Smrj     off_t offset, size_t size, uint_t cache_flags)
4507509Smrj {
4508509Smrj 	off_t woffset;
4509509Smrj 
4510509Smrj 
4511509Smrj 	/*
4512509Smrj 	 * the first part of the test to make sure the offset passed in is
4513509Smrj 	 * within the window.
4514509Smrj 	 */
4515509Smrj 	if (offset < win->wd_offset) {
4516509Smrj 		return (DDI_FAILURE);
4517509Smrj 	}
4518509Smrj 
4519509Smrj 	/*
4520509Smrj 	 * second and last part of the test to make sure the offset and length
4521509Smrj 	 * passed in is within the window.
4522509Smrj 	 */
4523509Smrj 	woffset = offset - win->wd_offset;
4524509Smrj 	if ((woffset + size) > win->wd_size) {
4525509Smrj 		return (DDI_FAILURE);
4526509Smrj 	}
4527509Smrj 
4528509Smrj 	/*
4529509Smrj 	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
4530509Smrj 	 * be set too.
4531509Smrj 	 */
4532509Smrj 	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
4533509Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
4534509Smrj 		return (DDI_SUCCESS);
4535509Smrj 	}
4536509Smrj 
4537509Smrj 	/*
4538509Smrj 	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
4539509Smrj 	 * should be set. Also DDI_DMA_READ should be set in the flags.
4540509Smrj 	 */
4541509Smrj 	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
4542509Smrj 	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
4543509Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
4544509Smrj 		return (DDI_SUCCESS);
4545509Smrj 	}
4546509Smrj 
4547509Smrj 	return (DDI_FAILURE);
4548509Smrj }
4549509Smrj 
4550509Smrj 
4551509Smrj /*ARGSUSED*/
4552509Smrj static int
45537613SVikram.Hegde@Sun.COM rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4554509Smrj     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
4555509Smrj     uint_t *ccountp)
4556509Smrj {
4557509Smrj 	rootnex_window_t *window;
4558509Smrj 	rootnex_trim_t *trim;
4559509Smrj 	ddi_dma_impl_t *hp;
4560509Smrj 	rootnex_dma_t *dma;
4561509Smrj #if !defined(__amd64)
4562509Smrj 	rootnex_sglinfo_t *sinfo;
4563509Smrj 	rootnex_pgmap_t *pmap;
4564509Smrj 	uint_t pidx;
4565509Smrj 	uint_t pcnt;
4566509Smrj 	off_t poff;
4567509Smrj 	int i;
4568509Smrj #endif
4569509Smrj 
4570509Smrj 
4571509Smrj 	hp = (ddi_dma_impl_t *)handle;
4572509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
4573509Smrj #if !defined(__amd64)
4574509Smrj 	sinfo = &dma->dp_sglinfo;
4575509Smrj #endif
4576509Smrj 
4577509Smrj 	/* If we try and get a window which doesn't exist, return failure */
4578509Smrj 	if (win >= hp->dmai_nwin) {
4579*12837Sfrank.van.der.linden@oracle.com 		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4580509Smrj 		return (DDI_FAILURE);
4581509Smrj 	}
4582509Smrj 
4583509Smrj 	/*
4584509Smrj 	 * if we don't have any windows, and they're asking for the first
4585509Smrj 	 * window, setup the cookie pointer to the first cookie in the bind.
4586509Smrj 	 * setup our return values, then increment the cookie since we return
4587509Smrj 	 * the first cookie on the stack.
4588509Smrj 	 */
4589509Smrj 	if (dma->dp_window == NULL) {
4590509Smrj 		if (win != 0) {
4591*12837Sfrank.van.der.linden@oracle.com 			ROOTNEX_DPROF_INC(
4592*12837Sfrank.van.der.linden@oracle.com 			    &rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4593509Smrj 			return (DDI_FAILURE);
4594509Smrj 		}
4595509Smrj 		hp->dmai_cookie = dma->dp_cookies;
4596509Smrj 		*offp = 0;
4597509Smrj 		*lenp = dma->dp_dma.dmao_size;
4598509Smrj 		*ccountp = dma->dp_sglinfo.si_sgl_size;
4599509Smrj 		*cookiep = hp->dmai_cookie[0];
4600509Smrj 		hp->dmai_cookie++;
4601509Smrj 		return (DDI_SUCCESS);
4602509Smrj 	}
4603509Smrj 
4604509Smrj 	/* sync the old window before moving on to the new one */
4605509Smrj 	window = &dma->dp_window[dma->dp_current_win];
4606509Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
46078215SVikram.Hegde@Sun.COM 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
4608509Smrj 		    DDI_DMA_SYNC_FORCPU);
4609509Smrj 	}
4610509Smrj 
4611509Smrj #if !defined(__amd64)
4612509Smrj 	/*
4613509Smrj 	 * before we move to the next window, if we need to re-map, unmap all
4614509Smrj 	 * the pages in this window.
4615509Smrj 	 */
4616509Smrj 	if (dma->dp_cb_remaping) {
4617509Smrj 		/*
4618509Smrj 		 * If we switch to this window again, we'll need to map in
4619509Smrj 		 * on the fly next time.
4620509Smrj 		 */
4621509Smrj 		window->wd_remap_copybuf = B_TRUE;
4622509Smrj 
4623509Smrj 		/*
4624509Smrj 		 * calculate the page index into the buffer where this window
4625509Smrj 		 * starts, and the number of pages this window takes up.
4626509Smrj 		 */
4627509Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4628509Smrj 		    MMU_PAGESHIFT;
4629509Smrj 		poff = (sinfo->si_buf_offset + window->wd_offset) &
4630509Smrj 		    MMU_PAGEOFFSET;
4631509Smrj 		pcnt = mmu_btopr(window->wd_size + poff);
4632509Smrj 		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
4633509Smrj 
4634509Smrj 		/* unmap pages which are currently mapped in this window */
4635509Smrj 		for (i = 0; i < pcnt; i++) {
4636509Smrj 			if (dma->dp_pgmap[pidx].pm_mapped) {
4637509Smrj 				hat_unload(kas.a_hat,
4638509Smrj 				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
4639509Smrj 				    HAT_UNLOAD);
4640509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4641509Smrj 			}
4642509Smrj 			pidx++;
4643509Smrj 		}
4644509Smrj 	}
4645509Smrj #endif
4646509Smrj 
4647509Smrj 	/*
4648509Smrj 	 * Move to the new window.
4649509Smrj 	 * NOTE: current_win must be set for sync to work right
4650509Smrj 	 */
4651509Smrj 	dma->dp_current_win = win;
4652509Smrj 	window = &dma->dp_window[win];
4653509Smrj 
4654509Smrj 	/* if needed, adjust the first and/or last cookies for trim */
4655509Smrj 	trim = &window->wd_trim;
4656509Smrj 	if (trim->tr_trim_first) {
46575084Sjohnlev 		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
4658509Smrj 		window->wd_first_cookie->dmac_size = trim->tr_first_size;
4659509Smrj #if !defined(__amd64)
4660509Smrj 		window->wd_first_cookie->dmac_type =
4661509Smrj 		    (window->wd_first_cookie->dmac_type &
4662509Smrj 		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
4663509Smrj #endif
4664509Smrj 		if (trim->tr_first_copybuf_win) {
4665509Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
4666509Smrj 			    trim->tr_first_cbaddr;
4667509Smrj #if !defined(__amd64)
4668509Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
4669509Smrj 			    trim->tr_first_kaddr;
4670509Smrj #endif
4671509Smrj 		}
4672509Smrj 	}
4673509Smrj 	if (trim->tr_trim_last) {
46745084Sjohnlev 		trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
4675509Smrj 		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
4676509Smrj 		if (trim->tr_last_copybuf_win) {
4677509Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
4678509Smrj 			    trim->tr_last_cbaddr;
4679509Smrj #if !defined(__amd64)
4680509Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
4681509Smrj 			    trim->tr_last_kaddr;
4682509Smrj #endif
4683509Smrj 		}
4684509Smrj 	}
4685509Smrj 
4686509Smrj 	/*
4687509Smrj 	 * setup the cookie pointer to the first cookie in the window. setup
4688509Smrj 	 * our return values, then increment the cookie since we return the
4689509Smrj 	 * first cookie on the stack.
4690509Smrj 	 */
4691509Smrj 	hp->dmai_cookie = window->wd_first_cookie;
4692509Smrj 	*offp = window->wd_offset;
4693509Smrj 	*lenp = window->wd_size;
4694509Smrj 	*ccountp = window->wd_cookie_cnt;
4695509Smrj 	*cookiep = hp->dmai_cookie[0];
4696509Smrj 	hp->dmai_cookie++;
4697509Smrj 
4698509Smrj #if !defined(__amd64)
4699509Smrj 	/* re-map copybuf if required for this window */
4700509Smrj 	if (dma->dp_cb_remaping) {
4701509Smrj 		/*
4702509Smrj 		 * calculate the page index into the buffer where this
4703509Smrj 		 * window starts.
4704509Smrj 		 */
4705509Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4706509Smrj 		    MMU_PAGESHIFT;
4707509Smrj 		ASSERT(pidx < sinfo->si_max_pages);
4708509Smrj 
4709509Smrj 		/*
4710509Smrj 		 * the first page can get unmapped if it's shared with the
4711509Smrj 		 * previous window. Even if the rest of this window is already
4712509Smrj 		 * mapped in, we still need to check this one.
4713509Smrj 		 */
4714509Smrj 		pmap = &dma->dp_pgmap[pidx];
4715509Smrj 		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
4716509Smrj 			if (pmap->pm_pp != NULL) {
4717509Smrj 				pmap->pm_mapped = B_TRUE;
4718509Smrj 				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
4719509Smrj 			} else if (pmap->pm_vaddr != NULL) {
4720509Smrj 				pmap->pm_mapped = B_TRUE;
4721509Smrj 				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
4722509Smrj 				    pmap->pm_kaddr);
4723509Smrj 			}
4724509Smrj 		}
4725509Smrj 		pidx++;
4726509Smrj 
4727509Smrj 		/* map in the rest of the pages if required */
4728509Smrj 		if (window->wd_remap_copybuf) {
4729509Smrj 			window->wd_remap_copybuf = B_FALSE;
4730509Smrj 
4731509Smrj 			/* figure out how many pages this window takes up */
4732509Smrj 			poff = (sinfo->si_buf_offset + window->wd_offset) &
4733509Smrj 			    MMU_PAGEOFFSET;
4734509Smrj 			pcnt = mmu_btopr(window->wd_size + poff);
4735509Smrj 			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
4736509Smrj 
4737509Smrj 			/* map pages which require it */
4738509Smrj 			for (i = 1; i < pcnt; i++) {
4739509Smrj 				pmap = &dma->dp_pgmap[pidx];
4740509Smrj 				if (pmap->pm_uses_copybuf) {
4741509Smrj 					ASSERT(pmap->pm_mapped == B_FALSE);
4742509Smrj 					if (pmap->pm_pp != NULL) {
4743509Smrj 						pmap->pm_mapped = B_TRUE;
4744509Smrj 						i86_pp_map(pmap->pm_pp,
4745509Smrj 						    pmap->pm_kaddr);
4746509Smrj 					} else if (pmap->pm_vaddr != NULL) {
4747509Smrj 						pmap->pm_mapped = B_TRUE;
4748509Smrj 						i86_va_map(pmap->pm_vaddr,
4749509Smrj 						    sinfo->si_asp,
4750509Smrj 						    pmap->pm_kaddr);
4751509Smrj 					}
4752509Smrj 				}
4753509Smrj 				pidx++;
4754509Smrj 			}
4755509Smrj 		}
4756509Smrj 	}
4757509Smrj #endif
4758509Smrj 
4759509Smrj 	/* if the new window uses the copy buffer, sync it for the device */
4760509Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
47618215SVikram.Hegde@Sun.COM 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
4762509Smrj 		    DDI_DMA_SYNC_FORDEV);
4763509Smrj 	}
4764509Smrj 
4765509Smrj 	return (DDI_SUCCESS);
4766509Smrj }
4767509Smrj 
47687613SVikram.Hegde@Sun.COM /*
47697613SVikram.Hegde@Sun.COM  * rootnex_dma_win()
47707613SVikram.Hegde@Sun.COM  *    called from ddi_dma_getwin()
47717613SVikram.Hegde@Sun.COM  */
47727613SVikram.Hegde@Sun.COM /*ARGSUSED*/
47737613SVikram.Hegde@Sun.COM static int
47747613SVikram.Hegde@Sun.COM rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
47757613SVikram.Hegde@Sun.COM     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
47767613SVikram.Hegde@Sun.COM     uint_t *ccountp)
47777613SVikram.Hegde@Sun.COM {
477811600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
477910216SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
47807613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp,
47817613SVikram.Hegde@Sun.COM 		    cookiep, ccountp));
47827613SVikram.Hegde@Sun.COM 	}
47837613SVikram.Hegde@Sun.COM #endif
47847613SVikram.Hegde@Sun.COM 
47857613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp,
47867613SVikram.Hegde@Sun.COM 	    cookiep, ccountp));
47877613SVikram.Hegde@Sun.COM }
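/*
 * Editor's note -- a minimal usage sketch, not part of the original source:
 * after a bind that returned DDI_DMA_PARTIAL_MAP, a driver walks the windows
 * with ddi_dma_numwin()/ddi_dma_getwin(), which land in rootnex_dma_win()
 * above. Names prefixed with xx_ are hypothetical.
 *
 *	uint_t nwin, win, ccount;
 *	off_t off;
 *	size_t len;
 *	ddi_dma_cookie_t cookie;
 *
 *	(void) ddi_dma_numwin(xx_dma_handle, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(xx_dma_handle, win, &off, &len,
 *		    &cookie, &ccount) != DDI_SUCCESS)
 *			break;
 *		// program the device with 'cookie' and the ccount - 1
 *		// cookies that follow, fetched via ddi_dma_nextcookie()
 *	}
 */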
4788509Smrj 
4789509Smrj /*
4790509Smrj  * ************************
4791509Smrj  *  obsoleted dma routines
4792509Smrj  * ************************
4793509Smrj  */
4794509Smrj 
479510216SVikram.Hegde@Sun.COM /*
479610216SVikram.Hegde@Sun.COM  * rootnex_dma_map()
479710216SVikram.Hegde@Sun.COM  *    called from ddi_dma_setup()
479910216SVikram.Hegde@Sun.COM  *    No IOMMU in 32-bit mode. The routine below doesn't work in 64-bit mode.
479910216SVikram.Hegde@Sun.COM  */
4800509Smrj /* ARGSUSED */
4801509Smrj static int
480210216SVikram.Hegde@Sun.COM rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
48037613SVikram.Hegde@Sun.COM     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
4804509Smrj {
4805509Smrj #if defined(__amd64)
4806509Smrj 	/*
4807509Smrj 	 * this interface is not supported in 64-bit x86 kernel. See comment in
4808509Smrj 	 * rootnex_dma_mctl()
4809509Smrj 	 */
4810509Smrj 	return (DDI_DMA_NORESOURCES);
4811509Smrj 
4812509Smrj #else /* 32-bit x86 kernel */
4813509Smrj 	ddi_dma_handle_t *lhandlep;
4814509Smrj 	ddi_dma_handle_t lhandle;
4815509Smrj 	ddi_dma_cookie_t cookie;
4816509Smrj 	ddi_dma_attr_t dma_attr;
4817509Smrj 	ddi_dma_lim_t *dma_lim;
4818509Smrj 	uint_t ccnt;
4819509Smrj 	int e;
4820509Smrj 
4821509Smrj 
4822509Smrj 	/*
4823509Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
4824509Smrj 	 * we'll use local state. Otherwise, use the handle pointer passed in.
4825509Smrj 	 */
4826509Smrj 	if (handlep == NULL) {
4827509Smrj 		lhandlep = &lhandle;
4828509Smrj 	} else {
4829509Smrj 		lhandlep = handlep;
4830509Smrj 	}
4831509Smrj 
4832509Smrj 	/* convert the limit structure to a dma_attr one */
4833509Smrj 	dma_lim = dmareq->dmar_limits;
4834509Smrj 	dma_attr.dma_attr_version = DMA_ATTR_V0;
4835509Smrj 	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
4836509Smrj 	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
4837509Smrj 	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
4838509Smrj 	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
4839509Smrj 	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
4840509Smrj 	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
4841509Smrj 	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
4842509Smrj 	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
4843509Smrj 	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
4844509Smrj 	dma_attr.dma_attr_align = MMU_PAGESIZE;
4845509Smrj 	dma_attr.dma_attr_flags = 0;
4846509Smrj 
4847509Smrj 	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
4848509Smrj 	    dmareq->dmar_arg, lhandlep);
4849509Smrj 	if (e != DDI_SUCCESS) {
4850509Smrj 		return (e);
4851509Smrj 	}
4852509Smrj 
4853509Smrj 	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
4854509Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
4855509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4856509Smrj 		return (e);
4857509Smrj 	}
4858509Smrj 
4859509Smrj 	/*
4860509Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
4861509Smrj 	 * free up the local state and return the result.
4862509Smrj 	 */
4863509Smrj 	if (handlep == NULL) {
4864509Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
4865509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4866509Smrj 		if (e == DDI_DMA_MAPPED) {
4867509Smrj 			return (DDI_DMA_MAPOK);
48680Sstevel@tonic-gate 		} else {
4869509Smrj 			return (DDI_DMA_NOMAPPING);
4870509Smrj 		}
4871509Smrj 	}
4872509Smrj 
4873509Smrj 	return (e);
4874509Smrj #endif /* defined(__amd64) */
4875509Smrj }
4876509Smrj 
48777613SVikram.Hegde@Sun.COM /*
487810216SVikram.Hegde@Sun.COM  * rootnex_dma_mctl()
487910216SVikram.Hegde@Sun.COM  *
488010216SVikram.Hegde@Sun.COM  * No IOMMU in 32-bit mode. The routine below doesn't work in 64-bit mode.
48817613SVikram.Hegde@Sun.COM  */
48827613SVikram.Hegde@Sun.COM /* ARGSUSED */
48837613SVikram.Hegde@Sun.COM static int
488410216SVikram.Hegde@Sun.COM rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4885509Smrj     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
4886509Smrj     uint_t cache_flags)
4887509Smrj {
4888509Smrj #if defined(__amd64)
4889509Smrj 	/*
4890509Smrj 	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC were changed to have a
4891509Smrj 	 * common implementation in genunix, so they no longer have x86
4892509Smrj 	 * specific functionality which called into dma_ctl.
4893509Smrj 	 *
4894509Smrj 	 * The rest of the obsoleted interfaces were never supported in the
4895509Smrj 	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
4896509Smrj 	 * was not ported to the x86 64-bit kernel due to serious x86 rootnex
4897509Smrj 	 * implementation issues.
4898509Smrj 	 *
4899509Smrj 	 * If you can't use DDI_DMA_SEGTOC, then DDI_DMA_NEXTSEG, DDI_DMA_FREE,
4900509Smrj 	 * and DDI_DMA_NEXTWIN are useless since you can't get to the cookie,
4901509Smrj 	 * so we reflect that now too...
4902509Smrj 	 *
4903509Smrj 	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
4904509Smrj 	 * not going to put this functionality into the 64-bit x86 kernel now.
4905509Smrj 	 * It wasn't ported to the 64-bit kernel for s10; there's no reason to change
4906509Smrj 	 * that in a future release.
4907509Smrj 	 */
4908509Smrj 	return (DDI_FAILURE);
4909509Smrj 
4910509Smrj #else /* 32-bit x86 kernel */
4911509Smrj 	ddi_dma_cookie_t lcookie;
4912509Smrj 	ddi_dma_cookie_t *cookie;
4913509Smrj 	rootnex_window_t *window;
4914509Smrj 	ddi_dma_impl_t *hp;
4915509Smrj 	rootnex_dma_t *dma;
4916509Smrj 	uint_t nwin;
4917509Smrj 	uint_t ccnt;
4918509Smrj 	size_t len;
4919509Smrj 	off_t off;
4920509Smrj 	int e;
4921509Smrj 
4922509Smrj 
4923509Smrj 	/*
4924509Smrj 	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
4925509Smrj 	 * hacky since we're optimizing for the current interfaces so we can
4926509Smrj 	 * clean up the mess in genunix. Hopefully we will remove these
4927509Smrj 	 * obsoleted routines someday soon.
4928509Smrj 	 */
4929509Smrj 
4930509Smrj 	switch (request) {
4931509Smrj 
4932509Smrj 	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
4933509Smrj 		hp = (ddi_dma_impl_t *)handle;
4934509Smrj 		cookie = (ddi_dma_cookie_t *)objpp;
4935509Smrj 
4936509Smrj 		/*
4937509Smrj 		 * convert segment to cookie. We don't distinguish between the
4938509Smrj 		 * two :-)
4939509Smrj 		 */
4940509Smrj 		*cookie = *hp->dmai_cookie;
4941509Smrj 		*lenp = cookie->dmac_size;
4942509Smrj 		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
4943509Smrj 		return (DDI_SUCCESS);
4944509Smrj 
4945509Smrj 	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
4946509Smrj 		hp = (ddi_dma_impl_t *)handle;
4947509Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
4948509Smrj 
4949509Smrj 		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
4950509Smrj 			return (DDI_DMA_STALE);
49510Sstevel@tonic-gate 		}
4952509Smrj 
4953509Smrj 		/* handle the case where we don't have any windows */
4954509Smrj 		if (dma->dp_window == NULL) {
4955509Smrj 			/*
4956509Smrj 			 * if seg == NULL, and we don't have any windows,
4957509Smrj 			 * return the first cookie in the sgl.
4958509Smrj 			 */
4959509Smrj 			if (*lenp == NULL) {
4960509Smrj 				dma->dp_current_cookie = 0;
4961509Smrj 				hp->dmai_cookie = dma->dp_cookies;
4962509Smrj 				*objpp = (caddr_t)handle;
4963509Smrj 				return (DDI_SUCCESS);
4964509Smrj 
4965509Smrj 			/* if we have more cookies, go to the next cookie */
4966509Smrj 			} else {
4967509Smrj 				if ((dma->dp_current_cookie + 1) >=
4968509Smrj 				    dma->dp_sglinfo.si_sgl_size) {
4969509Smrj 					return (DDI_DMA_DONE);
4970509Smrj 				}
4971509Smrj 				dma->dp_current_cookie++;
4972509Smrj 				hp->dmai_cookie++;
4973509Smrj 				return (DDI_SUCCESS);
4974509Smrj 			}
4975509Smrj 		}
4976509Smrj 
4977509Smrj 		/* We have one or more windows */
4978509Smrj 		window = &dma->dp_window[dma->dp_current_win];
4979509Smrj 
4980509Smrj 		/*
4981509Smrj 		 * if seg == NULL, return the first cookie in the current
4982509Smrj 		 * window
4983509Smrj 		 */
4984509Smrj 		if (*lenp == NULL) {
4985509Smrj 			dma->dp_current_cookie = 0;
4986683Smrj 			hp->dmai_cookie = window->wd_first_cookie;
4987509Smrj 
4988509Smrj 		/*
4989509Smrj 		 * go to the next cookie in the window then see if we're done with
4990509Smrj 		 * this window.
4991509Smrj 		 */
4992509Smrj 		} else {
4993509Smrj 			if ((dma->dp_current_cookie + 1) >=
4994509Smrj 			    window->wd_cookie_cnt) {
4995509Smrj 				return (DDI_DMA_DONE);
4996509Smrj 			}
4997509Smrj 			dma->dp_current_cookie++;
4998509Smrj 			hp->dmai_cookie++;
4999509Smrj 		}
5000509Smrj 		*objpp = (caddr_t)handle;
5001509Smrj 		return (DDI_SUCCESS);
5002509Smrj 
5003509Smrj 	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
5004509Smrj 		hp = (ddi_dma_impl_t *)handle;
5005509Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
5006509Smrj 
5007509Smrj 		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
5008509Smrj 			return (DDI_DMA_STALE);
5009509Smrj 		}
5010509Smrj 
5011509Smrj 		/* if win == NULL, return the first window in the bind */
5012509Smrj 		if (*offp == NULL) {
5013509Smrj 			nwin = 0;
5014509Smrj 
5015509Smrj 		/*
5016509Smrj 		 * else, go to the next window then see if we're done with all
5017509Smrj 		 * the windows.
5018509Smrj 		 */
5019509Smrj 		} else {
5020509Smrj 			nwin = dma->dp_current_win + 1;
5021509Smrj 			if (nwin >= hp->dmai_nwin) {
5022509Smrj 				return (DDI_DMA_DONE);
5023509Smrj 			}
5024509Smrj 		}
5025509Smrj 
5026509Smrj 		/* switch to the next window */
5027509Smrj 		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
5028509Smrj 		    &lcookie, &ccnt);
5029509Smrj 		ASSERT(e == DDI_SUCCESS);
5030509Smrj 		if (e != DDI_SUCCESS) {
5031509Smrj 			return (DDI_DMA_STALE);
5032509Smrj 		}
5033509Smrj 
5034509Smrj 		/* reset the cookie back to the first cookie in the window */
5035509Smrj 		if (dma->dp_window != NULL) {
5036509Smrj 			window = &dma->dp_window[dma->dp_current_win];
5037509Smrj 			hp->dmai_cookie = window->wd_first_cookie;
5038509Smrj 		} else {
5039509Smrj 			hp->dmai_cookie = dma->dp_cookies;
5040509Smrj 		}
5041509Smrj 
5042509Smrj 		*objpp = (caddr_t)handle;
5043509Smrj 		return (DDI_SUCCESS);
5044509Smrj 
5045509Smrj 	case DDI_DMA_FREE: /* ddi_dma_free() */
5046509Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
5047509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, handle);
5048509Smrj 		if (rootnex_state->r_dvma_call_list_id) {
5049509Smrj 			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
5050509Smrj 		}
5051509Smrj 		return (DDI_SUCCESS);
5052509Smrj 
5053509Smrj 	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
5054509Smrj 	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
5055509Smrj 		/* should never get here, handled in genunix */
5056509Smrj 		ASSERT(0);
5057509Smrj 		return (DDI_FAILURE);
5058509Smrj 
5059509Smrj 	case DDI_DMA_KVADDR:
5060509Smrj 	case DDI_DMA_GETERR:
5061509Smrj 	case DDI_DMA_COFF:
5062509Smrj 		return (DDI_FAILURE);
50630Sstevel@tonic-gate 	}
5064509Smrj 
5065509Smrj 	return (DDI_FAILURE);
5066509Smrj #endif /* defined(__amd64) */
50670Sstevel@tonic-gate }
50681414Scindi 
50697613SVikram.Hegde@Sun.COM /*
50701865Sdilpreet  * *********
50711865Sdilpreet  *  FMA Code
50721865Sdilpreet  * *********
50731865Sdilpreet  */
50741865Sdilpreet 
50751865Sdilpreet /*
50761865Sdilpreet  * rootnex_fm_init()
50771865Sdilpreet  *    FMA init busop
50781865Sdilpreet  */
50791865Sdilpreet /* ARGSUSED */
50801865Sdilpreet static int
50811865Sdilpreet rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
50821865Sdilpreet     ddi_iblock_cookie_t *ibc)
50831865Sdilpreet {
50841865Sdilpreet 	*ibc = rootnex_state->r_err_ibc;
50851865Sdilpreet 
50861865Sdilpreet 	return (ddi_system_fmcap);
50871865Sdilpreet }
50881865Sdilpreet 
50891865Sdilpreet /*
50901865Sdilpreet  * rootnex_dma_check()
50911865Sdilpreet  *    Function called after a dma fault occurred to find out whether the
50921865Sdilpreet  *    fault address is associated with a driver that is able to handle faults
50931865Sdilpreet  *    and recover from faults.
50941865Sdilpreet  */
50951865Sdilpreet /* ARGSUSED */
50961414Scindi static int
50971865Sdilpreet rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
50981865Sdilpreet     const void *not_used)
50991414Scindi {
51001865Sdilpreet 	rootnex_window_t *window;
51011865Sdilpreet 	uint64_t start_addr;
51021865Sdilpreet 	uint64_t fault_addr;
51031865Sdilpreet 	ddi_dma_impl_t *hp;
51041865Sdilpreet 	rootnex_dma_t *dma;
51051865Sdilpreet 	uint64_t end_addr;
51061865Sdilpreet 	size_t csize;
51071865Sdilpreet 	int i;
51081865Sdilpreet 	int j;
51091865Sdilpreet 
51101865Sdilpreet 
51111865Sdilpreet 	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
51121865Sdilpreet 	hp = (ddi_dma_impl_t *)handle;
51131865Sdilpreet 	ASSERT(hp);
51141865Sdilpreet 
51151865Sdilpreet 	dma = (rootnex_dma_t *)hp->dmai_private;
51161865Sdilpreet 
51171865Sdilpreet 	/* Get the address that we need to search for */
51181865Sdilpreet 	fault_addr = *(uint64_t *)addr;
51191865Sdilpreet 
51201865Sdilpreet 	/*
51211865Sdilpreet 	 * if we don't have any windows, we can just walk through all the
51221865Sdilpreet 	 * cookies.
51231865Sdilpreet 	 */
51241865Sdilpreet 	if (dma->dp_window == NULL) {
51251865Sdilpreet 		/* for each cookie */
51261865Sdilpreet 		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
51271865Sdilpreet 			/*
51281865Sdilpreet 			 * if the faulted address is within the physical address
51291865Sdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
51301865Sdilpreet 			 */
51311865Sdilpreet 			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
51321865Sdilpreet 			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
51331865Sdilpreet 			    dma->dp_cookies[i].dmac_size))) {
51341865Sdilpreet 				return (DDI_FM_NONFATAL);
51351865Sdilpreet 			}
51361865Sdilpreet 		}
51371865Sdilpreet 
51381865Sdilpreet 		/* fault_addr not within this DMA handle */
51391865Sdilpreet 		return (DDI_FM_UNKNOWN);
51401865Sdilpreet 	}
51411865Sdilpreet 
51421865Sdilpreet 	/* we have multiple windows, walk through each window */
51431865Sdilpreet 	for (i = 0; i < hp->dmai_nwin; i++) {
51441865Sdilpreet 		window = &dma->dp_window[i];
51451865Sdilpreet 
51461865Sdilpreet 		/* Go through all the cookies in the window */
51471865Sdilpreet 		for (j = 0; j < window->wd_cookie_cnt; j++) {
51481865Sdilpreet 
51491865Sdilpreet 			start_addr = window->wd_first_cookie[j].dmac_laddress;
51501865Sdilpreet 			csize = window->wd_first_cookie[j].dmac_size;
51511865Sdilpreet 
51521865Sdilpreet 			/*
51531865Sdilpreet 			 * if we are trimming the first cookie in the window,
51541865Sdilpreet 			 * and this is the first cookie, adjust the start
51551865Sdilpreet 			 * address and size of the cookie to account for the
51561865Sdilpreet 			 * trim.
51571865Sdilpreet 			 */
51581865Sdilpreet 			if (window->wd_trim.tr_trim_first && (j == 0)) {
51591865Sdilpreet 				start_addr = window->wd_trim.tr_first_paddr;
51601865Sdilpreet 				csize = window->wd_trim.tr_first_size;
51611865Sdilpreet 			}
51621865Sdilpreet 
51631865Sdilpreet 			/*
51641865Sdilpreet 			 * if we are trimming the last cookie in the window,
51651865Sdilpreet 			 * and this is the last cookie, adjust the start
51661865Sdilpreet 			 * address and size of the cookie to account for the
51671865Sdilpreet 			 * trim.
51681865Sdilpreet 			 */
51691865Sdilpreet 			if (window->wd_trim.tr_trim_last &&
51701865Sdilpreet 			    (j == (window->wd_cookie_cnt - 1))) {
51711865Sdilpreet 				start_addr = window->wd_trim.tr_last_paddr;
51721865Sdilpreet 				csize = window->wd_trim.tr_last_size;
51731865Sdilpreet 			}
51741865Sdilpreet 
51751865Sdilpreet 			end_addr = start_addr + csize;
51761865Sdilpreet 
51771865Sdilpreet 			/*
517811600SVikram.Hegde@Sun.COM 			 * if the faulted address is within the physical
517911600SVikram.Hegde@Sun.COM 			 * address range of the cookie, return DDI_FM_NONFATAL.
51801865Sdilpreet 			 */
51811865Sdilpreet 			if ((fault_addr >= start_addr) &&
51821865Sdilpreet 			    (fault_addr <= end_addr)) {
51831865Sdilpreet 				return (DDI_FM_NONFATAL);
51841865Sdilpreet 			}
51851865Sdilpreet 		}
51861865Sdilpreet 	}
51871865Sdilpreet 
51881865Sdilpreet 	/* fault_addr not within this DMA handle */
51891865Sdilpreet 	return (DDI_FM_UNKNOWN);
51901414Scindi }
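/*
 * Editor's note -- a minimal sketch, not part of the original source: for the
 * check above to matter, the requesting driver must have bound its handle
 * with DDI_DMA_FLAGERR set in dma_attr_flags. After an I/O that may have
 * faulted, such a driver can ask whether its handle was implicated (names
 * prefixed with xx_ are hypothetical):
 *
 *	ddi_fm_error_t de;
 *
 *	ddi_fm_dma_err_get(xx_dma_handle, &de, DDI_FME_VERSION);
 *	if (de.fme_status != DDI_FM_OK) {
 *		// the fault address fell inside one of this handle's
 *		// cookies; retry or fail the transfer gracefully
 *	}
 */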
519111600SVikram.Hegde@Sun.COM 
519211600SVikram.Hegde@Sun.COM /*ARGSUSED*/
519311600SVikram.Hegde@Sun.COM static int
519411600SVikram.Hegde@Sun.COM rootnex_quiesce(dev_info_t *dip)
519511600SVikram.Hegde@Sun.COM {
519611600SVikram.Hegde@Sun.COM #if defined(__amd64) && !defined(__xpv)
519711600SVikram.Hegde@Sun.COM 	return (immu_quiesce());
519811600SVikram.Hegde@Sun.COM #else
519911600SVikram.Hegde@Sun.COM 	return (DDI_SUCCESS);
520011600SVikram.Hegde@Sun.COM #endif
520111600SVikram.Hegde@Sun.COM }
520211600SVikram.Hegde@Sun.COM 
520311600SVikram.Hegde@Sun.COM #if defined(__xpv)
520411600SVikram.Hegde@Sun.COM void
520511600SVikram.Hegde@Sun.COM immu_init(void)
520611600SVikram.Hegde@Sun.COM {
520711600SVikram.Hegde@Sun.COM 	;
520811600SVikram.Hegde@Sun.COM }
520911600SVikram.Hegde@Sun.COM 
521011600SVikram.Hegde@Sun.COM void
521111600SVikram.Hegde@Sun.COM immu_startup(void)
521211600SVikram.Hegde@Sun.COM {
521311600SVikram.Hegde@Sun.COM 	;
521411600SVikram.Hegde@Sun.COM }
521511600SVikram.Hegde@Sun.COM /*ARGSUSED*/
521611600SVikram.Hegde@Sun.COM void
521711600SVikram.Hegde@Sun.COM immu_physmem_update(uint64_t addr, uint64_t size)
521811600SVikram.Hegde@Sun.COM {
521911600SVikram.Hegde@Sun.COM 	;
522011600SVikram.Hegde@Sun.COM }
522111600SVikram.Hegde@Sun.COM #endif
5222