xref: /onnv-gate/usr/src/uts/i86pc/io/rootnex.c (revision 11465:ec77021cc782)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51865Sdilpreet  * Common Development and Distribution License (the "License").
61865Sdilpreet  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
22*11465SKerry.Shu@Sun.COM  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate /*
27509Smrj  * x86 root nexus driver
280Sstevel@tonic-gate  */
290Sstevel@tonic-gate 
300Sstevel@tonic-gate #include <sys/sysmacros.h>
310Sstevel@tonic-gate #include <sys/conf.h>
320Sstevel@tonic-gate #include <sys/autoconf.h>
330Sstevel@tonic-gate #include <sys/sysmacros.h>
340Sstevel@tonic-gate #include <sys/debug.h>
350Sstevel@tonic-gate #include <sys/psw.h>
360Sstevel@tonic-gate #include <sys/ddidmareq.h>
370Sstevel@tonic-gate #include <sys/promif.h>
380Sstevel@tonic-gate #include <sys/devops.h>
390Sstevel@tonic-gate #include <sys/kmem.h>
400Sstevel@tonic-gate #include <sys/cmn_err.h>
410Sstevel@tonic-gate #include <vm/seg.h>
420Sstevel@tonic-gate #include <vm/seg_kmem.h>
430Sstevel@tonic-gate #include <vm/seg_dev.h>
440Sstevel@tonic-gate #include <sys/vmem.h>
450Sstevel@tonic-gate #include <sys/mman.h>
460Sstevel@tonic-gate #include <vm/hat.h>
470Sstevel@tonic-gate #include <vm/as.h>
480Sstevel@tonic-gate #include <vm/page.h>
490Sstevel@tonic-gate #include <sys/avintr.h>
500Sstevel@tonic-gate #include <sys/errno.h>
510Sstevel@tonic-gate #include <sys/modctl.h>
520Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
530Sstevel@tonic-gate #include <sys/sunddi.h>
540Sstevel@tonic-gate #include <sys/sunndi.h>
55916Sschwartz #include <sys/mach_intr.h>
560Sstevel@tonic-gate #include <sys/psm.h>
570Sstevel@tonic-gate #include <sys/ontrap.h>
58509Smrj #include <sys/atomic.h>
59509Smrj #include <sys/sdt.h>
60509Smrj #include <sys/rootnex.h>
61509Smrj #include <vm/hat_i86.h>
621865Sdilpreet #include <sys/ddifm.h>
635251Smrj #include <sys/ddi_isa.h>
64509Smrj 
655084Sjohnlev #ifdef __xpv
665084Sjohnlev #include <sys/bootinfo.h>
675084Sjohnlev #include <sys/hypervisor.h>
685084Sjohnlev #include <sys/bootconf.h>
695084Sjohnlev #include <vm/kboot_mmu.h>
707613SVikram.Hegde@Sun.COM #else
717589SVikram.Hegde@Sun.COM #include <sys/intel_iommu.h>
727613SVikram.Hegde@Sun.COM #endif
737613SVikram.Hegde@Sun.COM 
747589SVikram.Hegde@Sun.COM 
75509Smrj /*
76509Smrj  * enable/disable extra checking of function parameters. Useful for debugging
77509Smrj  * drivers.
78509Smrj  */
79509Smrj #ifdef	DEBUG
80509Smrj int rootnex_alloc_check_parms = 1;
81509Smrj int rootnex_bind_check_parms = 1;
82509Smrj int rootnex_bind_check_inuse = 1;
83509Smrj int rootnex_unbind_verify_buffer = 0;
84509Smrj int rootnex_sync_check_parms = 1;
85509Smrj #else
86509Smrj int rootnex_alloc_check_parms = 0;
87509Smrj int rootnex_bind_check_parms = 0;
88509Smrj int rootnex_bind_check_inuse = 0;
89509Smrj int rootnex_unbind_verify_buffer = 0;
90509Smrj int rootnex_sync_check_parms = 0;
91509Smrj #endif
92509Smrj 
931414Scindi /* Master Abort and Target Abort panic flag */
941414Scindi int rootnex_fm_ma_ta_panic_flag = 0;
951414Scindi 
96509Smrj /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
970Sstevel@tonic-gate int rootnex_bind_fail = 1;
980Sstevel@tonic-gate int rootnex_bind_warn = 1;
990Sstevel@tonic-gate uint8_t *rootnex_warn_list;
1000Sstevel@tonic-gate /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
1010Sstevel@tonic-gate #define	ROOTNEX_BIND_WARNING	(0x1 << 0)
1020Sstevel@tonic-gate 
1030Sstevel@tonic-gate /*
104509Smrj  * revert back to old broken behavior of always sync'ing entire copy buffer.
105509Smrj  * This is useful if be have a buggy driver which doesn't correctly pass in
106509Smrj  * the offset and size into ddi_dma_sync().
1070Sstevel@tonic-gate  */
108509Smrj int rootnex_sync_ignore_params = 0;
1090Sstevel@tonic-gate 
1100Sstevel@tonic-gate /*
111509Smrj  * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
112509Smrj  * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
113509Smrj  * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
114509Smrj  * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
115509Smrj  * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
116509Smrj  * (< 8K). We will still need to allocate the copy buffer during bind though
117509Smrj  * (if we need one). These can only be modified in /etc/system before rootnex
118509Smrj  * attach.
1190Sstevel@tonic-gate  */
120509Smrj #if defined(__amd64)
121509Smrj int rootnex_prealloc_cookies = 65;
122509Smrj int rootnex_prealloc_windows = 4;
123509Smrj int rootnex_prealloc_copybuf = 2;
124509Smrj #else
125509Smrj int rootnex_prealloc_cookies = 33;
126509Smrj int rootnex_prealloc_windows = 4;
127509Smrj int rootnex_prealloc_copybuf = 2;
128509Smrj #endif
129509Smrj 
130509Smrj /* driver global state */
131509Smrj static rootnex_state_t *rootnex_state;
132509Smrj 
133509Smrj /* shortcut to rootnex counters */
134509Smrj static uint64_t *rootnex_cnt;
1350Sstevel@tonic-gate 
1360Sstevel@tonic-gate /*
137509Smrj  * XXX - does x86 even need these or are they left over from the SPARC days?
1380Sstevel@tonic-gate  */
139509Smrj /* statically defined integer/boolean properties for the root node */
140509Smrj static rootnex_intprop_t rootnex_intprp[] = {
141509Smrj 	{ "PAGESIZE",			PAGESIZE },
142509Smrj 	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
143509Smrj 	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
144509Smrj 	{ DDI_RELATIVE_ADDRESSING,	1 },
145509Smrj };
146509Smrj #define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
147509Smrj 
1485084Sjohnlev #ifdef __xpv
1495084Sjohnlev typedef maddr_t rootnex_addr_t;
1505084Sjohnlev #define	ROOTNEX_PADDR_TO_RBASE(xinfo, pa)	\
1515084Sjohnlev 	(DOMAIN_IS_INITDOMAIN(xinfo) ? pa_to_ma(pa) : (pa))
1525084Sjohnlev #else
1535084Sjohnlev typedef paddr_t rootnex_addr_t;
1545084Sjohnlev #endif
1555084Sjohnlev 
1567613SVikram.Hegde@Sun.COM #if !defined(__xpv)
15710384SVikram.Hegde@Sun.COM char _depends_on[] = "mach/pcplusmp misc/iommulib misc/acpica";
1587613SVikram.Hegde@Sun.COM #endif
159509Smrj 
160509Smrj static struct cb_ops rootnex_cb_ops = {
161509Smrj 	nodev,		/* open */
162509Smrj 	nodev,		/* close */
163509Smrj 	nodev,		/* strategy */
164509Smrj 	nodev,		/* print */
165509Smrj 	nodev,		/* dump */
166509Smrj 	nodev,		/* read */
167509Smrj 	nodev,		/* write */
168509Smrj 	nodev,		/* ioctl */
169509Smrj 	nodev,		/* devmap */
170509Smrj 	nodev,		/* mmap */
171509Smrj 	nodev,		/* segmap */
172509Smrj 	nochpoll,	/* chpoll */
173509Smrj 	ddi_prop_op,	/* cb_prop_op */
174509Smrj 	NULL,		/* struct streamtab */
175509Smrj 	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
176509Smrj 	CB_REV,		/* Rev */
177509Smrj 	nodev,		/* cb_aread */
178509Smrj 	nodev		/* cb_awrite */
179509Smrj };
180509Smrj 
181509Smrj static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
1820Sstevel@tonic-gate     off_t offset, off_t len, caddr_t *vaddrp);
183509Smrj static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
1840Sstevel@tonic-gate     struct hat *hat, struct seg *seg, caddr_t addr,
1850Sstevel@tonic-gate     struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
186509Smrj static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
1870Sstevel@tonic-gate     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep);
188509Smrj static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
189509Smrj     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
190509Smrj     ddi_dma_handle_t *handlep);
191509Smrj static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
192509Smrj     ddi_dma_handle_t handle);
193509Smrj static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
194509Smrj     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
195509Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
196509Smrj static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
197509Smrj     ddi_dma_handle_t handle);
198509Smrj static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
199509Smrj     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
200509Smrj static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
201509Smrj     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
202509Smrj     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
203509Smrj static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
2040Sstevel@tonic-gate     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
2050Sstevel@tonic-gate     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
206509Smrj static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
207509Smrj     ddi_ctl_enum_t ctlop, void *arg, void *result);
2081865Sdilpreet static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
2091865Sdilpreet     ddi_iblock_cookie_t *ibc);
210509Smrj static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
211509Smrj     ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
212509Smrj 
2137613SVikram.Hegde@Sun.COM static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
2147613SVikram.Hegde@Sun.COM     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
2157613SVikram.Hegde@Sun.COM     ddi_dma_handle_t *handlep);
2167613SVikram.Hegde@Sun.COM static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
2177613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
2187613SVikram.Hegde@Sun.COM static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
2197613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
2207613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
2217613SVikram.Hegde@Sun.COM static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2227613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
2237617SVikram.Hegde@Sun.COM #if !defined(__xpv)
2247613SVikram.Hegde@Sun.COM static void rootnex_coredma_reset_cookies(dev_info_t *dip,
2257613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
2267613SVikram.Hegde@Sun.COM static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
2278215SVikram.Hegde@Sun.COM     ddi_dma_cookie_t **cookiepp, uint_t *ccountp);
2288215SVikram.Hegde@Sun.COM static int rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
2298215SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t ccount);
2308215SVikram.Hegde@Sun.COM static int rootnex_coredma_clear_cookies(dev_info_t *dip,
2318215SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle);
2328215SVikram.Hegde@Sun.COM static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle);
2337617SVikram.Hegde@Sun.COM #endif
2347613SVikram.Hegde@Sun.COM static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip,
2357613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
2367613SVikram.Hegde@Sun.COM static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip,
2377613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
2387613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp);
2390Sstevel@tonic-gate 
2400Sstevel@tonic-gate static struct bus_ops rootnex_bus_ops = {
2410Sstevel@tonic-gate 	BUSO_REV,
2420Sstevel@tonic-gate 	rootnex_map,
2430Sstevel@tonic-gate 	NULL,
2440Sstevel@tonic-gate 	NULL,
2450Sstevel@tonic-gate 	NULL,
2460Sstevel@tonic-gate 	rootnex_map_fault,
2470Sstevel@tonic-gate 	rootnex_dma_map,
2480Sstevel@tonic-gate 	rootnex_dma_allochdl,
2490Sstevel@tonic-gate 	rootnex_dma_freehdl,
2500Sstevel@tonic-gate 	rootnex_dma_bindhdl,
2510Sstevel@tonic-gate 	rootnex_dma_unbindhdl,
252509Smrj 	rootnex_dma_sync,
2530Sstevel@tonic-gate 	rootnex_dma_win,
2540Sstevel@tonic-gate 	rootnex_dma_mctl,
2550Sstevel@tonic-gate 	rootnex_ctlops,
2560Sstevel@tonic-gate 	ddi_bus_prop_op,
2570Sstevel@tonic-gate 	i_ddi_rootnex_get_eventcookie,
2580Sstevel@tonic-gate 	i_ddi_rootnex_add_eventcall,
2590Sstevel@tonic-gate 	i_ddi_rootnex_remove_eventcall,
2600Sstevel@tonic-gate 	i_ddi_rootnex_post_event,
2610Sstevel@tonic-gate 	0,			/* bus_intr_ctl */
2620Sstevel@tonic-gate 	0,			/* bus_config */
2630Sstevel@tonic-gate 	0,			/* bus_unconfig */
2641865Sdilpreet 	rootnex_fm_init,	/* bus_fm_init */
2650Sstevel@tonic-gate 	NULL,			/* bus_fm_fini */
2660Sstevel@tonic-gate 	NULL,			/* bus_fm_access_enter */
2670Sstevel@tonic-gate 	NULL,			/* bus_fm_access_exit */
2680Sstevel@tonic-gate 	NULL,			/* bus_powr */
2690Sstevel@tonic-gate 	rootnex_intr_ops	/* bus_intr_op */
2700Sstevel@tonic-gate };
2710Sstevel@tonic-gate 
272509Smrj static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
273509Smrj static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
2740Sstevel@tonic-gate 
2750Sstevel@tonic-gate static struct dev_ops rootnex_ops = {
2760Sstevel@tonic-gate 	DEVO_REV,
277509Smrj 	0,
278509Smrj 	ddi_no_info,
279509Smrj 	nulldev,
2800Sstevel@tonic-gate 	nulldev,
2810Sstevel@tonic-gate 	rootnex_attach,
282509Smrj 	rootnex_detach,
283509Smrj 	nulldev,
284509Smrj 	&rootnex_cb_ops,
2857656SSherry.Moore@Sun.COM 	&rootnex_bus_ops,
2867656SSherry.Moore@Sun.COM 	NULL,
2877656SSherry.Moore@Sun.COM 	ddi_quiesce_not_needed,		/* quiesce */
2880Sstevel@tonic-gate };
2890Sstevel@tonic-gate 
290509Smrj static struct modldrv rootnex_modldrv = {
291509Smrj 	&mod_driverops,
2927542SRichard.Bean@Sun.COM 	"i86pc root nexus",
293509Smrj 	&rootnex_ops
294509Smrj };
295509Smrj 
296509Smrj static struct modlinkage rootnex_modlinkage = {
297509Smrj 	MODREV_1,
298509Smrj 	(void *)&rootnex_modldrv,
299509Smrj 	NULL
300509Smrj };
301509Smrj 
3027617SVikram.Hegde@Sun.COM #if !defined(__xpv)
3037613SVikram.Hegde@Sun.COM static iommulib_nexops_t iommulib_nexops = {
3047613SVikram.Hegde@Sun.COM 	IOMMU_NEXOPS_VERSION,
3057613SVikram.Hegde@Sun.COM 	"Rootnex IOMMU ops Vers 1.1",
3067613SVikram.Hegde@Sun.COM 	NULL,
3077613SVikram.Hegde@Sun.COM 	rootnex_coredma_allochdl,
3087613SVikram.Hegde@Sun.COM 	rootnex_coredma_freehdl,
3097613SVikram.Hegde@Sun.COM 	rootnex_coredma_bindhdl,
3107613SVikram.Hegde@Sun.COM 	rootnex_coredma_unbindhdl,
3117613SVikram.Hegde@Sun.COM 	rootnex_coredma_reset_cookies,
3127613SVikram.Hegde@Sun.COM 	rootnex_coredma_get_cookies,
3138215SVikram.Hegde@Sun.COM 	rootnex_coredma_set_cookies,
3148215SVikram.Hegde@Sun.COM 	rootnex_coredma_clear_cookies,
3158215SVikram.Hegde@Sun.COM 	rootnex_coredma_get_sleep_flags,
3167613SVikram.Hegde@Sun.COM 	rootnex_coredma_sync,
3177613SVikram.Hegde@Sun.COM 	rootnex_coredma_win,
31810216SVikram.Hegde@Sun.COM 	rootnex_dma_map,
31910216SVikram.Hegde@Sun.COM 	rootnex_dma_mctl
3207613SVikram.Hegde@Sun.COM };
3217617SVikram.Hegde@Sun.COM #endif
322509Smrj 
323509Smrj /*
324509Smrj  *  extern hacks
325509Smrj  */
326509Smrj extern struct seg_ops segdev_ops;
327509Smrj extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
328509Smrj #ifdef	DDI_MAP_DEBUG
329509Smrj extern int ddi_map_debug_flag;
330509Smrj #define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
331509Smrj #endif
332509Smrj extern void i86_pp_map(page_t *pp, caddr_t kaddr);
333509Smrj extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
334509Smrj extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
335509Smrj     psm_intr_op_t, int *);
336509Smrj extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
337509Smrj extern void impl_ddi_sunbus_removechild(dev_info_t *dip);
3385251Smrj 
339509Smrj /*
340509Smrj  * Use device arena to use for device control register mappings.
341509Smrj  * Various kernel memory walkers (debugger, dtrace) need to know
342509Smrj  * to avoid this address range to prevent undesired device activity.
343509Smrj  */
344509Smrj extern void *device_arena_alloc(size_t size, int vm_flag);
345509Smrj extern void device_arena_free(void * vaddr, size_t size);
346509Smrj 
347509Smrj 
3480Sstevel@tonic-gate /*
349509Smrj  *  Internal functions
3500Sstevel@tonic-gate  */
351509Smrj static int rootnex_dma_init();
352509Smrj static void rootnex_add_props(dev_info_t *);
353509Smrj static int rootnex_ctl_reportdev(dev_info_t *dip);
354509Smrj static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
355509Smrj static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
356509Smrj static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
357509Smrj static int rootnex_map_handle(ddi_map_req_t *mp);
358509Smrj static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
359509Smrj static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
360509Smrj static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
361509Smrj     ddi_dma_attr_t *attr);
362509Smrj static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
363509Smrj     rootnex_sglinfo_t *sglinfo);
364509Smrj static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
365509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag);
366509Smrj static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
367509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr);
368509Smrj static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
369509Smrj static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
370509Smrj     ddi_dma_attr_t *attr, int kmflag);
371509Smrj static void rootnex_teardown_windows(rootnex_dma_t *dma);
372509Smrj static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
373509Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
374509Smrj static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
375509Smrj     rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
376509Smrj     size_t *copybuf_used, page_t **cur_pp);
377509Smrj static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
378509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
379509Smrj     ddi_dma_attr_t *attr, off_t cur_offset);
380509Smrj static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
381509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp,
382509Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
383509Smrj static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
384509Smrj     rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
385509Smrj static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
386509Smrj     off_t offset, size_t size, uint_t cache_flags);
387509Smrj static int rootnex_verify_buffer(rootnex_dma_t *dma);
3881865Sdilpreet static int rootnex_dma_check(dev_info_t *dip, const void *handle,
3891865Sdilpreet     const void *comp_addr, const void *not_used);
390509Smrj 
391509Smrj /*
392509Smrj  * _init()
393509Smrj  *
394509Smrj  */
3950Sstevel@tonic-gate int
3960Sstevel@tonic-gate _init(void)
3970Sstevel@tonic-gate {
398509Smrj 
399509Smrj 	rootnex_state = NULL;
400509Smrj 	return (mod_install(&rootnex_modlinkage));
4010Sstevel@tonic-gate }
4020Sstevel@tonic-gate 
403509Smrj 
404509Smrj /*
405509Smrj  * _info()
406509Smrj  *
407509Smrj  */
408509Smrj int
409509Smrj _info(struct modinfo *modinfop)
410509Smrj {
411509Smrj 	return (mod_info(&rootnex_modlinkage, modinfop));
412509Smrj }
413509Smrj 
414509Smrj 
415509Smrj /*
416509Smrj  * _fini()
417509Smrj  *
418509Smrj  */
4190Sstevel@tonic-gate int
4200Sstevel@tonic-gate _fini(void)
4210Sstevel@tonic-gate {
4220Sstevel@tonic-gate 	return (EBUSY);
4230Sstevel@tonic-gate }
4240Sstevel@tonic-gate 
4250Sstevel@tonic-gate 
4260Sstevel@tonic-gate /*
427509Smrj  * rootnex_attach()
4280Sstevel@tonic-gate  *
4290Sstevel@tonic-gate  */
430509Smrj static int
431509Smrj rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
432509Smrj {
4331414Scindi 	int fmcap;
434509Smrj 	int e;
435509Smrj 
436509Smrj 	switch (cmd) {
437509Smrj 	case DDI_ATTACH:
438509Smrj 		break;
439509Smrj 	case DDI_RESUME:
440509Smrj 		return (DDI_SUCCESS);
441509Smrj 	default:
442509Smrj 		return (DDI_FAILURE);
443509Smrj 	}
444509Smrj 
445509Smrj 	/*
446509Smrj 	 * We should only have one instance of rootnex. Save it away since we
447509Smrj 	 * don't have an easy way to get it back later.
448509Smrj 	 */
449509Smrj 	ASSERT(rootnex_state == NULL);
450509Smrj 	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);
451509Smrj 
452509Smrj 	rootnex_state->r_dip = dip;
4531414Scindi 	rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
454509Smrj 	rootnex_state->r_reserved_msg_printed = B_FALSE;
455509Smrj 	rootnex_cnt = &rootnex_state->r_counters[0];
4567589SVikram.Hegde@Sun.COM 	rootnex_state->r_intel_iommu_enabled = B_FALSE;
457509Smrj 
4581414Scindi 	/*
4591414Scindi 	 * Set minimum fm capability level for i86pc platforms and then
4601414Scindi 	 * initialize error handling. Since we're the rootnex, we don't
4611414Scindi 	 * care what's returned in the fmcap field.
4621414Scindi 	 */
4631865Sdilpreet 	ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
4641865Sdilpreet 	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
4651414Scindi 	fmcap = ddi_system_fmcap;
4661414Scindi 	ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);
4671414Scindi 
468509Smrj 	/* initialize DMA related state */
469509Smrj 	e = rootnex_dma_init();
470509Smrj 	if (e != DDI_SUCCESS) {
471509Smrj 		kmem_free(rootnex_state, sizeof (rootnex_state_t));
472509Smrj 		return (DDI_FAILURE);
473509Smrj 	}
474509Smrj 
475509Smrj 	/* Add static root node properties */
476509Smrj 	rootnex_add_props(dip);
477509Smrj 
478509Smrj 	/* since we can't call ddi_report_dev() */
479509Smrj 	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));
480509Smrj 
481509Smrj 	/* Initialize rootnex event handle */
482509Smrj 	i_ddi_rootnex_init_events(dip);
483509Smrj 
4847613SVikram.Hegde@Sun.COM #if !defined(__xpv)
4857589SVikram.Hegde@Sun.COM #if defined(__amd64)
4867589SVikram.Hegde@Sun.COM 	/* probe intel iommu */
4877589SVikram.Hegde@Sun.COM 	intel_iommu_probe_and_parse();
4887589SVikram.Hegde@Sun.COM 
4897589SVikram.Hegde@Sun.COM 	/* attach the iommu nodes */
4907589SVikram.Hegde@Sun.COM 	if (intel_iommu_support) {
4917589SVikram.Hegde@Sun.COM 		if (intel_iommu_attach_dmar_nodes() == DDI_SUCCESS) {
4927589SVikram.Hegde@Sun.COM 			rootnex_state->r_intel_iommu_enabled = B_TRUE;
4937589SVikram.Hegde@Sun.COM 		} else {
4947589SVikram.Hegde@Sun.COM 			intel_iommu_release_dmar_info();
4957589SVikram.Hegde@Sun.COM 		}
4967589SVikram.Hegde@Sun.COM 	}
4977589SVikram.Hegde@Sun.COM #endif
4987589SVikram.Hegde@Sun.COM 
4997613SVikram.Hegde@Sun.COM 	e = iommulib_nexus_register(dip, &iommulib_nexops,
5007613SVikram.Hegde@Sun.COM 	    &rootnex_state->r_iommulib_handle);
5017613SVikram.Hegde@Sun.COM 
5027613SVikram.Hegde@Sun.COM 	ASSERT(e == DDI_SUCCESS);
5037613SVikram.Hegde@Sun.COM #endif
5047613SVikram.Hegde@Sun.COM 
505509Smrj 	return (DDI_SUCCESS);
506509Smrj }
507509Smrj 
508509Smrj 
509509Smrj /*
510509Smrj  * rootnex_detach()
511509Smrj  *
512509Smrj  */
5130Sstevel@tonic-gate /*ARGSUSED*/
5140Sstevel@tonic-gate static int
515509Smrj rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
516509Smrj {
517509Smrj 	switch (cmd) {
518509Smrj 	case DDI_SUSPEND:
519509Smrj 		break;
520509Smrj 	default:
521509Smrj 		return (DDI_FAILURE);
522509Smrj 	}
523509Smrj 
524509Smrj 	return (DDI_SUCCESS);
525509Smrj }
526509Smrj 
527509Smrj 
528509Smrj /*
529509Smrj  * rootnex_dma_init()
530509Smrj  *
531509Smrj  */
532509Smrj /*ARGSUSED*/
533509Smrj static int
534509Smrj rootnex_dma_init()
5350Sstevel@tonic-gate {
536509Smrj 	size_t bufsize;
537509Smrj 
538509Smrj 
539509Smrj 	/*
540509Smrj 	 * size of our cookie/window/copybuf state needed in dma bind that we
541509Smrj 	 * pre-alloc in dma_alloc_handle
542509Smrj 	 */
543509Smrj 	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
544509Smrj 	rootnex_state->r_prealloc_size =
545509Smrj 	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
546509Smrj 	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
547509Smrj 	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));
548509Smrj 
549509Smrj 	/*
550509Smrj 	 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
551509Smrj 	 * allocate 16 extra bytes for struct pointer alignment
552509Smrj 	 * (p->dmai_private & dma->dp_prealloc_buffer)
553509Smrj 	 */
554509Smrj 	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
555509Smrj 	    rootnex_state->r_prealloc_size + 0x10;
556509Smrj 	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
557509Smrj 	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
558509Smrj 	if (rootnex_state->r_dmahdl_cache == NULL) {
559509Smrj 		return (DDI_FAILURE);
560509Smrj 	}
5610Sstevel@tonic-gate 
5620Sstevel@tonic-gate 	/*
5630Sstevel@tonic-gate 	 * allocate array to track which major numbers we have printed warnings
5640Sstevel@tonic-gate 	 * for.
5650Sstevel@tonic-gate 	 */
5660Sstevel@tonic-gate 	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
5670Sstevel@tonic-gate 	    KM_SLEEP);
5680Sstevel@tonic-gate 
5690Sstevel@tonic-gate 	return (DDI_SUCCESS);
5700Sstevel@tonic-gate }
5710Sstevel@tonic-gate 
5720Sstevel@tonic-gate 
5730Sstevel@tonic-gate /*
574509Smrj  * rootnex_add_props()
575509Smrj  *
5760Sstevel@tonic-gate  */
5770Sstevel@tonic-gate static void
578509Smrj rootnex_add_props(dev_info_t *dip)
5790Sstevel@tonic-gate {
580509Smrj 	rootnex_intprop_t *rpp;
5810Sstevel@tonic-gate 	int i;
582509Smrj 
583509Smrj 	/* Add static integer/boolean properties to the root node */
584509Smrj 	rpp = rootnex_intprp;
585509Smrj 	for (i = 0; i < NROOT_INTPROPS; i++) {
586509Smrj 		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
587509Smrj 		    rpp[i].prop_name, rpp[i].prop_value);
5880Sstevel@tonic-gate 	}
5890Sstevel@tonic-gate }
5900Sstevel@tonic-gate 
591509Smrj 
592509Smrj 
593509Smrj /*
594509Smrj  * *************************
595509Smrj  *  ctlops related routines
596509Smrj  * *************************
597509Smrj  */
598509Smrj 
5990Sstevel@tonic-gate /*
600509Smrj  * rootnex_ctlops()
601509Smrj  *
6020Sstevel@tonic-gate  */
603693Sgovinda /*ARGSUSED*/
604509Smrj static int
605509Smrj rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
606509Smrj     void *arg, void *result)
607509Smrj {
608509Smrj 	int n, *ptr;
609509Smrj 	struct ddi_parent_private_data *pdp;
610509Smrj 
611509Smrj 	switch (ctlop) {
612509Smrj 	case DDI_CTLOPS_DMAPMAPC:
613509Smrj 		/*
614509Smrj 		 * Return 'partial' to indicate that dma mapping
615509Smrj 		 * has to be done in the main MMU.
616509Smrj 		 */
617509Smrj 		return (DDI_DMA_PARTIAL);
618509Smrj 
619509Smrj 	case DDI_CTLOPS_BTOP:
620509Smrj 		/*
621509Smrj 		 * Convert byte count input to physical page units.
622509Smrj 		 * (byte counts that are not a page-size multiple
623509Smrj 		 * are rounded down)
624509Smrj 		 */
625509Smrj 		*(ulong_t *)result = btop(*(ulong_t *)arg);
626509Smrj 		return (DDI_SUCCESS);
627509Smrj 
628509Smrj 	case DDI_CTLOPS_PTOB:
629509Smrj 		/*
630509Smrj 		 * Convert size in physical pages to bytes
631509Smrj 		 */
632509Smrj 		*(ulong_t *)result = ptob(*(ulong_t *)arg);
633509Smrj 		return (DDI_SUCCESS);
634509Smrj 
635509Smrj 	case DDI_CTLOPS_BTOPR:
636509Smrj 		/*
637509Smrj 		 * Convert byte count input to physical page units
638509Smrj 		 * (byte counts that are not a page-size multiple
639509Smrj 		 * are rounded up)
640509Smrj 		 */
641509Smrj 		*(ulong_t *)result = btopr(*(ulong_t *)arg);
642509Smrj 		return (DDI_SUCCESS);
643509Smrj 
644509Smrj 	case DDI_CTLOPS_INITCHILD:
645509Smrj 		return (impl_ddi_sunbus_initchild(arg));
646509Smrj 
647509Smrj 	case DDI_CTLOPS_UNINITCHILD:
648509Smrj 		impl_ddi_sunbus_removechild(arg);
649509Smrj 		return (DDI_SUCCESS);
650509Smrj 
651509Smrj 	case DDI_CTLOPS_REPORTDEV:
652509Smrj 		return (rootnex_ctl_reportdev(rdip));
653509Smrj 
654509Smrj 	case DDI_CTLOPS_IOMIN:
655509Smrj 		/*
656509Smrj 		 * Nothing to do here but reflect back..
657509Smrj 		 */
658509Smrj 		return (DDI_SUCCESS);
659509Smrj 
660509Smrj 	case DDI_CTLOPS_REGSIZE:
661509Smrj 	case DDI_CTLOPS_NREGS:
662509Smrj 		break;
663509Smrj 
664509Smrj 	case DDI_CTLOPS_SIDDEV:
665509Smrj 		if (ndi_dev_is_prom_node(rdip))
666509Smrj 			return (DDI_SUCCESS);
667509Smrj 		if (ndi_dev_is_persistent_node(rdip))
668509Smrj 			return (DDI_SUCCESS);
669509Smrj 		return (DDI_FAILURE);
670509Smrj 
671509Smrj 	case DDI_CTLOPS_POWER:
672509Smrj 		return ((*pm_platform_power)((power_req_t *)arg));
673509Smrj 
674693Sgovinda 	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
675509Smrj 	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
676509Smrj 	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
677509Smrj 	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
678693Sgovinda 	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
679693Sgovinda 	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
680509Smrj 		if (!rootnex_state->r_reserved_msg_printed) {
681509Smrj 			rootnex_state->r_reserved_msg_printed = B_TRUE;
682509Smrj 			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
683509Smrj 			    "1 or more reserved/obsolete operations.");
684509Smrj 		}
685509Smrj 		return (DDI_FAILURE);
686509Smrj 
687509Smrj 	default:
688509Smrj 		return (DDI_FAILURE);
689509Smrj 	}
690509Smrj 	/*
691509Smrj 	 * The rest are for "hardware" properties
692509Smrj 	 */
693509Smrj 	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
694509Smrj 		return (DDI_FAILURE);
695509Smrj 
696509Smrj 	if (ctlop == DDI_CTLOPS_NREGS) {
697509Smrj 		ptr = (int *)result;
698509Smrj 		*ptr = pdp->par_nreg;
699509Smrj 	} else {
700509Smrj 		off_t *size = (off_t *)result;
701509Smrj 
702509Smrj 		ptr = (int *)arg;
703509Smrj 		n = *ptr;
704509Smrj 		if (n >= pdp->par_nreg) {
705509Smrj 			return (DDI_FAILURE);
706509Smrj 		}
707509Smrj 		*size = (off_t)pdp->par_reg[n].regspec_size;
708509Smrj 	}
709509Smrj 	return (DDI_SUCCESS);
710509Smrj }
7110Sstevel@tonic-gate 
7120Sstevel@tonic-gate 
7130Sstevel@tonic-gate /*
714509Smrj  * rootnex_ctl_reportdev()
715509Smrj  *
7160Sstevel@tonic-gate  */
7170Sstevel@tonic-gate static int
718509Smrj rootnex_ctl_reportdev(dev_info_t *dev)
7190Sstevel@tonic-gate {
720509Smrj 	int i, n, len, f_len = 0;
721509Smrj 	char *buf;
722509Smrj 
723509Smrj 	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
724509Smrj 	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
725509Smrj 	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
726509Smrj 	len = strlen(buf);
727509Smrj 
728509Smrj 	for (i = 0; i < sparc_pd_getnreg(dev); i++) {
729509Smrj 
730509Smrj 		struct regspec *rp = sparc_pd_getreg(dev, i);
731509Smrj 
732509Smrj 		if (i == 0)
733509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
734509Smrj 			    ": ");
735509Smrj 		else
736509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
737509Smrj 			    " and ");
738509Smrj 		len = strlen(buf);
739509Smrj 
740509Smrj 		switch (rp->regspec_bustype) {
741509Smrj 
742509Smrj 		case BTEISA:
743509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
744509Smrj 			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
7450Sstevel@tonic-gate 			break;
746509Smrj 
747509Smrj 		case BTISA:
748509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
749509Smrj 			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
7500Sstevel@tonic-gate 			break;
751509Smrj 
752509Smrj 		default:
753509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
754509Smrj 			    "space %x offset %x",
755509Smrj 			    rp->regspec_bustype, rp->regspec_addr);
7560Sstevel@tonic-gate 			break;
7570Sstevel@tonic-gate 		}
758509Smrj 		len = strlen(buf);
7590Sstevel@tonic-gate 	}
760509Smrj 	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
761509Smrj 		int pri;
762509Smrj 
763509Smrj 		if (i != 0) {
764509Smrj 			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
765509Smrj 			    ",");
766509Smrj 			len = strlen(buf);
767509Smrj 		}
768509Smrj 		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
769509Smrj 		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
770509Smrj 		    " sparc ipl %d", pri);
771509Smrj 		len = strlen(buf);
7720Sstevel@tonic-gate 	}
773509Smrj #ifdef DEBUG
774509Smrj 	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
775509Smrj 		cmn_err(CE_NOTE, "next message is truncated: "
776509Smrj 		    "printed length 1024, real length %d", f_len);
777509Smrj 	}
778509Smrj #endif /* DEBUG */
779509Smrj 	cmn_err(CE_CONT, "?%s\n", buf);
780509Smrj 	kmem_free(buf, REPORTDEV_BUFSIZE);
7810Sstevel@tonic-gate 	return (DDI_SUCCESS);
7820Sstevel@tonic-gate }
7830Sstevel@tonic-gate 
784509Smrj 
785509Smrj /*
786509Smrj  * ******************
787509Smrj  *  map related code
788509Smrj  * ******************
789509Smrj  */
790509Smrj 
791509Smrj /*
792509Smrj  * rootnex_map()
793509Smrj  *
794509Smrj  */
7950Sstevel@tonic-gate static int
796509Smrj rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
797509Smrj     off_t len, caddr_t *vaddrp)
7980Sstevel@tonic-gate {
7990Sstevel@tonic-gate 	struct regspec *rp, tmp_reg;
8000Sstevel@tonic-gate 	ddi_map_req_t mr = *mp;		/* Get private copy of request */
8010Sstevel@tonic-gate 	int error;
8020Sstevel@tonic-gate 
8030Sstevel@tonic-gate 	mp = &mr;
8040Sstevel@tonic-gate 
8050Sstevel@tonic-gate 	switch (mp->map_op)  {
8060Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
8070Sstevel@tonic-gate 	case DDI_MO_UNMAP:
8080Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
8090Sstevel@tonic-gate 		break;
8100Sstevel@tonic-gate 	default:
8110Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8120Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
8130Sstevel@tonic-gate 		    mp->map_op);
8140Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8150Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
8160Sstevel@tonic-gate 	}
8170Sstevel@tonic-gate 
8180Sstevel@tonic-gate 	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
8190Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8200Sstevel@tonic-gate 		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
8210Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8220Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
8230Sstevel@tonic-gate 	}
8240Sstevel@tonic-gate 
8250Sstevel@tonic-gate 	/*
8260Sstevel@tonic-gate 	 * First, if given an rnumber, convert it to a regspec...
8270Sstevel@tonic-gate 	 * (Presumably, this is on behalf of a child of the root node?)
8280Sstevel@tonic-gate 	 */
8290Sstevel@tonic-gate 
8300Sstevel@tonic-gate 	if (mp->map_type == DDI_MT_RNUMBER)  {
8310Sstevel@tonic-gate 
8320Sstevel@tonic-gate 		int rnumber = mp->map_obj.rnumber;
8330Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8340Sstevel@tonic-gate 		static char *out_of_range =
8350Sstevel@tonic-gate 		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
8360Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8370Sstevel@tonic-gate 
8380Sstevel@tonic-gate 		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
8390Sstevel@tonic-gate 		if (rp == NULL)  {
8400Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8410Sstevel@tonic-gate 			cmn_err(CE_WARN, out_of_range, rnumber,
8420Sstevel@tonic-gate 			    ddi_get_name(rdip));
8430Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8440Sstevel@tonic-gate 			return (DDI_ME_RNUMBER_RANGE);
8450Sstevel@tonic-gate 		}
8460Sstevel@tonic-gate 
8470Sstevel@tonic-gate 		/*
8480Sstevel@tonic-gate 		 * Convert the given ddi_map_req_t from rnumber to regspec...
8490Sstevel@tonic-gate 		 */
8500Sstevel@tonic-gate 
8510Sstevel@tonic-gate 		mp->map_type = DDI_MT_REGSPEC;
8520Sstevel@tonic-gate 		mp->map_obj.rp = rp;
8530Sstevel@tonic-gate 	}
8540Sstevel@tonic-gate 
8550Sstevel@tonic-gate 	/*
8560Sstevel@tonic-gate 	 * Adjust offset and length correspnding to called values...
8570Sstevel@tonic-gate 	 * XXX: A non-zero length means override the one in the regspec
8580Sstevel@tonic-gate 	 * XXX: (regardless of what's in the parent's range?)
8590Sstevel@tonic-gate 	 */
8600Sstevel@tonic-gate 
8610Sstevel@tonic-gate 	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
8620Sstevel@tonic-gate 	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */
8630Sstevel@tonic-gate 
8640Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
8655084Sjohnlev 	cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d "
8665084Sjohnlev 	    "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
8675084Sjohnlev 	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, offset,
8685084Sjohnlev 	    len, mp->map_handlep);
8690Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
8700Sstevel@tonic-gate 
8710Sstevel@tonic-gate 	/*
8720Sstevel@tonic-gate 	 * I/O or memory mapping:
8730Sstevel@tonic-gate 	 *
8740Sstevel@tonic-gate 	 *	<bustype=0, addr=x, len=x>: memory
8750Sstevel@tonic-gate 	 *	<bustype=1, addr=x, len=x>: i/o
8760Sstevel@tonic-gate 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
8770Sstevel@tonic-gate 	 */
8780Sstevel@tonic-gate 
8790Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
8800Sstevel@tonic-gate 		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
8810Sstevel@tonic-gate 		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
8820Sstevel@tonic-gate 		    ddi_get_name(rdip), rp->regspec_bustype,
8830Sstevel@tonic-gate 		    rp->regspec_addr, rp->regspec_size);
8840Sstevel@tonic-gate 		return (DDI_ME_INVAL);
8850Sstevel@tonic-gate 	}
8860Sstevel@tonic-gate 
8870Sstevel@tonic-gate 	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
8880Sstevel@tonic-gate 		/*
8890Sstevel@tonic-gate 		 * compatibility i/o mapping
8900Sstevel@tonic-gate 		 */
8910Sstevel@tonic-gate 		rp->regspec_bustype += (uint_t)offset;
8920Sstevel@tonic-gate 	} else {
8930Sstevel@tonic-gate 		/*
8940Sstevel@tonic-gate 		 * Normal memory or i/o mapping
8950Sstevel@tonic-gate 		 */
8960Sstevel@tonic-gate 		rp->regspec_addr += (uint_t)offset;
8970Sstevel@tonic-gate 	}
8980Sstevel@tonic-gate 
8990Sstevel@tonic-gate 	if (len != 0)
9000Sstevel@tonic-gate 		rp->regspec_size = (uint_t)len;
9010Sstevel@tonic-gate 
9020Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9035084Sjohnlev 	cmn_err(CE_CONT, "             <%s,%s> <0x%x, 0x%x, 0x%d> offset %d "
9045084Sjohnlev 	    "len %d handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
9055084Sjohnlev 	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
9065084Sjohnlev 	    offset, len, mp->map_handlep);
9070Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9080Sstevel@tonic-gate 
9090Sstevel@tonic-gate 	/*
9100Sstevel@tonic-gate 	 * Apply any parent ranges at this level, if applicable.
9110Sstevel@tonic-gate 	 * (This is where nexus specific regspec translation takes place.
9120Sstevel@tonic-gate 	 * Use of this function is implicit agreement that translation is
9130Sstevel@tonic-gate 	 * provided via ddi_apply_range.)
9140Sstevel@tonic-gate 	 */
9150Sstevel@tonic-gate 
9160Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9170Sstevel@tonic-gate 	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
9180Sstevel@tonic-gate 	    ddi_get_name(dip), ddi_get_name(rdip));
9190Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9200Sstevel@tonic-gate 
9210Sstevel@tonic-gate 	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
9220Sstevel@tonic-gate 		return (error);
9230Sstevel@tonic-gate 
9240Sstevel@tonic-gate 	switch (mp->map_op)  {
9250Sstevel@tonic-gate 	case DDI_MO_MAP_LOCKED:
9260Sstevel@tonic-gate 
9270Sstevel@tonic-gate 		/*
9280Sstevel@tonic-gate 		 * Set up the locked down kernel mapping to the regspec...
9290Sstevel@tonic-gate 		 */
9300Sstevel@tonic-gate 
9310Sstevel@tonic-gate 		return (rootnex_map_regspec(mp, vaddrp));
9320Sstevel@tonic-gate 
9330Sstevel@tonic-gate 	case DDI_MO_UNMAP:
9340Sstevel@tonic-gate 
9350Sstevel@tonic-gate 		/*
9360Sstevel@tonic-gate 		 * Release mapping...
9370Sstevel@tonic-gate 		 */
9380Sstevel@tonic-gate 
9390Sstevel@tonic-gate 		return (rootnex_unmap_regspec(mp, vaddrp));
9400Sstevel@tonic-gate 
9410Sstevel@tonic-gate 	case DDI_MO_MAP_HANDLE:
9420Sstevel@tonic-gate 
9430Sstevel@tonic-gate 		return (rootnex_map_handle(mp));
9440Sstevel@tonic-gate 
9450Sstevel@tonic-gate 	default:
9460Sstevel@tonic-gate 		return (DDI_ME_UNIMPLEMENTED);
9470Sstevel@tonic-gate 	}
9480Sstevel@tonic-gate }
9490Sstevel@tonic-gate 
9500Sstevel@tonic-gate 
9510Sstevel@tonic-gate /*
952509Smrj  * rootnex_map_fault()
9530Sstevel@tonic-gate  *
9540Sstevel@tonic-gate  *	fault in mappings for requestors
9550Sstevel@tonic-gate  */
9560Sstevel@tonic-gate /*ARGSUSED*/
9570Sstevel@tonic-gate static int
958509Smrj rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
959509Smrj     struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
960509Smrj     uint_t lock)
9610Sstevel@tonic-gate {
9620Sstevel@tonic-gate 
9630Sstevel@tonic-gate #ifdef	DDI_MAP_DEBUG
9640Sstevel@tonic-gate 	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
9650Sstevel@tonic-gate 	ddi_map_debug(" Seg <%s>\n",
9660Sstevel@tonic-gate 	    seg->s_ops == &segdev_ops ? "segdev" :
9670Sstevel@tonic-gate 	    seg == &kvseg ? "segkmem" : "NONE!");
9680Sstevel@tonic-gate #endif	/* DDI_MAP_DEBUG */
9690Sstevel@tonic-gate 
9700Sstevel@tonic-gate 	/*
9710Sstevel@tonic-gate 	 * This is all terribly broken, but it is a start
9720Sstevel@tonic-gate 	 *
9730Sstevel@tonic-gate 	 * XXX	Note that this test means that segdev_ops
9740Sstevel@tonic-gate 	 *	must be exported from seg_dev.c.
9750Sstevel@tonic-gate 	 * XXX	What about devices with their own segment drivers?
9760Sstevel@tonic-gate 	 */
9770Sstevel@tonic-gate 	if (seg->s_ops == &segdev_ops) {
9785084Sjohnlev 		struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
9790Sstevel@tonic-gate 
9800Sstevel@tonic-gate 		if (hat == NULL) {
9810Sstevel@tonic-gate 			/*
9820Sstevel@tonic-gate 			 * This is one plausible interpretation of
9830Sstevel@tonic-gate 			 * a null hat i.e. use the first hat on the
9840Sstevel@tonic-gate 			 * address space hat list which by convention is
9850Sstevel@tonic-gate 			 * the hat of the system MMU.  At alternative
9860Sstevel@tonic-gate 			 * would be to panic .. this might well be better ..
9870Sstevel@tonic-gate 			 */
9880Sstevel@tonic-gate 			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
9890Sstevel@tonic-gate 			hat = seg->s_as->a_hat;
9900Sstevel@tonic-gate 			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
9910Sstevel@tonic-gate 		}
9920Sstevel@tonic-gate 		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
9930Sstevel@tonic-gate 		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
9940Sstevel@tonic-gate 	} else if (seg == &kvseg && dp == NULL) {
9950Sstevel@tonic-gate 		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
9960Sstevel@tonic-gate 		    HAT_LOAD_LOCK);
9970Sstevel@tonic-gate 	} else
9980Sstevel@tonic-gate 		return (DDI_FAILURE);
9990Sstevel@tonic-gate 	return (DDI_SUCCESS);
10000Sstevel@tonic-gate }
10010Sstevel@tonic-gate 
10020Sstevel@tonic-gate 
10030Sstevel@tonic-gate /*
1004509Smrj  * rootnex_map_regspec()
1005509Smrj  *     we don't support mapping of I/O cards above 4Gb
10060Sstevel@tonic-gate  */
1007509Smrj static int
1008509Smrj rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1009509Smrj {
10105084Sjohnlev 	rootnex_addr_t rbase;
1011509Smrj 	void *cvaddr;
1012509Smrj 	uint_t npages, pgoffset;
1013509Smrj 	struct regspec *rp;
1014509Smrj 	ddi_acc_hdl_t *hp;
1015509Smrj 	ddi_acc_impl_t *ap;
1016509Smrj 	uint_t	hat_acc_flags;
10175084Sjohnlev 	paddr_t pbase;
1018509Smrj 
1019509Smrj 	rp = mp->map_obj.rp;
1020509Smrj 	hp = mp->map_handlep;
1021509Smrj 
1022509Smrj #ifdef	DDI_MAP_DEBUG
1023509Smrj 	ddi_map_debug(
1024509Smrj 	    "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1025509Smrj 	    rp->regspec_bustype, rp->regspec_addr,
1026509Smrj 	    rp->regspec_size, mp->map_handlep);
1027509Smrj #endif	/* DDI_MAP_DEBUG */
1028509Smrj 
1029509Smrj 	/*
1030509Smrj 	 * I/O or memory mapping
1031509Smrj 	 *
1032509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1033509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1034509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1035509Smrj 	 */
1036509Smrj 
1037509Smrj 	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
1038509Smrj 		cmn_err(CE_WARN, "rootnex: invalid register spec"
1039509Smrj 		    " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype,
1040509Smrj 		    rp->regspec_addr, rp->regspec_size);
1041509Smrj 		return (DDI_FAILURE);
1042509Smrj 	}
1043509Smrj 
1044509Smrj 	if (rp->regspec_bustype != 0) {
1045509Smrj 		/*
1046509Smrj 		 * I/O space - needs a handle.
1047509Smrj 		 */
1048509Smrj 		if (hp == NULL) {
1049509Smrj 			return (DDI_FAILURE);
1050509Smrj 		}
1051509Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1052509Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
1053509Smrj 		impl_acc_hdl_init(hp);
1054509Smrj 
1055509Smrj 		if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1056509Smrj #ifdef  DDI_MAP_DEBUG
10575084Sjohnlev 			ddi_map_debug("rootnex_map_regspec: mmap() "
10585084Sjohnlev 			    "to I/O space is not supported.\n");
1059509Smrj #endif  /* DDI_MAP_DEBUG */
1060509Smrj 			return (DDI_ME_INVAL);
1061509Smrj 		} else {
1062509Smrj 			/*
1063509Smrj 			 * 1275-compliant vs. compatibility i/o mapping
1064509Smrj 			 */
1065509Smrj 			*vaddrp =
1066509Smrj 			    (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
10675084Sjohnlev 			    ((caddr_t)(uintptr_t)rp->regspec_bustype) :
10685084Sjohnlev 			    ((caddr_t)(uintptr_t)rp->regspec_addr);
10695084Sjohnlev #ifdef __xpv
10705084Sjohnlev 			if (DOMAIN_IS_INITDOMAIN(xen_info)) {
10715084Sjohnlev 				hp->ah_pfn = xen_assign_pfn(
10725084Sjohnlev 				    mmu_btop((ulong_t)rp->regspec_addr &
10735084Sjohnlev 				    MMU_PAGEMASK));
10745084Sjohnlev 			} else {
10755084Sjohnlev 				hp->ah_pfn = mmu_btop(
10765084Sjohnlev 				    (ulong_t)rp->regspec_addr & MMU_PAGEMASK);
10775084Sjohnlev 			}
10785084Sjohnlev #else
10791865Sdilpreet 			hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr &
10805084Sjohnlev 			    MMU_PAGEMASK);
10815084Sjohnlev #endif
10821865Sdilpreet 			hp->ah_pnum = mmu_btopr(rp->regspec_size +
10831865Sdilpreet 			    (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET);
1084509Smrj 		}
1085509Smrj 
1086509Smrj #ifdef	DDI_MAP_DEBUG
1087509Smrj 		ddi_map_debug(
1088509Smrj 	    "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1089509Smrj 		    rp->regspec_size, *vaddrp);
1090509Smrj #endif	/* DDI_MAP_DEBUG */
1091509Smrj 		return (DDI_SUCCESS);
1092509Smrj 	}
1093509Smrj 
1094509Smrj 	/*
1095509Smrj 	 * Memory space
1096509Smrj 	 */
1097509Smrj 
1098509Smrj 	if (hp != NULL) {
1099509Smrj 		/*
1100509Smrj 		 * hat layer ignores
1101509Smrj 		 * hp->ah_acc.devacc_attr_endian_flags.
1102509Smrj 		 */
1103509Smrj 		switch (hp->ah_acc.devacc_attr_dataorder) {
1104509Smrj 		case DDI_STRICTORDER_ACC:
1105509Smrj 			hat_acc_flags = HAT_STRICTORDER;
1106509Smrj 			break;
1107509Smrj 		case DDI_UNORDERED_OK_ACC:
1108509Smrj 			hat_acc_flags = HAT_UNORDERED_OK;
1109509Smrj 			break;
1110509Smrj 		case DDI_MERGING_OK_ACC:
1111509Smrj 			hat_acc_flags = HAT_MERGING_OK;
1112509Smrj 			break;
1113509Smrj 		case DDI_LOADCACHING_OK_ACC:
1114509Smrj 			hat_acc_flags = HAT_LOADCACHING_OK;
1115509Smrj 			break;
1116509Smrj 		case DDI_STORECACHING_OK_ACC:
1117509Smrj 			hat_acc_flags = HAT_STORECACHING_OK;
1118509Smrj 			break;
1119509Smrj 		}
1120509Smrj 		ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1121509Smrj 		ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
1122509Smrj 		impl_acc_hdl_init(hp);
1123509Smrj 		hp->ah_hat_flags = hat_acc_flags;
1124509Smrj 	} else {
1125509Smrj 		hat_acc_flags = HAT_STRICTORDER;
1126509Smrj 	}
1127509Smrj 
11285084Sjohnlev 	rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK);
11295084Sjohnlev #ifdef __xpv
11305084Sjohnlev 	/*
11315084Sjohnlev 	 * If we're dom0, we're using a real device so we need to translate
11325084Sjohnlev 	 * the MA to a PA.
11335084Sjohnlev 	 */
11345084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
11355084Sjohnlev 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase)));
11365084Sjohnlev 	} else {
11375084Sjohnlev 		pbase = rbase;
11385084Sjohnlev 	}
11395084Sjohnlev #else
11405084Sjohnlev 	pbase = rbase;
11415084Sjohnlev #endif
11425084Sjohnlev 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1143509Smrj 
1144509Smrj 	if (rp->regspec_size == 0) {
1145509Smrj #ifdef  DDI_MAP_DEBUG
1146509Smrj 		ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1147509Smrj #endif  /* DDI_MAP_DEBUG */
1148509Smrj 		return (DDI_ME_INVAL);
1149509Smrj 	}
1150509Smrj 
1151509Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
11525084Sjohnlev 		/* extra cast to make gcc happy */
11535084Sjohnlev 		*vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase));
1154509Smrj 	} else {
1155509Smrj 		npages = mmu_btopr(rp->regspec_size + pgoffset);
1156509Smrj 
1157509Smrj #ifdef	DDI_MAP_DEBUG
11585084Sjohnlev 		ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
11595084Sjohnlev 		    "physical %llx", npages, pbase);
1160509Smrj #endif	/* DDI_MAP_DEBUG */
1161509Smrj 
1162509Smrj 		cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
1163509Smrj 		if (cvaddr == NULL)
1164509Smrj 			return (DDI_ME_NORESOURCES);
1165509Smrj 
1166509Smrj 		/*
1167509Smrj 		 * Now map in the pages we've allocated...
1168509Smrj 		 */
11695084Sjohnlev 		hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages),
11705084Sjohnlev 		    mmu_btop(pbase), mp->map_prot | hat_acc_flags,
11715084Sjohnlev 		    HAT_LOAD_LOCK);
1172509Smrj 		*vaddrp = (caddr_t)cvaddr + pgoffset;
11731865Sdilpreet 
11741865Sdilpreet 		/* save away pfn and npages for FMA */
11751865Sdilpreet 		hp = mp->map_handlep;
11761865Sdilpreet 		if (hp) {
11775084Sjohnlev 			hp->ah_pfn = mmu_btop(pbase);
11781865Sdilpreet 			hp->ah_pnum = npages;
11791865Sdilpreet 		}
1180509Smrj 	}
1181509Smrj 
1182509Smrj #ifdef	DDI_MAP_DEBUG
1183509Smrj 	ddi_map_debug("at virtual 0x%x\n", *vaddrp);
1184509Smrj #endif	/* DDI_MAP_DEBUG */
1185509Smrj 	return (DDI_SUCCESS);
1186509Smrj }
1187509Smrj 
11880Sstevel@tonic-gate 
11890Sstevel@tonic-gate /*
1190509Smrj  * rootnex_unmap_regspec()
1191509Smrj  *
1192509Smrj  */
1193509Smrj static int
1194509Smrj rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1195509Smrj {
1196509Smrj 	caddr_t addr = (caddr_t)*vaddrp;
1197509Smrj 	uint_t npages, pgoffset;
1198509Smrj 	struct regspec *rp;
1199509Smrj 
1200509Smrj 	if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
1201509Smrj 		return (0);
1202509Smrj 
1203509Smrj 	rp = mp->map_obj.rp;
1204509Smrj 
1205509Smrj 	if (rp->regspec_size == 0) {
1206509Smrj #ifdef  DDI_MAP_DEBUG
1207509Smrj 		ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1208509Smrj #endif  /* DDI_MAP_DEBUG */
1209509Smrj 		return (DDI_ME_INVAL);
1210509Smrj 	}
1211509Smrj 
1212509Smrj 	/*
1213509Smrj 	 * I/O or memory mapping:
1214509Smrj 	 *
1215509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1216509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1217509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1218509Smrj 	 */
1219509Smrj 	if (rp->regspec_bustype != 0) {
1220509Smrj 		/*
1221509Smrj 		 * This is I/O space, which requires no particular
1222509Smrj 		 * processing on unmap since it isn't mapped in the
1223509Smrj 		 * first place.
1224509Smrj 		 */
1225509Smrj 		return (DDI_SUCCESS);
1226509Smrj 	}
1227509Smrj 
1228509Smrj 	/*
1229509Smrj 	 * Memory space
1230509Smrj 	 */
1231509Smrj 	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
1232509Smrj 	npages = mmu_btopr(rp->regspec_size + pgoffset);
1233509Smrj 	hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
1234509Smrj 	device_arena_free(addr - pgoffset, ptob(npages));
1235509Smrj 
1236509Smrj 	/*
1237509Smrj 	 * Destroy the pointer - the mapping has logically gone
1238509Smrj 	 */
1239509Smrj 	*vaddrp = NULL;
1240509Smrj 
1241509Smrj 	return (DDI_SUCCESS);
1242509Smrj }
1243509Smrj 
1244509Smrj 
1245509Smrj /*
1246509Smrj  * rootnex_map_handle()
1247509Smrj  *
12480Sstevel@tonic-gate  */
1249509Smrj static int
1250509Smrj rootnex_map_handle(ddi_map_req_t *mp)
1251509Smrj {
12525084Sjohnlev 	rootnex_addr_t rbase;
1253509Smrj 	ddi_acc_hdl_t *hp;
1254509Smrj 	uint_t pgoffset;
1255509Smrj 	struct regspec *rp;
12565084Sjohnlev 	paddr_t pbase;
1257509Smrj 
1258509Smrj 	rp = mp->map_obj.rp;
1259509Smrj 
1260509Smrj #ifdef	DDI_MAP_DEBUG
1261509Smrj 	ddi_map_debug(
1262509Smrj 	    "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1263509Smrj 	    rp->regspec_bustype, rp->regspec_addr,
1264509Smrj 	    rp->regspec_size, mp->map_handlep);
1265509Smrj #endif	/* DDI_MAP_DEBUG */
1266509Smrj 
1267509Smrj 	/*
1268509Smrj 	 * I/O or memory mapping:
1269509Smrj 	 *
1270509Smrj 	 *	<bustype=0, addr=x, len=x>: memory
1271509Smrj 	 *	<bustype=1, addr=x, len=x>: i/o
1272509Smrj 	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
1273509Smrj 	 */
1274509Smrj 	if (rp->regspec_bustype != 0) {
1275509Smrj 		/*
1276509Smrj 		 * This refers to I/O space, and we don't support "mapping"
1277509Smrj 		 * I/O space to a user.
1278509Smrj 		 */
1279509Smrj 		return (DDI_FAILURE);
1280509Smrj 	}
1281509Smrj 
1282509Smrj 	/*
1283509Smrj 	 * Set up the hat_flags for the mapping.
1284509Smrj 	 */
1285509Smrj 	hp = mp->map_handlep;
1286509Smrj 
1287509Smrj 	switch (hp->ah_acc.devacc_attr_endian_flags) {
1288509Smrj 	case DDI_NEVERSWAP_ACC:
1289509Smrj 		hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
1290509Smrj 		break;
1291509Smrj 	case DDI_STRUCTURE_LE_ACC:
1292509Smrj 		hp->ah_hat_flags = HAT_STRUCTURE_LE;
1293509Smrj 		break;
1294509Smrj 	case DDI_STRUCTURE_BE_ACC:
1295509Smrj 		return (DDI_FAILURE);
1296509Smrj 	default:
1297509Smrj 		return (DDI_REGS_ACC_CONFLICT);
1298509Smrj 	}
1299509Smrj 
1300509Smrj 	switch (hp->ah_acc.devacc_attr_dataorder) {
1301509Smrj 	case DDI_STRICTORDER_ACC:
1302509Smrj 		break;
1303509Smrj 	case DDI_UNORDERED_OK_ACC:
1304509Smrj 		hp->ah_hat_flags |= HAT_UNORDERED_OK;
1305509Smrj 		break;
1306509Smrj 	case DDI_MERGING_OK_ACC:
1307509Smrj 		hp->ah_hat_flags |= HAT_MERGING_OK;
1308509Smrj 		break;
1309509Smrj 	case DDI_LOADCACHING_OK_ACC:
1310509Smrj 		hp->ah_hat_flags |= HAT_LOADCACHING_OK;
1311509Smrj 		break;
1312509Smrj 	case DDI_STORECACHING_OK_ACC:
1313509Smrj 		hp->ah_hat_flags |= HAT_STORECACHING_OK;
1314509Smrj 		break;
1315509Smrj 	default:
1316509Smrj 		return (DDI_FAILURE);
1317509Smrj 	}
1318509Smrj 
13195084Sjohnlev 	rbase = (rootnex_addr_t)rp->regspec_addr &
13205084Sjohnlev 	    (~(rootnex_addr_t)MMU_PAGEOFFSET);
13215084Sjohnlev 	pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1322509Smrj 
1323509Smrj 	if (rp->regspec_size == 0)
1324509Smrj 		return (DDI_ME_INVAL);
1325509Smrj 
13265084Sjohnlev #ifdef __xpv
13275084Sjohnlev 	/*
13285084Sjohnlev 	 * If we're dom0, we're using a real device so we need to translate
13295084Sjohnlev 	 * the MA to a PA.
13305084Sjohnlev 	 */
13315084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
13325084Sjohnlev 		pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) |
13335084Sjohnlev 		    (rbase & MMU_PAGEOFFSET);
13345084Sjohnlev 	} else {
13355084Sjohnlev 		pbase = rbase;
13365084Sjohnlev 	}
13375084Sjohnlev #else
13385084Sjohnlev 	pbase = rbase;
13395084Sjohnlev #endif
13405084Sjohnlev 
13415084Sjohnlev 	hp->ah_pfn = mmu_btop(pbase);
1342509Smrj 	hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
1343509Smrj 
1344509Smrj 	return (DDI_SUCCESS);
1345509Smrj }
13460Sstevel@tonic-gate 
13470Sstevel@tonic-gate 
13480Sstevel@tonic-gate 
13490Sstevel@tonic-gate /*
1350509Smrj  * ************************
1351509Smrj  *  interrupt related code
1352509Smrj  * ************************
13530Sstevel@tonic-gate  */
13540Sstevel@tonic-gate 
13550Sstevel@tonic-gate /*
1356509Smrj  * rootnex_intr_ops()
13570Sstevel@tonic-gate  *	bus_intr_op() function for interrupt support
13580Sstevel@tonic-gate  */
13590Sstevel@tonic-gate /* ARGSUSED */
13600Sstevel@tonic-gate static int
13610Sstevel@tonic-gate rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
13620Sstevel@tonic-gate     ddi_intr_handle_impl_t *hdlp, void *result)
13630Sstevel@tonic-gate {
13640Sstevel@tonic-gate 	struct intrspec			*ispec;
13650Sstevel@tonic-gate 	struct ddi_parent_private_data	*pdp;
13660Sstevel@tonic-gate 
13670Sstevel@tonic-gate 	DDI_INTR_NEXDBG((CE_CONT,
13680Sstevel@tonic-gate 	    "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
13690Sstevel@tonic-gate 	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
13700Sstevel@tonic-gate 
13710Sstevel@tonic-gate 	/* Process the interrupt operation */
13720Sstevel@tonic-gate 	switch (intr_op) {
13730Sstevel@tonic-gate 	case DDI_INTROP_GETCAP:
13740Sstevel@tonic-gate 		/* First check with pcplusmp */
13750Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13760Sstevel@tonic-gate 			return (DDI_FAILURE);
13770Sstevel@tonic-gate 
13780Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
13790Sstevel@tonic-gate 			*(int *)result = 0;
13800Sstevel@tonic-gate 			return (DDI_FAILURE);
13810Sstevel@tonic-gate 		}
13820Sstevel@tonic-gate 		break;
13830Sstevel@tonic-gate 	case DDI_INTROP_SETCAP:
13840Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
13850Sstevel@tonic-gate 			return (DDI_FAILURE);
13860Sstevel@tonic-gate 
13870Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
13880Sstevel@tonic-gate 			return (DDI_FAILURE);
13890Sstevel@tonic-gate 		break;
13900Sstevel@tonic-gate 	case DDI_INTROP_ALLOC:
13910Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
13920Sstevel@tonic-gate 			return (DDI_FAILURE);
13930Sstevel@tonic-gate 		hdlp->ih_pri = ispec->intrspec_pri;
13940Sstevel@tonic-gate 		*(int *)result = hdlp->ih_scratch1;
13950Sstevel@tonic-gate 		break;
13960Sstevel@tonic-gate 	case DDI_INTROP_FREE:
13970Sstevel@tonic-gate 		pdp = ddi_get_parent_data(rdip);
13980Sstevel@tonic-gate 		/*
13990Sstevel@tonic-gate 		 * Special case for 'pcic' driver' only.
14000Sstevel@tonic-gate 		 * If an intrspec was created for it, clean it up here
14010Sstevel@tonic-gate 		 * See detailed comments on this in the function
14020Sstevel@tonic-gate 		 * rootnex_get_ispec().
14030Sstevel@tonic-gate 		 */
14040Sstevel@tonic-gate 		if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
14050Sstevel@tonic-gate 			kmem_free(pdp->par_intr, sizeof (struct intrspec) *
14060Sstevel@tonic-gate 			    pdp->par_nintr);
14070Sstevel@tonic-gate 			/*
14080Sstevel@tonic-gate 			 * Set it to zero; so that
14090Sstevel@tonic-gate 			 * DDI framework doesn't free it again
14100Sstevel@tonic-gate 			 */
14110Sstevel@tonic-gate 			pdp->par_intr = NULL;
14120Sstevel@tonic-gate 			pdp->par_nintr = 0;
14130Sstevel@tonic-gate 		}
14140Sstevel@tonic-gate 		break;
14150Sstevel@tonic-gate 	case DDI_INTROP_GETPRI:
14160Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14170Sstevel@tonic-gate 			return (DDI_FAILURE);
14180Sstevel@tonic-gate 		*(int *)result = ispec->intrspec_pri;
14190Sstevel@tonic-gate 		break;
14200Sstevel@tonic-gate 	case DDI_INTROP_SETPRI:
14210Sstevel@tonic-gate 		/* Validate the interrupt priority passed to us */
14220Sstevel@tonic-gate 		if (*(int *)result > LOCK_LEVEL)
14230Sstevel@tonic-gate 			return (DDI_FAILURE);
14240Sstevel@tonic-gate 
14250Sstevel@tonic-gate 		/* Ensure that PSM is all initialized and ispec is ok */
14260Sstevel@tonic-gate 		if ((psm_intr_ops == NULL) ||
14270Sstevel@tonic-gate 		    ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
14280Sstevel@tonic-gate 			return (DDI_FAILURE);
14290Sstevel@tonic-gate 
14300Sstevel@tonic-gate 		/* Change the priority */
14310Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
14320Sstevel@tonic-gate 		    PSM_FAILURE)
14330Sstevel@tonic-gate 			return (DDI_FAILURE);
14340Sstevel@tonic-gate 
14350Sstevel@tonic-gate 		/* update the ispec with the new priority */
14360Sstevel@tonic-gate 		ispec->intrspec_pri =  *(int *)result;
14370Sstevel@tonic-gate 		break;
14380Sstevel@tonic-gate 	case DDI_INTROP_ADDISR:
14390Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14400Sstevel@tonic-gate 			return (DDI_FAILURE);
14410Sstevel@tonic-gate 		ispec->intrspec_func = hdlp->ih_cb_func;
14420Sstevel@tonic-gate 		break;
14430Sstevel@tonic-gate 	case DDI_INTROP_REMISR:
14440Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14450Sstevel@tonic-gate 			return (DDI_FAILURE);
14460Sstevel@tonic-gate 		ispec->intrspec_func = (uint_t (*)()) 0;
14470Sstevel@tonic-gate 		break;
14480Sstevel@tonic-gate 	case DDI_INTROP_ENABLE:
14490Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14500Sstevel@tonic-gate 			return (DDI_FAILURE);
14510Sstevel@tonic-gate 
14520Sstevel@tonic-gate 		/* Call psmi to translate irq with the dip */
14530Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14540Sstevel@tonic-gate 			return (DDI_FAILURE);
14550Sstevel@tonic-gate 
1456916Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1457*11465SKerry.Shu@Sun.COM 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
1458*11465SKerry.Shu@Sun.COM 		    (int *)&hdlp->ih_vector) == PSM_FAILURE)
1459*11465SKerry.Shu@Sun.COM 			return (DDI_FAILURE);
14600Sstevel@tonic-gate 
14610Sstevel@tonic-gate 		/* Add the interrupt handler */
14620Sstevel@tonic-gate 		if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
14630Sstevel@tonic-gate 		    hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
1464916Sschwartz 		    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
14650Sstevel@tonic-gate 			return (DDI_FAILURE);
14660Sstevel@tonic-gate 		break;
14670Sstevel@tonic-gate 	case DDI_INTROP_DISABLE:
14680Sstevel@tonic-gate 		if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14690Sstevel@tonic-gate 			return (DDI_FAILURE);
14700Sstevel@tonic-gate 
14710Sstevel@tonic-gate 		/* Call psm_ops() to translate irq with the dip */
14720Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14730Sstevel@tonic-gate 			return (DDI_FAILURE);
14740Sstevel@tonic-gate 
1475916Sschwartz 		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
14760Sstevel@tonic-gate 		(void) (*psm_intr_ops)(rdip, hdlp,
14770Sstevel@tonic-gate 		    PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
14780Sstevel@tonic-gate 
14790Sstevel@tonic-gate 		/* Remove the interrupt handler */
14800Sstevel@tonic-gate 		rem_avintr((void *)hdlp, ispec->intrspec_pri,
14810Sstevel@tonic-gate 		    hdlp->ih_cb_func, hdlp->ih_vector);
14820Sstevel@tonic-gate 		break;
14830Sstevel@tonic-gate 	case DDI_INTROP_SETMASK:
14840Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14850Sstevel@tonic-gate 			return (DDI_FAILURE);
14860Sstevel@tonic-gate 
14870Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
14880Sstevel@tonic-gate 			return (DDI_FAILURE);
14890Sstevel@tonic-gate 		break;
14900Sstevel@tonic-gate 	case DDI_INTROP_CLRMASK:
14910Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14920Sstevel@tonic-gate 			return (DDI_FAILURE);
14930Sstevel@tonic-gate 
14940Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
14950Sstevel@tonic-gate 			return (DDI_FAILURE);
14960Sstevel@tonic-gate 		break;
14970Sstevel@tonic-gate 	case DDI_INTROP_GETPENDING:
14980Sstevel@tonic-gate 		if (psm_intr_ops == NULL)
14990Sstevel@tonic-gate 			return (DDI_FAILURE);
15000Sstevel@tonic-gate 
15010Sstevel@tonic-gate 		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
15020Sstevel@tonic-gate 		    result)) {
15030Sstevel@tonic-gate 			*(int *)result = 0;
15040Sstevel@tonic-gate 			return (DDI_FAILURE);
15050Sstevel@tonic-gate 		}
15060Sstevel@tonic-gate 		break;
15072580Sanish 	case DDI_INTROP_NAVAIL:
15080Sstevel@tonic-gate 	case DDI_INTROP_NINTRS:
15092580Sanish 		*(int *)result = i_ddi_get_intx_nintrs(rdip);
15102580Sanish 		if (*(int *)result == 0) {
15110Sstevel@tonic-gate 			/*
15120Sstevel@tonic-gate 			 * Special case for 'pcic' driver' only. This driver
15130Sstevel@tonic-gate 			 * driver is a child of 'isa' and 'rootnex' drivers.
15140Sstevel@tonic-gate 			 *
15150Sstevel@tonic-gate 			 * See detailed comments on this in the function
15160Sstevel@tonic-gate 			 * rootnex_get_ispec().
15170Sstevel@tonic-gate 			 *
15180Sstevel@tonic-gate 			 * Children of 'pcic' send 'NINITR' request all the
15190Sstevel@tonic-gate 			 * way to rootnex driver. But, the 'pdp->par_nintr'
15200Sstevel@tonic-gate 			 * field may not initialized. So, we fake it here
15210Sstevel@tonic-gate 			 * to return 1 (a la what PCMCIA nexus does).
15220Sstevel@tonic-gate 			 */
15230Sstevel@tonic-gate 			if (strcmp(ddi_get_name(rdip), "pcic") == 0)
15240Sstevel@tonic-gate 				*(int *)result = 1;
15252580Sanish 			else
15262580Sanish 				return (DDI_FAILURE);
15270Sstevel@tonic-gate 		}
15280Sstevel@tonic-gate 		break;
15290Sstevel@tonic-gate 	case DDI_INTROP_SUPPORTED_TYPES:
15302580Sanish 		*(int *)result = DDI_INTR_TYPE_FIXED;	/* Always ... */
15310Sstevel@tonic-gate 		break;
15320Sstevel@tonic-gate 	default:
15330Sstevel@tonic-gate 		return (DDI_FAILURE);
15340Sstevel@tonic-gate 	}
15350Sstevel@tonic-gate 
15360Sstevel@tonic-gate 	return (DDI_SUCCESS);
15370Sstevel@tonic-gate }
15380Sstevel@tonic-gate 
15390Sstevel@tonic-gate 
15400Sstevel@tonic-gate /*
1541509Smrj  * rootnex_get_ispec()
1542509Smrj  *	convert an interrupt number to an interrupt specification.
1543509Smrj  *	The interrupt number determines which interrupt spec will be
1544509Smrj  *	returned if more than one exists.
1545509Smrj  *
1546509Smrj  *	Look into the parent private data area of the 'rdip' to find out
1547509Smrj  *	the interrupt specification.  First check to make sure there is
1548509Smrj  *	one that matchs "inumber" and then return a pointer to it.
1549509Smrj  *
1550509Smrj  *	Return NULL if one could not be found.
1551509Smrj  *
1552509Smrj  *	NOTE: This is needed for rootnex_intr_ops()
1553509Smrj  */
1554509Smrj static struct intrspec *
1555509Smrj rootnex_get_ispec(dev_info_t *rdip, int inum)
1556509Smrj {
1557509Smrj 	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
1558509Smrj 
1559509Smrj 	/*
1560509Smrj 	 * Special case handling for drivers that provide their own
1561509Smrj 	 * intrspec structures instead of relying on the DDI framework.
1562509Smrj 	 *
1563509Smrj 	 * A broken hardware driver in ON could potentially provide its
1564509Smrj 	 * own intrspec structure, instead of relying on the hardware.
1565509Smrj 	 * If these drivers are children of 'rootnex' then we need to
1566509Smrj 	 * continue to provide backward compatibility to them here.
1567509Smrj 	 *
1568509Smrj 	 * Following check is a special case for 'pcic' driver which
1569509Smrj 	 * was found to have broken hardwre andby provides its own intrspec.
1570509Smrj 	 *
1571509Smrj 	 * Verbatim comments from this driver are shown here:
1572509Smrj 	 * "Don't use the ddi_add_intr since we don't have a
1573509Smrj 	 * default intrspec in all cases."
1574509Smrj 	 *
1575509Smrj 	 * Since an 'ispec' may not be always created for it,
1576509Smrj 	 * check for that and create one if so.
1577509Smrj 	 *
1578509Smrj 	 * NOTE: Currently 'pcic' is the only driver found to do this.
1579509Smrj 	 */
1580509Smrj 	if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1581509Smrj 		pdp->par_nintr = 1;
1582509Smrj 		pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
1583509Smrj 		    pdp->par_nintr, KM_SLEEP);
1584509Smrj 	}
1585509Smrj 
1586509Smrj 	/* Validate the interrupt number */
1587509Smrj 	if (inum >= pdp->par_nintr)
1588509Smrj 		return (NULL);
1589509Smrj 
1590509Smrj 	/* Get the interrupt structure pointer and return that */
1591509Smrj 	return ((struct intrspec *)&pdp->par_intr[inum]);
1592509Smrj }
1593509Smrj 
1594509Smrj 
1595509Smrj /*
1596509Smrj  * ******************
1597509Smrj  *  dma related code
1598509Smrj  * ******************
1599509Smrj  */
1600509Smrj 
1601509Smrj /*ARGSUSED*/
1602509Smrj static int
16037613SVikram.Hegde@Sun.COM rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
16047613SVikram.Hegde@Sun.COM     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
16057613SVikram.Hegde@Sun.COM     ddi_dma_handle_t *handlep)
1606509Smrj {
1607509Smrj 	uint64_t maxsegmentsize_ll;
1608509Smrj 	uint_t maxsegmentsize;
1609509Smrj 	ddi_dma_impl_t *hp;
1610509Smrj 	rootnex_dma_t *dma;
1611509Smrj 	uint64_t count_max;
1612509Smrj 	uint64_t seg;
1613509Smrj 	int kmflag;
1614509Smrj 	int e;
1615509Smrj 
1616509Smrj 
1617509Smrj 	/* convert our sleep flags */
1618509Smrj 	if (waitfp == DDI_DMA_SLEEP) {
1619509Smrj 		kmflag = KM_SLEEP;
1620509Smrj 	} else {
1621509Smrj 		kmflag = KM_NOSLEEP;
1622509Smrj 	}
1623509Smrj 
1624509Smrj 	/*
1625509Smrj 	 * We try to do only one memory allocation here. We'll do a little
1626509Smrj 	 * pointer manipulation later. If the bind ends up taking more than
1627509Smrj 	 * our prealloc's space, we'll have to allocate more memory in the
1628509Smrj 	 * bind operation. Not great, but much better than before and the
1629509Smrj 	 * best we can do with the current bind interfaces.
1630509Smrj 	 */
1631509Smrj 	hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
1632509Smrj 	if (hp == NULL) {
1633509Smrj 		if (waitfp != DDI_DMA_DONTWAIT) {
1634509Smrj 			ddi_set_callback(waitfp, arg,
1635509Smrj 			    &rootnex_state->r_dvma_call_list_id);
1636509Smrj 		}
1637509Smrj 		return (DDI_DMA_NORESOURCES);
1638509Smrj 	}
1639509Smrj 
1640509Smrj 	/* Do our pointer manipulation now, align the structures */
1641509Smrj 	hp->dmai_private = (void *)(((uintptr_t)hp +
1642509Smrj 	    (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
1643509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1644509Smrj 	dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
1645509Smrj 	    sizeof (rootnex_dma_t) + 0x7) & ~0x7);
1646509Smrj 
1647509Smrj 	/* setup the handle */
1648509Smrj 	rootnex_clean_dmahdl(hp);
1649509Smrj 	dma->dp_dip = rdip;
1650509Smrj 	dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
1651509Smrj 	dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
1652509Smrj 	hp->dmai_minxfer = attr->dma_attr_minxfer;
1653509Smrj 	hp->dmai_burstsizes = attr->dma_attr_burstsizes;
1654509Smrj 	hp->dmai_rdip = rdip;
1655509Smrj 	hp->dmai_attr = *attr;
1656509Smrj 
1657509Smrj 	/* we don't need to worry about the SPL since we do a tryenter */
1658509Smrj 	mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
1659509Smrj 
1660509Smrj 	/*
1661509Smrj 	 * Figure out our maximum segment size. If the segment size is greater
1662509Smrj 	 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1663509Smrj 	 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1664509Smrj 	 * dma_attr_count_max are size-1 type values.
1665509Smrj 	 *
1666509Smrj 	 * Maximum segment size is the largest physically contiguous chunk of
1667509Smrj 	 * memory that we can return from a bind (i.e. the maximum size of a
1668509Smrj 	 * single cookie).
1669509Smrj 	 */
1670509Smrj 
1671509Smrj 	/* handle the rollover cases */
1672509Smrj 	seg = attr->dma_attr_seg + 1;
1673509Smrj 	if (seg < attr->dma_attr_seg) {
1674509Smrj 		seg = attr->dma_attr_seg;
1675509Smrj 	}
1676509Smrj 	count_max = attr->dma_attr_count_max + 1;
1677509Smrj 	if (count_max < attr->dma_attr_count_max) {
1678509Smrj 		count_max = attr->dma_attr_count_max;
1679509Smrj 	}
1680509Smrj 
1681509Smrj 	/*
1682509Smrj 	 * granularity may or may not be a power of two. If it isn't, we can't
1683509Smrj 	 * use a simple mask.
1684509Smrj 	 */
1685509Smrj 	if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) {
1686509Smrj 		dma->dp_granularity_power_2 = B_FALSE;
1687509Smrj 	} else {
1688509Smrj 		dma->dp_granularity_power_2 = B_TRUE;
1689509Smrj 	}
1690509Smrj 
1691509Smrj 	/*
1692509Smrj 	 * maxxfer should be a whole multiple of granularity. If we're going to
1693509Smrj 	 * break up a window because we're greater than maxxfer, we might as
1694509Smrj 	 * well make sure it's maxxfer is a whole multiple so we don't have to
1695509Smrj 	 * worry about triming the window later on for this case.
1696509Smrj 	 */
1697509Smrj 	if (attr->dma_attr_granular > 1) {
1698509Smrj 		if (dma->dp_granularity_power_2) {
1699509Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1700509Smrj 			    (attr->dma_attr_maxxfer &
1701509Smrj 			    (attr->dma_attr_granular - 1));
1702509Smrj 		} else {
1703509Smrj 			dma->dp_maxxfer = attr->dma_attr_maxxfer -
1704509Smrj 			    (attr->dma_attr_maxxfer % attr->dma_attr_granular);
1705509Smrj 		}
1706509Smrj 	} else {
1707509Smrj 		dma->dp_maxxfer = attr->dma_attr_maxxfer;
1708509Smrj 	}
1709509Smrj 
1710509Smrj 	maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
1711509Smrj 	maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
1712509Smrj 	if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
1713509Smrj 		maxsegmentsize = 0xFFFFFFFF;
1714509Smrj 	} else {
1715509Smrj 		maxsegmentsize = maxsegmentsize_ll;
1716509Smrj 	}
1717509Smrj 	dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
1718509Smrj 	dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
1719509Smrj 
1720509Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1721509Smrj 	if (rootnex_alloc_check_parms) {
1722509Smrj 		e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
1723509Smrj 		if (e != DDI_SUCCESS) {
1724509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
1725509Smrj 			(void) rootnex_dma_freehdl(dip, rdip,
1726509Smrj 			    (ddi_dma_handle_t)hp);
1727509Smrj 			return (e);
1728509Smrj 		}
1729509Smrj 	}
1730509Smrj 
1731509Smrj 	*handlep = (ddi_dma_handle_t)hp;
1732509Smrj 
173310902SMark.Johnson@Sun.COM 	ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
173410902SMark.Johnson@Sun.COM 	ROOTNEX_DPROBE1(rootnex__alloc__handle, uint64_t,
1735509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1736509Smrj 
1737509Smrj 	return (DDI_SUCCESS);
1738509Smrj }
1739509Smrj 
1740509Smrj 
1741509Smrj /*
17427613SVikram.Hegde@Sun.COM  * rootnex_dma_allochdl()
17437613SVikram.Hegde@Sun.COM  *    called from ddi_dma_alloc_handle().
1744509Smrj  */
17457613SVikram.Hegde@Sun.COM static int
17467613SVikram.Hegde@Sun.COM rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
17477613SVikram.Hegde@Sun.COM     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
17487613SVikram.Hegde@Sun.COM {
17497613SVikram.Hegde@Sun.COM #if !defined(__xpv)
17507613SVikram.Hegde@Sun.COM 	uint_t error = ENOTSUP;
17517613SVikram.Hegde@Sun.COM 	int retval;
17527613SVikram.Hegde@Sun.COM 
17537613SVikram.Hegde@Sun.COM 	retval = iommulib_nex_open(rdip, &error);
17547613SVikram.Hegde@Sun.COM 
17557613SVikram.Hegde@Sun.COM 	if (retval != DDI_SUCCESS && error == ENOTSUP) {
17567613SVikram.Hegde@Sun.COM 		/* No IOMMU */
17577613SVikram.Hegde@Sun.COM 		return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
17587613SVikram.Hegde@Sun.COM 		    handlep));
17597613SVikram.Hegde@Sun.COM 	} else if (retval != DDI_SUCCESS) {
17607613SVikram.Hegde@Sun.COM 		return (DDI_FAILURE);
17617613SVikram.Hegde@Sun.COM 	}
17627613SVikram.Hegde@Sun.COM 
176310216SVikram.Hegde@Sun.COM 	ASSERT(IOMMU_USED(rdip));
17647613SVikram.Hegde@Sun.COM 
17657613SVikram.Hegde@Sun.COM 	/* has an IOMMU */
17667613SVikram.Hegde@Sun.COM 	return (iommulib_nexdma_allochdl(dip, rdip, attr,
17677613SVikram.Hegde@Sun.COM 	    waitfp, arg, handlep));
17687613SVikram.Hegde@Sun.COM #else
17697613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
17707613SVikram.Hegde@Sun.COM 	    handlep));
17717613SVikram.Hegde@Sun.COM #endif
17727613SVikram.Hegde@Sun.COM }
17737613SVikram.Hegde@Sun.COM 
1774509Smrj /*ARGSUSED*/
1775509Smrj static int
17767613SVikram.Hegde@Sun.COM rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
17777613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle)
1778509Smrj {
1779509Smrj 	ddi_dma_impl_t *hp;
1780509Smrj 	rootnex_dma_t *dma;
1781509Smrj 
1782509Smrj 
1783509Smrj 	hp = (ddi_dma_impl_t *)handle;
1784509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1785509Smrj 
1786509Smrj 	/* unbind should have been called first */
1787509Smrj 	ASSERT(!dma->dp_inuse);
1788509Smrj 
1789509Smrj 	mutex_destroy(&dma->dp_mutex);
1790509Smrj 	kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
1791509Smrj 
179210902SMark.Johnson@Sun.COM 	ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
179310902SMark.Johnson@Sun.COM 	ROOTNEX_DPROBE1(rootnex__free__handle, uint64_t,
1794509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1795509Smrj 
1796509Smrj 	if (rootnex_state->r_dvma_call_list_id)
1797509Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
1798509Smrj 
1799509Smrj 	return (DDI_SUCCESS);
1800509Smrj }
1801509Smrj 
1802509Smrj /*
18037613SVikram.Hegde@Sun.COM  * rootnex_dma_freehdl()
18047613SVikram.Hegde@Sun.COM  *    called from ddi_dma_free_handle().
1805509Smrj  */
18067613SVikram.Hegde@Sun.COM static int
18077613SVikram.Hegde@Sun.COM rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
18087613SVikram.Hegde@Sun.COM {
18097613SVikram.Hegde@Sun.COM #if !defined(__xpv)
181010216SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
18117613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_freehdl(dip, rdip, handle));
18127613SVikram.Hegde@Sun.COM 	}
18137613SVikram.Hegde@Sun.COM #endif
18147613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_freehdl(dip, rdip, handle));
18157613SVikram.Hegde@Sun.COM }
18167613SVikram.Hegde@Sun.COM 
18177613SVikram.Hegde@Sun.COM 
1818509Smrj /*ARGSUSED*/
1819509Smrj static int
18207613SVikram.Hegde@Sun.COM rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
18217613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
18227613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
18230Sstevel@tonic-gate {
1824509Smrj 	rootnex_sglinfo_t *sinfo;
1825509Smrj 	ddi_dma_attr_t *attr;
1826509Smrj 	ddi_dma_impl_t *hp;
1827509Smrj 	rootnex_dma_t *dma;
1828509Smrj 	int kmflag;
1829509Smrj 	int e;
1830509Smrj 
1831509Smrj 
1832509Smrj 	hp = (ddi_dma_impl_t *)handle;
1833509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
1834509Smrj 	sinfo = &dma->dp_sglinfo;
1835509Smrj 	attr = &hp->dmai_attr;
1836509Smrj 
18378215SVikram.Hegde@Sun.COM 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
18388215SVikram.Hegde@Sun.COM 		dma->dp_sleep_flags = KM_SLEEP;
18398215SVikram.Hegde@Sun.COM 	} else {
18408215SVikram.Hegde@Sun.COM 		dma->dp_sleep_flags = KM_NOSLEEP;
18418215SVikram.Hegde@Sun.COM 	}
18428215SVikram.Hegde@Sun.COM 
1843509Smrj 	hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1844509Smrj 
1845509Smrj 	/*
1846509Smrj 	 * This is useful for debugging a driver. Not as useful in a production
1847509Smrj 	 * system. The only time this will fail is if you have a driver bug.
1848509Smrj 	 */
1849509Smrj 	if (rootnex_bind_check_inuse) {
1850509Smrj 		/*
1851509Smrj 		 * No one else should ever have this lock unless someone else
1852509Smrj 		 * is trying to use this handle. So contention on the lock
1853509Smrj 		 * is the same as inuse being set.
1854509Smrj 		 */
1855509Smrj 		e = mutex_tryenter(&dma->dp_mutex);
1856509Smrj 		if (e == 0) {
1857509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1858509Smrj 			return (DDI_DMA_INUSE);
1859509Smrj 		}
1860509Smrj 		if (dma->dp_inuse) {
1861509Smrj 			mutex_exit(&dma->dp_mutex);
1862509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1863509Smrj 			return (DDI_DMA_INUSE);
1864509Smrj 		}
1865509Smrj 		dma->dp_inuse = B_TRUE;
1866509Smrj 		mutex_exit(&dma->dp_mutex);
1867509Smrj 	}
1868509Smrj 
1869509Smrj 	/* check the ddi_dma_attr arg to make sure it makes a little sense */
1870509Smrj 	if (rootnex_bind_check_parms) {
1871509Smrj 		e = rootnex_valid_bind_parms(dmareq, attr);
1872509Smrj 		if (e != DDI_SUCCESS) {
1873509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1874509Smrj 			rootnex_clean_dmahdl(hp);
1875509Smrj 			return (e);
1876509Smrj 		}
1877509Smrj 	}
1878509Smrj 
1879509Smrj 	/* save away the original bind info */
1880509Smrj 	dma->dp_dma = dmareq->dmar_object;
1881509Smrj 
18827613SVikram.Hegde@Sun.COM #if !defined(__xpv)
18837589SVikram.Hegde@Sun.COM 	if (rootnex_state->r_intel_iommu_enabled) {
18847589SVikram.Hegde@Sun.COM 		e = intel_iommu_map_sgl(handle, dmareq,
18857589SVikram.Hegde@Sun.COM 		    rootnex_state->r_prealloc_cookies);
18867589SVikram.Hegde@Sun.COM 
18877589SVikram.Hegde@Sun.COM 		switch (e) {
18887589SVikram.Hegde@Sun.COM 		case IOMMU_SGL_SUCCESS:
18897589SVikram.Hegde@Sun.COM 			goto rootnex_sgl_end;
18907589SVikram.Hegde@Sun.COM 
18917589SVikram.Hegde@Sun.COM 		case IOMMU_SGL_DISABLE:
18927589SVikram.Hegde@Sun.COM 			goto rootnex_sgl_start;
18937589SVikram.Hegde@Sun.COM 
18947589SVikram.Hegde@Sun.COM 		case IOMMU_SGL_NORESOURCES:
18957589SVikram.Hegde@Sun.COM 			cmn_err(CE_WARN, "iommu map sgl failed for %s",
18967589SVikram.Hegde@Sun.COM 			    ddi_node_name(dma->dp_dip));
18977589SVikram.Hegde@Sun.COM 			rootnex_clean_dmahdl(hp);
18987589SVikram.Hegde@Sun.COM 			return (DDI_DMA_NORESOURCES);
18997589SVikram.Hegde@Sun.COM 
19007589SVikram.Hegde@Sun.COM 		default:
19017589SVikram.Hegde@Sun.COM 			cmn_err(CE_WARN,
19027589SVikram.Hegde@Sun.COM 			    "undefined value returned from"
19037589SVikram.Hegde@Sun.COM 			    " intel_iommu_map_sgl: %d",
19047589SVikram.Hegde@Sun.COM 			    e);
19057589SVikram.Hegde@Sun.COM 			rootnex_clean_dmahdl(hp);
19067589SVikram.Hegde@Sun.COM 			return (DDI_DMA_NORESOURCES);
19077589SVikram.Hegde@Sun.COM 		}
19087589SVikram.Hegde@Sun.COM 	}
19097613SVikram.Hegde@Sun.COM #endif
19107589SVikram.Hegde@Sun.COM 
19117589SVikram.Hegde@Sun.COM rootnex_sgl_start:
1912509Smrj 	/*
1913509Smrj 	 * Figure out a rough estimate of what maximum number of pages this
1914509Smrj 	 * buffer could use (a high estimate of course).
1915509Smrj 	 */
1916509Smrj 	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
1917509Smrj 
1918509Smrj 	/*
1919509Smrj 	 * We'll use the pre-allocated cookies for any bind that will *always*
1920509Smrj 	 * fit (more important to be consistent, we don't want to create
1921509Smrj 	 * additional degenerate cases).
1922509Smrj 	 */
1923509Smrj 	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
1924509Smrj 		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
1925509Smrj 		dma->dp_need_to_free_cookie = B_FALSE;
1926509Smrj 		DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
1927509Smrj 		    uint_t, sinfo->si_max_pages);
1928509Smrj 
1929509Smrj 	/*
1930509Smrj 	 * For anything larger than that, we'll go ahead and allocate the
1931509Smrj 	 * maximum number of pages we expect to see. Hopefuly, we won't be
1932509Smrj 	 * seeing this path in the fast path for high performance devices very
1933509Smrj 	 * frequently.
1934509Smrj 	 *
1935509Smrj 	 * a ddi bind interface that allowed the driver to provide storage to
1936509Smrj 	 * the bind interface would speed this case up.
1937509Smrj 	 */
1938509Smrj 	} else {
1939509Smrj 		/* convert the sleep flags */
1940509Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
1941509Smrj 			kmflag =  KM_SLEEP;
1942509Smrj 		} else {
1943509Smrj 			kmflag =  KM_NOSLEEP;
1944509Smrj 		}
1945509Smrj 
1946509Smrj 		/*
1947509Smrj 		 * Save away how much memory we allocated. If we're doing a
1948509Smrj 		 * nosleep, the alloc could fail...
1949509Smrj 		 */
1950509Smrj 		dma->dp_cookie_size = sinfo->si_max_pages *
1951509Smrj 		    sizeof (ddi_dma_cookie_t);
1952509Smrj 		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
1953509Smrj 		if (dma->dp_cookies == NULL) {
1954509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
1955509Smrj 			rootnex_clean_dmahdl(hp);
1956509Smrj 			return (DDI_DMA_NORESOURCES);
1957509Smrj 		}
1958509Smrj 		dma->dp_need_to_free_cookie = B_TRUE;
1959509Smrj 		DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t,
1960509Smrj 		    sinfo->si_max_pages);
1961509Smrj 	}
1962509Smrj 	hp->dmai_cookie = dma->dp_cookies;
1963509Smrj 
1964509Smrj 	/*
1965509Smrj 	 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
1966509Smrj 	 * looking at the contraints in the dma structure. It will then put some
1967509Smrj 	 * additional state about the sgl in the dma struct (i.e. is the sgl
1968509Smrj 	 * clean, or do we need to do some munging; how many pages need to be
1969509Smrj 	 * copied, etc.)
1970509Smrj 	 */
1971509Smrj 	rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies,
1972509Smrj 	    &dma->dp_sglinfo);
19737589SVikram.Hegde@Sun.COM 
19747589SVikram.Hegde@Sun.COM rootnex_sgl_end:
1975509Smrj 	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
1976509Smrj 	/* if we don't need a copy buffer, we don't need to sync */
1977509Smrj 	if (sinfo->si_copybuf_req == 0) {
1978509Smrj 		hp->dmai_rflags |= DMP_NOSYNC;
1979509Smrj 	}
1980509Smrj 
1981509Smrj 	/*
1982509Smrj 	 * if we don't need the copybuf and we don't need to do a partial,  we
1983509Smrj 	 * hit the fast path. All the high performance devices should be trying
1984509Smrj 	 * to hit this path. To hit this path, a device should be able to reach
1985509Smrj 	 * all of memory, shouldn't try to bind more than it can transfer, and
1986509Smrj 	 * the buffer shouldn't require more cookies than the driver/device can
1987509Smrj 	 * handle [sgllen]).
1988509Smrj 	 */
1989509Smrj 	if ((sinfo->si_copybuf_req == 0) &&
1990509Smrj 	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
1991509Smrj 	    (dma->dp_dma.dmao_size < dma->dp_maxxfer)) {
1992509Smrj 		/*
19935591Sstephh 		 * If the driver supports FMA, insert the handle in the FMA DMA
19945591Sstephh 		 * handle cache.
19955591Sstephh 		 */
19965591Sstephh 		if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
19975591Sstephh 			hp->dmai_error.err_cf = rootnex_dma_check;
19985591Sstephh 			(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
19995591Sstephh 		}
20005591Sstephh 
20015591Sstephh 		/*
2002509Smrj 		 * copy out the first cookie and ccountp, set the cookie
2003509Smrj 		 * pointer to the second cookie. The first cookie is passed
2004509Smrj 		 * back on the stack. Additional cookies are accessed via
2005509Smrj 		 * ddi_dma_nextcookie()
2006509Smrj 		 */
2007509Smrj 		*cookiep = dma->dp_cookies[0];
2008509Smrj 		*ccountp = sinfo->si_sgl_size;
2009509Smrj 		hp->dmai_cookie++;
2010509Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2011509Smrj 		hp->dmai_nwin = 1;
201210902SMark.Johnson@Sun.COM 		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
201310902SMark.Johnson@Sun.COM 		ROOTNEX_DPROBE3(rootnex__bind__fast, dev_info_t *, rdip,
201410902SMark.Johnson@Sun.COM 		    uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
2015509Smrj 		    dma->dp_dma.dmao_size);
2016509Smrj 		return (DDI_DMA_MAPPED);
2017509Smrj 	}
2018509Smrj 
2019509Smrj 	/*
2020509Smrj 	 * go to the slow path, we may need to alloc more memory, create
2021509Smrj 	 * multiple windows, and munge up a sgl to make the device happy.
2022509Smrj 	 */
2023509Smrj 	e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag);
2024509Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
2025509Smrj 		if (dma->dp_need_to_free_cookie) {
2026509Smrj 			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2027509Smrj 		}
2028509Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2029509Smrj 		rootnex_clean_dmahdl(hp); /* must be after free cookie */
2030509Smrj 		return (e);
2031509Smrj 	}
2032509Smrj 
20335591Sstephh 	/*
20345591Sstephh 	 * If the driver supports FMA, insert the handle in the FMA DMA handle
20355591Sstephh 	 * cache.
20365591Sstephh 	 */
20375591Sstephh 	if (attr->dma_attr_flags & DDI_DMA_FLAGERR) {
20385591Sstephh 		hp->dmai_error.err_cf = rootnex_dma_check;
20395591Sstephh 		(void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL);
20405591Sstephh 	}
20415591Sstephh 
2042509Smrj 	/* if the first window uses the copy buffer, sync it for the device */
2043509Smrj 	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
2044509Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
20458215SVikram.Hegde@Sun.COM 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
2046509Smrj 		    DDI_DMA_SYNC_FORDEV);
2047509Smrj 	}
2048509Smrj 
2049509Smrj 	/*
2050509Smrj 	 * copy out the first cookie and ccountp, set the cookie pointer to the
2051509Smrj 	 * second cookie. Make sure the partial flag is set/cleared correctly.
2052509Smrj 	 * If we have a partial map (i.e. multiple windows), the number of
2053509Smrj 	 * cookies we return is the number of cookies in the first window.
2054509Smrj 	 */
2055509Smrj 	if (e == DDI_DMA_MAPPED) {
2056509Smrj 		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2057509Smrj 		*ccountp = sinfo->si_sgl_size;
2058509Smrj 	} else {
2059509Smrj 		hp->dmai_rflags |= DDI_DMA_PARTIAL;
2060509Smrj 		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
2061509Smrj 		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
2062509Smrj 	}
2063509Smrj 	*cookiep = dma->dp_cookies[0];
2064509Smrj 	hp->dmai_cookie++;
2065509Smrj 
206610902SMark.Johnson@Sun.COM 	ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
206710902SMark.Johnson@Sun.COM 	ROOTNEX_DPROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
2068509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
2069509Smrj 	    dma->dp_dma.dmao_size);
2070509Smrj 	return (e);
2071509Smrj }
2072509Smrj 
2073509Smrj 
2074509Smrj /*
20757613SVikram.Hegde@Sun.COM  * rootnex_dma_bindhdl()
20767613SVikram.Hegde@Sun.COM  *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
2077509Smrj  */
20787613SVikram.Hegde@Sun.COM static int
20797613SVikram.Hegde@Sun.COM rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
20807613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
20817613SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
20827613SVikram.Hegde@Sun.COM {
20837613SVikram.Hegde@Sun.COM #if !defined(__xpv)
208410216SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
20857613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq,
20867613SVikram.Hegde@Sun.COM 		    cookiep, ccountp));
20877613SVikram.Hegde@Sun.COM 	}
20887613SVikram.Hegde@Sun.COM #endif
20897613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_bindhdl(dip, rdip, handle, dmareq,
20907613SVikram.Hegde@Sun.COM 	    cookiep, ccountp));
20917613SVikram.Hegde@Sun.COM }
20927613SVikram.Hegde@Sun.COM 
2093509Smrj /*ARGSUSED*/
2094509Smrj static int
20957613SVikram.Hegde@Sun.COM rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2096509Smrj     ddi_dma_handle_t handle)
2097509Smrj {
2098509Smrj 	ddi_dma_impl_t *hp;
2099509Smrj 	rootnex_dma_t *dma;
2100509Smrj 	int e;
2101509Smrj 
2102509Smrj 	hp = (ddi_dma_impl_t *)handle;
2103509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
2104509Smrj 
2105509Smrj 	/* make sure the buffer wasn't free'd before calling unbind */
2106509Smrj 	if (rootnex_unbind_verify_buffer) {
2107509Smrj 		e = rootnex_verify_buffer(dma);
2108509Smrj 		if (e != DDI_SUCCESS) {
2109509Smrj 			ASSERT(0);
2110509Smrj 			return (DDI_FAILURE);
2111509Smrj 		}
2112509Smrj 	}
2113509Smrj 
2114509Smrj 	/* sync the current window before unbinding the buffer */
2115509Smrj 	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
2116509Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
21178215SVikram.Hegde@Sun.COM 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
2118509Smrj 		    DDI_DMA_SYNC_FORCPU);
2119509Smrj 	}
2120509Smrj 
2121509Smrj 	/*
21221865Sdilpreet 	 * If the driver supports FMA, remove the handle in the FMA DMA handle
21231865Sdilpreet 	 * cache.
21241865Sdilpreet 	 */
21251865Sdilpreet 	if (hp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
21261865Sdilpreet 		if ((DEVI(rdip)->devi_fmhdl != NULL) &&
21271865Sdilpreet 		    (DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap))) {
21281865Sdilpreet 			(void) ndi_fmc_remove(rdip, DMA_HANDLE, hp);
21291865Sdilpreet 		}
21301865Sdilpreet 	}
21311865Sdilpreet 
21321865Sdilpreet 	/*
2133509Smrj 	 * cleanup and copy buffer or window state. if we didn't use the copy
2134509Smrj 	 * buffer or windows, there won't be much to do :-)
2135509Smrj 	 */
2136509Smrj 	rootnex_teardown_copybuf(dma);
2137509Smrj 	rootnex_teardown_windows(dma);
2138509Smrj 
21397613SVikram.Hegde@Sun.COM #if !defined(__xpv)
2140509Smrj 	/*
21417589SVikram.Hegde@Sun.COM 	 * If intel iommu enabled, clean up the page tables and free the dvma
21427589SVikram.Hegde@Sun.COM 	 */
21437589SVikram.Hegde@Sun.COM 	if (rootnex_state->r_intel_iommu_enabled) {
21447589SVikram.Hegde@Sun.COM 		intel_iommu_unmap_sgl(handle);
21457589SVikram.Hegde@Sun.COM 	}
21467613SVikram.Hegde@Sun.COM #endif
21477589SVikram.Hegde@Sun.COM 
21487589SVikram.Hegde@Sun.COM 	/*
2149509Smrj 	 * If we had to allocate space to for the worse case sgl (it didn't
2150509Smrj 	 * fit into our pre-allocate buffer), free that up now
2151509Smrj 	 */
2152509Smrj 	if (dma->dp_need_to_free_cookie) {
2153509Smrj 		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2154509Smrj 	}
2155509Smrj 
2156509Smrj 	/*
2157509Smrj 	 * clean up the handle so it's ready for the next bind (i.e. if the
2158509Smrj 	 * handle is reused).
2159509Smrj 	 */
2160509Smrj 	rootnex_clean_dmahdl(hp);
2161509Smrj 
2162509Smrj 	if (rootnex_state->r_dvma_call_list_id)
2163509Smrj 		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
2164509Smrj 
216510902SMark.Johnson@Sun.COM 	ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
216610902SMark.Johnson@Sun.COM 	ROOTNEX_DPROBE1(rootnex__unbind, uint64_t,
2167509Smrj 	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2168509Smrj 
2169509Smrj 	return (DDI_SUCCESS);
2170509Smrj }
2171509Smrj 
21727613SVikram.Hegde@Sun.COM /*
21737613SVikram.Hegde@Sun.COM  * rootnex_dma_unbindhdl()
21747613SVikram.Hegde@Sun.COM  *    called from ddi_dma_unbind_handle()
21757613SVikram.Hegde@Sun.COM  */
21767613SVikram.Hegde@Sun.COM /*ARGSUSED*/
21777613SVikram.Hegde@Sun.COM static int
21787613SVikram.Hegde@Sun.COM rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
21797613SVikram.Hegde@Sun.COM     ddi_dma_handle_t handle)
21807613SVikram.Hegde@Sun.COM {
21817613SVikram.Hegde@Sun.COM #if !defined(__xpv)
218210216SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
21837613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_unbindhdl(dip, rdip, handle));
21847613SVikram.Hegde@Sun.COM 	}
21857613SVikram.Hegde@Sun.COM #endif
21867613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_unbindhdl(dip, rdip, handle));
21877613SVikram.Hegde@Sun.COM }
21887613SVikram.Hegde@Sun.COM 
21897617SVikram.Hegde@Sun.COM #if !defined(__xpv)
21908215SVikram.Hegde@Sun.COM 
21918215SVikram.Hegde@Sun.COM static int
21928215SVikram.Hegde@Sun.COM rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle)
21938215SVikram.Hegde@Sun.COM {
21948215SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
21958215SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
21968215SVikram.Hegde@Sun.COM 
21978215SVikram.Hegde@Sun.COM 	if (dma->dp_sleep_flags != KM_SLEEP &&
21988215SVikram.Hegde@Sun.COM 	    dma->dp_sleep_flags != KM_NOSLEEP)
21998215SVikram.Hegde@Sun.COM 		cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle");
22008215SVikram.Hegde@Sun.COM 	return (dma->dp_sleep_flags);
22018215SVikram.Hegde@Sun.COM }
22027613SVikram.Hegde@Sun.COM /*ARGSUSED*/
22037613SVikram.Hegde@Sun.COM static void
22047613SVikram.Hegde@Sun.COM rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
22057613SVikram.Hegde@Sun.COM {
22067613SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
22077613SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
22088215SVikram.Hegde@Sun.COM 	rootnex_window_t *window;
22098215SVikram.Hegde@Sun.COM 
22108215SVikram.Hegde@Sun.COM 	if (dma->dp_window) {
22118215SVikram.Hegde@Sun.COM 		window = &dma->dp_window[dma->dp_current_win];
22128215SVikram.Hegde@Sun.COM 		hp->dmai_cookie = window->wd_first_cookie;
22138215SVikram.Hegde@Sun.COM 	} else {
22148215SVikram.Hegde@Sun.COM 		hp->dmai_cookie = dma->dp_cookies;
22158215SVikram.Hegde@Sun.COM 	}
22167613SVikram.Hegde@Sun.COM 	hp->dmai_cookie++;
22177613SVikram.Hegde@Sun.COM }
22187613SVikram.Hegde@Sun.COM 
22197613SVikram.Hegde@Sun.COM /*ARGSUSED*/
22207613SVikram.Hegde@Sun.COM static int
22217613SVikram.Hegde@Sun.COM rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
22228215SVikram.Hegde@Sun.COM     ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
22238215SVikram.Hegde@Sun.COM {
22248215SVikram.Hegde@Sun.COM 	int i;
22258215SVikram.Hegde@Sun.COM 	int km_flags;
22268215SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
22278215SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
22288215SVikram.Hegde@Sun.COM 	rootnex_window_t *window;
22298215SVikram.Hegde@Sun.COM 	ddi_dma_cookie_t *cp;
22308215SVikram.Hegde@Sun.COM 	ddi_dma_cookie_t *cookie;
22318215SVikram.Hegde@Sun.COM 
22328215SVikram.Hegde@Sun.COM 	ASSERT(*cookiepp == NULL);
22338215SVikram.Hegde@Sun.COM 	ASSERT(*ccountp == 0);
22348215SVikram.Hegde@Sun.COM 
22358215SVikram.Hegde@Sun.COM 	if (dma->dp_window) {
22368215SVikram.Hegde@Sun.COM 		window = &dma->dp_window[dma->dp_current_win];
22378215SVikram.Hegde@Sun.COM 		cp = window->wd_first_cookie;
22388215SVikram.Hegde@Sun.COM 		*ccountp = window->wd_cookie_cnt;
22398215SVikram.Hegde@Sun.COM 	} else {
22408215SVikram.Hegde@Sun.COM 		cp = dma->dp_cookies;
22418215SVikram.Hegde@Sun.COM 		*ccountp = dma->dp_sglinfo.si_sgl_size;
22428215SVikram.Hegde@Sun.COM 	}
22438215SVikram.Hegde@Sun.COM 
22448215SVikram.Hegde@Sun.COM 	km_flags = rootnex_coredma_get_sleep_flags(handle);
22458215SVikram.Hegde@Sun.COM 	cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags);
22468215SVikram.Hegde@Sun.COM 	if (cookie == NULL) {
22478215SVikram.Hegde@Sun.COM 		return (DDI_DMA_NORESOURCES);
22488215SVikram.Hegde@Sun.COM 	}
22498215SVikram.Hegde@Sun.COM 
22508215SVikram.Hegde@Sun.COM 	for (i = 0; i < *ccountp; i++) {
22518215SVikram.Hegde@Sun.COM 		cookie[i].dmac_notused = cp[i].dmac_notused;
22528215SVikram.Hegde@Sun.COM 		cookie[i].dmac_type = cp[i].dmac_type;
22538215SVikram.Hegde@Sun.COM 		cookie[i].dmac_address = cp[i].dmac_address;
22548215SVikram.Hegde@Sun.COM 		cookie[i].dmac_size = cp[i].dmac_size;
22558215SVikram.Hegde@Sun.COM 	}
22568215SVikram.Hegde@Sun.COM 
22578215SVikram.Hegde@Sun.COM 	*cookiepp = cookie;
22588215SVikram.Hegde@Sun.COM 
22598215SVikram.Hegde@Sun.COM 	return (DDI_SUCCESS);
22608215SVikram.Hegde@Sun.COM }
22618215SVikram.Hegde@Sun.COM 
22628215SVikram.Hegde@Sun.COM /*ARGSUSED*/
22638215SVikram.Hegde@Sun.COM static int
22648215SVikram.Hegde@Sun.COM rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
22658215SVikram.Hegde@Sun.COM     ddi_dma_cookie_t *cookiep, uint_t ccount)
22667613SVikram.Hegde@Sun.COM {
22677613SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
22687613SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
22698215SVikram.Hegde@Sun.COM 	rootnex_window_t *window;
22708215SVikram.Hegde@Sun.COM 	ddi_dma_cookie_t *cur_cookiep;
22718215SVikram.Hegde@Sun.COM 
22728215SVikram.Hegde@Sun.COM 	ASSERT(cookiep);
22738215SVikram.Hegde@Sun.COM 	ASSERT(ccount != 0);
22748215SVikram.Hegde@Sun.COM 	ASSERT(dma->dp_need_to_switch_cookies == B_FALSE);
22758215SVikram.Hegde@Sun.COM 
22768215SVikram.Hegde@Sun.COM 	if (dma->dp_window) {
22778215SVikram.Hegde@Sun.COM 		window = &dma->dp_window[dma->dp_current_win];
22788215SVikram.Hegde@Sun.COM 		dma->dp_saved_cookies = window->wd_first_cookie;
22798215SVikram.Hegde@Sun.COM 		window->wd_first_cookie = cookiep;
22808215SVikram.Hegde@Sun.COM 		ASSERT(ccount == window->wd_cookie_cnt);
22818215SVikram.Hegde@Sun.COM 		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
22828215SVikram.Hegde@Sun.COM 		    + window->wd_first_cookie;
22837613SVikram.Hegde@Sun.COM 	} else {
22848215SVikram.Hegde@Sun.COM 		dma->dp_saved_cookies = dma->dp_cookies;
22858215SVikram.Hegde@Sun.COM 		dma->dp_cookies = cookiep;
22868215SVikram.Hegde@Sun.COM 		ASSERT(ccount == dma->dp_sglinfo.si_sgl_size);
22878215SVikram.Hegde@Sun.COM 		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
22888215SVikram.Hegde@Sun.COM 		    + dma->dp_cookies;
22897613SVikram.Hegde@Sun.COM 	}
22908215SVikram.Hegde@Sun.COM 
22918215SVikram.Hegde@Sun.COM 	dma->dp_need_to_switch_cookies = B_TRUE;
22928215SVikram.Hegde@Sun.COM 	hp->dmai_cookie = cur_cookiep;
22937613SVikram.Hegde@Sun.COM 
22947613SVikram.Hegde@Sun.COM 	return (DDI_SUCCESS);
22957613SVikram.Hegde@Sun.COM }
22968215SVikram.Hegde@Sun.COM 
22978215SVikram.Hegde@Sun.COM /*ARGSUSED*/
22988215SVikram.Hegde@Sun.COM static int
22998215SVikram.Hegde@Sun.COM rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
23008215SVikram.Hegde@Sun.COM {
23018215SVikram.Hegde@Sun.COM 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
23028215SVikram.Hegde@Sun.COM 	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
23038215SVikram.Hegde@Sun.COM 	rootnex_window_t *window;
23048215SVikram.Hegde@Sun.COM 	ddi_dma_cookie_t *cur_cookiep;
23058215SVikram.Hegde@Sun.COM 	ddi_dma_cookie_t *cookie_array;
23068215SVikram.Hegde@Sun.COM 	uint_t ccount;
23078215SVikram.Hegde@Sun.COM 
23088215SVikram.Hegde@Sun.COM 	/* check if cookies have not been switched */
23098215SVikram.Hegde@Sun.COM 	if (dma->dp_need_to_switch_cookies == B_FALSE)
23108215SVikram.Hegde@Sun.COM 		return (DDI_SUCCESS);
23118215SVikram.Hegde@Sun.COM 
23128215SVikram.Hegde@Sun.COM 	ASSERT(dma->dp_saved_cookies);
23138215SVikram.Hegde@Sun.COM 
23148215SVikram.Hegde@Sun.COM 	if (dma->dp_window) {
23158215SVikram.Hegde@Sun.COM 		window = &dma->dp_window[dma->dp_current_win];
23168215SVikram.Hegde@Sun.COM 		cookie_array = window->wd_first_cookie;
23178215SVikram.Hegde@Sun.COM 		window->wd_first_cookie = dma->dp_saved_cookies;
23188215SVikram.Hegde@Sun.COM 		dma->dp_saved_cookies = NULL;
23198215SVikram.Hegde@Sun.COM 		ccount = window->wd_cookie_cnt;
23208215SVikram.Hegde@Sun.COM 		cur_cookiep = (hp->dmai_cookie - cookie_array)
23218215SVikram.Hegde@Sun.COM 		    + window->wd_first_cookie;
23228215SVikram.Hegde@Sun.COM 	} else {
23238215SVikram.Hegde@Sun.COM 		cookie_array = dma->dp_cookies;
23248215SVikram.Hegde@Sun.COM 		dma->dp_cookies = dma->dp_saved_cookies;
23258215SVikram.Hegde@Sun.COM 		dma->dp_saved_cookies = NULL;
23268215SVikram.Hegde@Sun.COM 		ccount = dma->dp_sglinfo.si_sgl_size;
23278215SVikram.Hegde@Sun.COM 		cur_cookiep = (hp->dmai_cookie - cookie_array)
23288215SVikram.Hegde@Sun.COM 		    + dma->dp_cookies;
23298215SVikram.Hegde@Sun.COM 	}
23308215SVikram.Hegde@Sun.COM 
23318215SVikram.Hegde@Sun.COM 	kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
23328215SVikram.Hegde@Sun.COM 
23338215SVikram.Hegde@Sun.COM 	hp->dmai_cookie = cur_cookiep;
23348215SVikram.Hegde@Sun.COM 
23358215SVikram.Hegde@Sun.COM 	dma->dp_need_to_switch_cookies = B_FALSE;
23368215SVikram.Hegde@Sun.COM 
23378215SVikram.Hegde@Sun.COM 	return (DDI_SUCCESS);
23388215SVikram.Hegde@Sun.COM }
23398215SVikram.Hegde@Sun.COM 
23407617SVikram.Hegde@Sun.COM #endif
2341509Smrj 
2342509Smrj /*
2343509Smrj  * rootnex_verify_buffer()
2344509Smrj  *   verify buffer wasn't free'd
2345509Smrj  */
2346509Smrj static int
2347509Smrj rootnex_verify_buffer(rootnex_dma_t *dma)
2348509Smrj {
2349509Smrj 	page_t **pplist;
2350509Smrj 	caddr_t vaddr;
2351509Smrj 	uint_t pcnt;
2352509Smrj 	uint_t poff;
2353509Smrj 	page_t *pp;
23541865Sdilpreet 	char b;
2355509Smrj 	int i;
2356509Smrj 
2357509Smrj 	/* Figure out how many pages this buffer occupies */
2358509Smrj 	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
2359509Smrj 		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
2360509Smrj 	} else {
2361509Smrj 		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
2362509Smrj 		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2363509Smrj 	}
2364509Smrj 	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
2365509Smrj 
2366509Smrj 	switch (dma->dp_dma.dmao_type) {
23670Sstevel@tonic-gate 	case DMA_OTYP_PAGES:
2368509Smrj 		/*
2369509Smrj 		 * for a linked list of pp's walk through them to make sure
2370509Smrj 		 * they're locked and not free.
2371509Smrj 		 */
2372509Smrj 		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
2373509Smrj 		for (i = 0; i < pcnt; i++) {
2374509Smrj 			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
2375509Smrj 				return (DDI_FAILURE);
23760Sstevel@tonic-gate 			}
2377509Smrj 			pp = pp->p_next;
23780Sstevel@tonic-gate 		}
23790Sstevel@tonic-gate 		break;
2380509Smrj 
23810Sstevel@tonic-gate 	case DMA_OTYP_VADDR:
23820Sstevel@tonic-gate 	case DMA_OTYP_BUFVADDR:
2383509Smrj 		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
2384509Smrj 		/*
2385509Smrj 		 * for an array of pp's walk through them to make sure they're
2386509Smrj 		 * not free. It's possible that they may not be locked.
2387509Smrj 		 */
2388509Smrj 		if (pplist) {
2389509Smrj 			for (i = 0; i < pcnt; i++) {
2390509Smrj 				if (PP_ISFREE(pplist[i])) {
2391509Smrj 					return (DDI_FAILURE);
2392509Smrj 				}
2393509Smrj 			}
2394509Smrj 
2395509Smrj 		/* For a virtual address, try to peek at each page */
2396509Smrj 		} else {
2397509Smrj 			if (dma->dp_sglinfo.si_asp == &kas) {
2398509Smrj 				for (i = 0; i < pcnt; i++) {
23991865Sdilpreet 					if (ddi_peek8(NULL, vaddr, &b) ==
24001865Sdilpreet 					    DDI_FAILURE)
2401509Smrj 						return (DDI_FAILURE);
24021865Sdilpreet 					vaddr += MMU_PAGESIZE;
2403509Smrj 				}
2404509Smrj 			}
2405509Smrj 		}
2406509Smrj 		break;
2407509Smrj 
2408509Smrj 	default:
2409509Smrj 		ASSERT(0);
2410509Smrj 		break;
2411509Smrj 	}
2412509Smrj 
2413509Smrj 	return (DDI_SUCCESS);
2414509Smrj }
2415509Smrj 
2416509Smrj 
2417509Smrj /*
2418509Smrj  * rootnex_clean_dmahdl()
2419509Smrj  *    Clean the dma handle. This should be called on a handle alloc and an
2420509Smrj  *    unbind handle. Set the handle state to the default settings.
2421509Smrj  */
2422509Smrj static void
2423509Smrj rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
2424509Smrj {
2425509Smrj 	rootnex_dma_t *dma;
2426509Smrj 
2427509Smrj 
2428509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
2429509Smrj 
2430509Smrj 	hp->dmai_nwin = 0;
2431509Smrj 	dma->dp_current_cookie = 0;
2432509Smrj 	dma->dp_copybuf_size = 0;
2433509Smrj 	dma->dp_window = NULL;
2434509Smrj 	dma->dp_cbaddr = NULL;
2435509Smrj 	dma->dp_inuse = B_FALSE;
2436509Smrj 	dma->dp_need_to_free_cookie = B_FALSE;
24378215SVikram.Hegde@Sun.COM 	dma->dp_need_to_switch_cookies = B_FALSE;
24388215SVikram.Hegde@Sun.COM 	dma->dp_saved_cookies = NULL;
24398215SVikram.Hegde@Sun.COM 	dma->dp_sleep_flags = KM_PANIC;
2440509Smrj 	dma->dp_need_to_free_window = B_FALSE;
2441509Smrj 	dma->dp_partial_required = B_FALSE;
2442509Smrj 	dma->dp_trim_required = B_FALSE;
2443509Smrj 	dma->dp_sglinfo.si_copybuf_req = 0;
2444509Smrj #if !defined(__amd64)
2445509Smrj 	dma->dp_cb_remaping = B_FALSE;
2446509Smrj 	dma->dp_kva = NULL;
2447509Smrj #endif
2448509Smrj 
2449509Smrj 	/* FMA related initialization */
2450509Smrj 	hp->dmai_fault = 0;
2451509Smrj 	hp->dmai_fault_check = NULL;
2452509Smrj 	hp->dmai_fault_notify = NULL;
2453509Smrj 	hp->dmai_error.err_ena = 0;
2454509Smrj 	hp->dmai_error.err_status = DDI_FM_OK;
2455509Smrj 	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
2456509Smrj 	hp->dmai_error.err_ontrap = NULL;
2457509Smrj 	hp->dmai_error.err_fep = NULL;
24581865Sdilpreet 	hp->dmai_error.err_cf = NULL;
2459509Smrj }
2460509Smrj 
2461509Smrj 
2462509Smrj /*
2463509Smrj  * rootnex_valid_alloc_parms()
2464509Smrj  *    Called in ddi_dma_alloc_handle path to validate its parameters.
2465509Smrj  */
2466509Smrj static int
2467509Smrj rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
2468509Smrj {
2469509Smrj 	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
2470509Smrj 	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
2471509Smrj 	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
2472509Smrj 	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
2473509Smrj 		return (DDI_DMA_BADATTR);
2474509Smrj 	}
2475509Smrj 
2476509Smrj 	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
2477509Smrj 		return (DDI_DMA_BADATTR);
2478509Smrj 	}
2479509Smrj 
2480509Smrj 	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
2481509Smrj 	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
2482509Smrj 	    attr->dma_attr_sgllen <= 0) {
2483509Smrj 		return (DDI_DMA_BADATTR);
2484509Smrj 	}
2485509Smrj 
2486509Smrj 	/* We should be able to DMA into every byte offset in a page */
2487509Smrj 	if (maxsegmentsize < MMU_PAGESIZE) {
2488509Smrj 		return (DDI_DMA_BADATTR);
2489509Smrj 	}
2490509Smrj 
2491509Smrj 	return (DDI_SUCCESS);
2492509Smrj }
2493509Smrj 
2494509Smrj 
2495509Smrj /*
2496509Smrj  * rootnex_valid_bind_parms()
2497509Smrj  *    Called in ddi_dma_*_bind_handle path to validate its parameters.
2498509Smrj  */
2499509Smrj /* ARGSUSED */
2500509Smrj static int
2501509Smrj rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
2502509Smrj {
2503509Smrj #if !defined(__amd64)
2504509Smrj 	/*
2505509Smrj 	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
2506509Smrj 	 * we can track the offset for the obsoleted interfaces.
2507509Smrj 	 */
2508509Smrj 	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
2509509Smrj 		return (DDI_DMA_TOOBIG);
2510509Smrj 	}
2511509Smrj #endif
2512509Smrj 
2513509Smrj 	return (DDI_SUCCESS);
2514509Smrj }
2515509Smrj 
2516509Smrj 
2517509Smrj /*
2518509Smrj  * rootnex_get_sgl()
2519509Smrj  *    Called in bind fastpath to get the sgl. Most of this will be replaced
2520509Smrj  *    with a call to the vm layer when vm2.0 comes around...
2521509Smrj  */
2522509Smrj static void
2523509Smrj rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
2524509Smrj     rootnex_sglinfo_t *sglinfo)
2525509Smrj {
2526509Smrj 	ddi_dma_atyp_t buftype;
25275084Sjohnlev 	rootnex_addr_t raddr;
2528509Smrj 	uint64_t last_page;
2529509Smrj 	uint64_t offset;
2530509Smrj 	uint64_t addrhi;
2531509Smrj 	uint64_t addrlo;
2532509Smrj 	uint64_t maxseg;
2533509Smrj 	page_t **pplist;
2534509Smrj 	uint64_t paddr;
2535509Smrj 	uint32_t psize;
2536509Smrj 	uint32_t size;
2537509Smrj 	caddr_t vaddr;
2538509Smrj 	uint_t pcnt;
2539509Smrj 	page_t *pp;
2540509Smrj 	uint_t cnt;
2541509Smrj 
2542509Smrj 
2543509Smrj 	/* shortcuts */
2544509Smrj 	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
2545509Smrj 	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
2546509Smrj 	maxseg = sglinfo->si_max_cookie_size;
2547509Smrj 	buftype = dmar_object->dmao_type;
2548509Smrj 	addrhi = sglinfo->si_max_addr;
2549509Smrj 	addrlo = sglinfo->si_min_addr;
2550509Smrj 	size = dmar_object->dmao_size;
2551509Smrj 
2552509Smrj 	pcnt = 0;
2553509Smrj 	cnt = 0;
2554509Smrj 
2555509Smrj 	/*
2556509Smrj 	 * if we were passed down a linked list of pages, i.e. pointer to
2557509Smrj 	 * page_t, use this to get our physical address and buf offset.
2558509Smrj 	 */
2559509Smrj 	if (buftype == DMA_OTYP_PAGES) {
2560509Smrj 		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
2561509Smrj 		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2562509Smrj 		offset =  dmar_object->dmao_obj.pp_obj.pp_offset &
2563509Smrj 		    MMU_PAGEOFFSET;
25645084Sjohnlev 		paddr = pfn_to_pa(pp->p_pagenum) + offset;
2565509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2566509Smrj 		pp = pp->p_next;
2567509Smrj 		sglinfo->si_asp = NULL;
2568509Smrj 
2569509Smrj 	/*
2570509Smrj 	 * We weren't passed down a linked list of pages, but if we were passed
2571509Smrj 	 * down an array of pages, use this to get our physical address and buf
2572509Smrj 	 * offset.
2573509Smrj 	 */
2574509Smrj 	} else if (pplist != NULL) {
2575509Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2576509Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
2577509Smrj 
2578509Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2579509Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2580509Smrj 		if (sglinfo->si_asp == NULL) {
2581509Smrj 			sglinfo->si_asp = &kas;
2582509Smrj 		}
2583509Smrj 
2584509Smrj 		ASSERT(!PP_ISFREE(pplist[pcnt]));
25855084Sjohnlev 		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2586509Smrj 		paddr += offset;
2587509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2588509Smrj 		pcnt++;
2589509Smrj 
2590509Smrj 	/*
2591509Smrj 	 * All we have is a virtual address, we'll need to call into the VM
2592509Smrj 	 * to get the physical address.
2593509Smrj 	 */
2594509Smrj 	} else {
2595509Smrj 		ASSERT((buftype == DMA_OTYP_VADDR) ||
2596509Smrj 		    (buftype == DMA_OTYP_BUFVADDR));
2597509Smrj 
2598509Smrj 		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2599509Smrj 		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2600509Smrj 		if (sglinfo->si_asp == NULL) {
2601509Smrj 			sglinfo->si_asp = &kas;
2602509Smrj 		}
2603509Smrj 
26045084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
2605509Smrj 		paddr += offset;
2606509Smrj 		psize = MIN(size, (MMU_PAGESIZE - offset));
2607509Smrj 		vaddr += psize;
2608509Smrj 	}
2609509Smrj 
26105084Sjohnlev #ifdef __xpv
26115084Sjohnlev 	/*
26125084Sjohnlev 	 * If we're dom0, we're using a real device so we need to load
26135084Sjohnlev 	 * the cookies with MFNs instead of PFNs.
26145084Sjohnlev 	 */
26155084Sjohnlev 	raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
26165084Sjohnlev #else
26175084Sjohnlev 	raddr = paddr;
26185084Sjohnlev #endif
26195084Sjohnlev 
2620509Smrj 	/*
2621509Smrj 	 * Setup the first cookie with the physical address of the page and the
2622509Smrj 	 * size of the page (which takes into account the initial offset into
2623509Smrj 	 * the page.
2624509Smrj 	 */
26255084Sjohnlev 	sgl[cnt].dmac_laddress = raddr;
2626509Smrj 	sgl[cnt].dmac_size = psize;
2627509Smrj 	sgl[cnt].dmac_type = 0;
2628509Smrj 
2629509Smrj 	/*
2630509Smrj 	 * Save away the buffer offset into the page. We'll need this later in
2631509Smrj 	 * the copy buffer code to help figure out the page index within the
2632509Smrj 	 * buffer and the offset into the current page.
2633509Smrj 	 */
2634509Smrj 	sglinfo->si_buf_offset = offset;
2635509Smrj 
2636509Smrj 	/*
2637509Smrj 	 * If the DMA engine can't reach the physical address, increase how
2638509Smrj 	 * much copy buffer we need. We always increase by pagesize so we don't
2639509Smrj 	 * have to worry about converting offsets. Set a flag in the cookies
2640509Smrj 	 * dmac_type to indicate that it uses the copy buffer. If this isn't the
2641509Smrj 	 * last cookie, go to the next cookie (since we separate each page which
2642509Smrj 	 * uses the copy buffer in case the copy buffer is not physically
2643509Smrj 	 * contiguous.
2644509Smrj 	 */
26455084Sjohnlev 	if ((raddr < addrlo) || ((raddr + psize) > addrhi)) {
2646509Smrj 		sglinfo->si_copybuf_req += MMU_PAGESIZE;
2647509Smrj 		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2648509Smrj 		if ((cnt + 1) < sglinfo->si_max_pages) {
2649509Smrj 			cnt++;
2650509Smrj 			sgl[cnt].dmac_laddress = 0;
2651509Smrj 			sgl[cnt].dmac_size = 0;
2652509Smrj 			sgl[cnt].dmac_type = 0;
2653509Smrj 		}
2654509Smrj 	}
2655509Smrj 
2656509Smrj 	/*
2657509Smrj 	 * save this page's physical address so we can figure out if the next
2658509Smrj 	 * page is physically contiguous. Keep decrementing size until we are
2659509Smrj 	 * done with the buffer.
2660509Smrj 	 */
26615084Sjohnlev 	last_page = raddr & MMU_PAGEMASK;
2662509Smrj 	size -= psize;
2663509Smrj 
2664509Smrj 	while (size > 0) {
2665509Smrj 		/* Get the size for this page (i.e. partial or full page) */
2666509Smrj 		psize = MIN(size, MMU_PAGESIZE);
2667509Smrj 
2668509Smrj 		if (buftype == DMA_OTYP_PAGES) {
2669509Smrj 			/* get the paddr from the page_t */
2670509Smrj 			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
26715084Sjohnlev 			paddr = pfn_to_pa(pp->p_pagenum);
2672509Smrj 			pp = pp->p_next;
2673509Smrj 		} else if (pplist != NULL) {
2674509Smrj 			/* index into the array of page_t's to get the paddr */
2675509Smrj 			ASSERT(!PP_ISFREE(pplist[pcnt]));
26765084Sjohnlev 			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2677509Smrj 			pcnt++;
26780Sstevel@tonic-gate 		} else {
2679509Smrj 			/* call into the VM to get the paddr */
26805084Sjohnlev 			paddr =  pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
2681509Smrj 			    vaddr));
2682509Smrj 			vaddr += psize;
2683509Smrj 		}
2684509Smrj 
26855084Sjohnlev #ifdef __xpv
26865084Sjohnlev 		/*
26875084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
26885084Sjohnlev 		 * the cookies with MFNs instead of PFNs.
26895084Sjohnlev 		 */
26905084Sjohnlev 		raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
26915084Sjohnlev #else
26925084Sjohnlev 		raddr = paddr;
26935084Sjohnlev #endif
2694509Smrj 		/* check to see if this page needs the copy buffer */
26955084Sjohnlev 		if ((raddr < addrlo) || ((raddr + psize) > addrhi)) {
2696509Smrj 			sglinfo->si_copybuf_req += MMU_PAGESIZE;
2697509Smrj 
26980Sstevel@tonic-gate 			/*
2699509Smrj 			 * if there is something in the current cookie, go to
2700509Smrj 			 * the next one. We only want one page in a cookie which
2701509Smrj 			 * uses the copybuf since the copybuf doesn't have to
2702509Smrj 			 * be physically contiguous.
2703509Smrj 			 */
2704509Smrj 			if (sgl[cnt].dmac_size != 0) {
2705509Smrj 				cnt++;
2706509Smrj 			}
27075084Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
2708509Smrj 			sgl[cnt].dmac_size = psize;
2709509Smrj #if defined(__amd64)
2710509Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
2711509Smrj #else
2712509Smrj 			/*
2713509Smrj 			 * save the buf offset for 32-bit kernel. used in the
2714509Smrj 			 * obsoleted interfaces.
2715509Smrj 			 */
2716509Smrj 			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
2717509Smrj 			    (dmar_object->dmao_size - size);
2718509Smrj #endif
2719509Smrj 			/* if this isn't the last cookie, go to the next one */
2720509Smrj 			if ((cnt + 1) < sglinfo->si_max_pages) {
2721509Smrj 				cnt++;
2722509Smrj 				sgl[cnt].dmac_laddress = 0;
2723509Smrj 				sgl[cnt].dmac_size = 0;
2724509Smrj 				sgl[cnt].dmac_type = 0;
2725509Smrj 			}
2726509Smrj 
2727509Smrj 		/*
2728509Smrj 		 * this page didn't need the copy buffer, if it's not physically
2729509Smrj 		 * contiguous, or it would put us over a segment boundary, or it
2730509Smrj 		 * puts us over the max cookie size, or the current sgl doesn't
2731509Smrj 		 * have anything in it.
2732509Smrj 		 */
27335084Sjohnlev 		} else if (((last_page + MMU_PAGESIZE) != raddr) ||
27345084Sjohnlev 		    !(raddr & sglinfo->si_segmask) ||
2735509Smrj 		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
2736509Smrj 		    (sgl[cnt].dmac_size == 0)) {
2737509Smrj 			/*
2738509Smrj 			 * if we're not already in a new cookie, go to the next
2739509Smrj 			 * cookie.
2740509Smrj 			 */
2741509Smrj 			if (sgl[cnt].dmac_size != 0) {
2742509Smrj 				cnt++;
2743509Smrj 			}
2744509Smrj 
2745509Smrj 			/* save the cookie information */
27465084Sjohnlev 			sgl[cnt].dmac_laddress = raddr;
2747509Smrj 			sgl[cnt].dmac_size = psize;
2748509Smrj #if defined(__amd64)
2749509Smrj 			sgl[cnt].dmac_type = 0;
2750509Smrj #else
2751509Smrj 			/*
2752509Smrj 			 * save the buf offset for 32-bit kernel. used in the
2753509Smrj 			 * obsoleted interfaces.
2754509Smrj 			 */
2755509Smrj 			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
2756509Smrj #endif
2757509Smrj 
2758509Smrj 		/*
2759509Smrj 		 * this page didn't need the copy buffer, it is physically
2760509Smrj 		 * contiguous with the last page, and it's <= the max cookie
2761509Smrj 		 * size.
2762509Smrj 		 */
2763509Smrj 		} else {
2764509Smrj 			sgl[cnt].dmac_size += psize;
2765509Smrj 
2766509Smrj 			/*
2767509Smrj 			 * if this exactly ==  the maximum cookie size, and
2768509Smrj 			 * it isn't the last cookie, go to the next cookie.
2769509Smrj 			 */
2770509Smrj 			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
2771509Smrj 			    ((cnt + 1) < sglinfo->si_max_pages)) {
2772509Smrj 				cnt++;
2773509Smrj 				sgl[cnt].dmac_laddress = 0;
2774509Smrj 				sgl[cnt].dmac_size = 0;
2775509Smrj 				sgl[cnt].dmac_type = 0;
2776509Smrj 			}
2777509Smrj 		}
2778509Smrj 
2779509Smrj 		/*
2780509Smrj 		 * save this page's physical address so we can figure out if the
2781509Smrj 		 * next page is physically contiguous. Keep decrementing size
2782509Smrj 		 * until we are done with the buffer.
2783509Smrj 		 */
27845084Sjohnlev 		last_page = raddr;
2785509Smrj 		size -= psize;
2786509Smrj 	}
2787509Smrj 
2788509Smrj 	/* we're done, save away how many cookies the sgl has */
2789509Smrj 	if (sgl[cnt].dmac_size == 0) {
2790509Smrj 		ASSERT(cnt < sglinfo->si_max_pages);
2791509Smrj 		sglinfo->si_sgl_size = cnt;
2792509Smrj 	} else {
2793509Smrj 		sglinfo->si_sgl_size = cnt + 1;
2794509Smrj 	}
2795509Smrj }
2796509Smrj 
2797509Smrj 
2798509Smrj /*
2799509Smrj  * rootnex_bind_slowpath()
2800509Smrj  *    Call in the bind path if the calling driver can't use the sgl without
2801509Smrj  *    modifying it. We either need to use the copy buffer and/or we will end up
2802509Smrj  *    with a partial bind.
2803509Smrj  */
2804509Smrj static int
2805509Smrj rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
2806509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag)
2807509Smrj {
2808509Smrj 	rootnex_sglinfo_t *sinfo;
2809509Smrj 	rootnex_window_t *window;
2810509Smrj 	ddi_dma_cookie_t *cookie;
2811509Smrj 	size_t copybuf_used;
2812509Smrj 	size_t dmac_size;
2813509Smrj 	boolean_t partial;
2814509Smrj 	off_t cur_offset;
2815509Smrj 	page_t *cur_pp;
2816509Smrj 	major_t mnum;
2817509Smrj 	int e;
2818509Smrj 	int i;
2819509Smrj 
2820509Smrj 
2821509Smrj 	sinfo = &dma->dp_sglinfo;
2822509Smrj 	copybuf_used = 0;
2823509Smrj 	partial = B_FALSE;
2824509Smrj 
2825509Smrj 	/*
2826509Smrj 	 * If we're using the copybuf, set the copybuf state in dma struct.
2827509Smrj 	 * Needs to be first since it sets the copy buffer size.
2828509Smrj 	 */
2829509Smrj 	if (sinfo->si_copybuf_req != 0) {
2830509Smrj 		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
2831509Smrj 		if (e != DDI_SUCCESS) {
2832509Smrj 			return (e);
2833509Smrj 		}
2834509Smrj 	} else {
2835509Smrj 		dma->dp_copybuf_size = 0;
2836509Smrj 	}
2837509Smrj 
2838509Smrj 	/*
2839509Smrj 	 * Figure out if we need to do a partial mapping. If so, figure out
2840509Smrj 	 * if we need to trim the buffers when we munge the sgl.
2841509Smrj 	 */
2842509Smrj 	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
2843509Smrj 	    (dma->dp_dma.dmao_size > dma->dp_maxxfer) ||
2844509Smrj 	    (attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
2845509Smrj 		dma->dp_partial_required = B_TRUE;
2846509Smrj 		if (attr->dma_attr_granular != 1) {
2847509Smrj 			dma->dp_trim_required = B_TRUE;
2848509Smrj 		}
2849509Smrj 	} else {
2850509Smrj 		dma->dp_partial_required = B_FALSE;
2851509Smrj 		dma->dp_trim_required = B_FALSE;
2852509Smrj 	}
2853509Smrj 
2854509Smrj 	/* If we need to do a partial bind, make sure the driver supports it */
2855509Smrj 	if (dma->dp_partial_required &&
2856509Smrj 	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
2857509Smrj 
2858509Smrj 		mnum = ddi_driver_major(dma->dp_dip);
2859509Smrj 		/*
2860509Smrj 		 * patchable which allows us to print one warning per major
2861509Smrj 		 * number.
2862509Smrj 		 */
2863509Smrj 		if ((rootnex_bind_warn) &&
2864509Smrj 		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
2865509Smrj 			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
2866509Smrj 			cmn_err(CE_WARN, "!%s: coding error detected, the "
2867509Smrj 			    "driver is using ddi_dma_attr(9S) incorrectly. "
2868509Smrj 			    "There is a small risk of data corruption in "
2869509Smrj 			    "particular with large I/Os. The driver should be "
2870509Smrj 			    "replaced with a corrected version for proper "
2871509Smrj 			    "system operation. To disable this warning, add "
2872509Smrj 			    "'set rootnex:rootnex_bind_warn=0' to "
2873509Smrj 			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
2874509Smrj 		}
2875509Smrj 		return (DDI_DMA_TOOBIG);
2876509Smrj 	}
2877509Smrj 
2878509Smrj 	/*
2879509Smrj 	 * we might need multiple windows, setup state to handle them. In this
2880509Smrj 	 * code path, we will have at least one window.
2881509Smrj 	 */
2882509Smrj 	e = rootnex_setup_windows(hp, dma, attr, kmflag);
2883509Smrj 	if (e != DDI_SUCCESS) {
2884509Smrj 		rootnex_teardown_copybuf(dma);
2885509Smrj 		return (e);
2886509Smrj 	}
2887509Smrj 
2888509Smrj 	window = &dma->dp_window[0];
2889509Smrj 	cookie = &dma->dp_cookies[0];
2890509Smrj 	cur_offset = 0;
2891509Smrj 	rootnex_init_win(hp, dma, window, cookie, cur_offset);
2892509Smrj 	if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) {
2893509Smrj 		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
2894509Smrj 	}
2895509Smrj 
2896509Smrj 	/* loop though all the cookies we got back from get_sgl() */
2897509Smrj 	for (i = 0; i < sinfo->si_sgl_size; i++) {
2898509Smrj 		/*
2899509Smrj 		 * If we're using the copy buffer, check this cookie and setup
2900509Smrj 		 * its associated copy buffer state. If this cookie uses the
2901509Smrj 		 * copy buffer, make sure we sync this window during dma_sync.
2902509Smrj 		 */
2903509Smrj 		if (dma->dp_copybuf_size > 0) {
2904509Smrj 			rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie,
2905509Smrj 			    cur_offset, &copybuf_used, &cur_pp);
2906509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2907509Smrj 				window->wd_dosync = B_TRUE;
2908509Smrj 			}
2909509Smrj 		}
2910509Smrj 
2911509Smrj 		/*
2912509Smrj 		 * save away the cookie size, since it could be modified in
2913509Smrj 		 * the windowing code.
2914509Smrj 		 */
2915509Smrj 		dmac_size = cookie->dmac_size;
2916509Smrj 
2917509Smrj 		/* if we went over max copybuf size */
2918509Smrj 		if (dma->dp_copybuf_size &&
2919509Smrj 		    (copybuf_used > dma->dp_copybuf_size)) {
2920509Smrj 			partial = B_TRUE;
2921509Smrj 			e = rootnex_copybuf_window_boundary(hp, dma, &window,
2922509Smrj 			    cookie, cur_offset, &copybuf_used);
2923509Smrj 			if (e != DDI_SUCCESS) {
2924509Smrj 				rootnex_teardown_copybuf(dma);
2925509Smrj 				rootnex_teardown_windows(dma);
2926509Smrj 				return (e);
2927509Smrj 			}
2928509Smrj 
2929509Smrj 			/*
2930509Smrj 			 * if the coookie uses the copy buffer, make sure the
2931509Smrj 			 * new window we just moved to is set to sync.
2932509Smrj 			 */
2933509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2934509Smrj 				window->wd_dosync = B_TRUE;
2935509Smrj 			}
2936509Smrj 			DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *,
2937509Smrj 			    dma->dp_dip);
2938509Smrj 
2939509Smrj 		/* if the cookie cnt == max sgllen, move to the next window */
2940509Smrj 		} else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) {
2941509Smrj 			partial = B_TRUE;
2942509Smrj 			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
2943509Smrj 			e = rootnex_sgllen_window_boundary(hp, dma, &window,
2944509Smrj 			    cookie, attr, cur_offset);
2945509Smrj 			if (e != DDI_SUCCESS) {
2946509Smrj 				rootnex_teardown_copybuf(dma);
2947509Smrj 				rootnex_teardown_windows(dma);
2948509Smrj 				return (e);
2949509Smrj 			}
2950509Smrj 
2951509Smrj 			/*
2952509Smrj 			 * if the coookie uses the copy buffer, make sure the
2953509Smrj 			 * new window we just moved to is set to sync.
2954509Smrj 			 */
2955509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2956509Smrj 				window->wd_dosync = B_TRUE;
2957509Smrj 			}
2958509Smrj 			DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *,
2959509Smrj 			    dma->dp_dip);
2960509Smrj 
2961509Smrj 		/* else if we will be over maxxfer */
2962509Smrj 		} else if ((window->wd_size + dmac_size) >
2963509Smrj 		    dma->dp_maxxfer) {
2964509Smrj 			partial = B_TRUE;
2965509Smrj 			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
2966509Smrj 			    cookie);
2967509Smrj 			if (e != DDI_SUCCESS) {
2968509Smrj 				rootnex_teardown_copybuf(dma);
2969509Smrj 				rootnex_teardown_windows(dma);
2970509Smrj 				return (e);
2971509Smrj 			}
2972509Smrj 
2973509Smrj 			/*
2974509Smrj 			 * if the coookie uses the copy buffer, make sure the
2975509Smrj 			 * new window we just moved to is set to sync.
29760Sstevel@tonic-gate 			 */
2977509Smrj 			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
2978509Smrj 				window->wd_dosync = B_TRUE;
2979509Smrj 			}
2980509Smrj 			DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *,
2981509Smrj 			    dma->dp_dip);
2982509Smrj 
2983509Smrj 		/* else this cookie fits in the current window */
2984509Smrj 		} else {
2985509Smrj 			window->wd_cookie_cnt++;
2986509Smrj 			window->wd_size += dmac_size;
2987509Smrj 		}
2988509Smrj 
2989509Smrj 		/* track our offset into the buffer, go to the next cookie */
2990509Smrj 		ASSERT(dmac_size <= dma->dp_dma.dmao_size);
2991509Smrj 		ASSERT(cookie->dmac_size <= dmac_size);
2992509Smrj 		cur_offset += dmac_size;
2993509Smrj 		cookie++;
2994509Smrj 	}
2995509Smrj 
2996509Smrj 	/* if we ended up with a zero sized window in the end, clean it up */
2997509Smrj 	if (window->wd_size == 0) {
2998509Smrj 		hp->dmai_nwin--;
2999509Smrj 		window--;
3000509Smrj 	}
3001509Smrj 
3002509Smrj 	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
3003509Smrj 
3004509Smrj 	if (!partial) {
3005509Smrj 		return (DDI_DMA_MAPPED);
3006509Smrj 	}
3007509Smrj 
3008509Smrj 	ASSERT(dma->dp_partial_required);
3009509Smrj 	return (DDI_DMA_PARTIAL_MAP);
3010509Smrj }
3011509Smrj 
3012509Smrj 
3013509Smrj /*
3014509Smrj  * rootnex_setup_copybuf()
3015509Smrj  *    Called in bind slowpath. Figures out if we're going to use the copy
3016509Smrj  *    buffer, and if we do, sets up the basic state to handle it.
3017509Smrj  */
3018509Smrj static int
3019509Smrj rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
3020509Smrj     rootnex_dma_t *dma, ddi_dma_attr_t *attr)
3021509Smrj {
3022509Smrj 	rootnex_sglinfo_t *sinfo;
3023509Smrj 	ddi_dma_attr_t lattr;
3024509Smrj 	size_t max_copybuf;
3025509Smrj 	int cansleep;
3026509Smrj 	int e;
3027509Smrj #if !defined(__amd64)
3028509Smrj 	int vmflag;
3029509Smrj #endif
3030509Smrj 
3031509Smrj 
3032509Smrj 	sinfo = &dma->dp_sglinfo;
3033509Smrj 
30345251Smrj 	/* read this first so it's consistent through the routine  */
30355251Smrj 	max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;
3036509Smrj 
3037509Smrj 	/* We need to call into the rootnex on ddi_dma_sync() */
3038509Smrj 	hp->dmai_rflags &= ~DMP_NOSYNC;
3039509Smrj 
3040509Smrj 	/* make sure the copybuf size <= the max size */
3041509Smrj 	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
3042509Smrj 	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
3043509Smrj 
3044509Smrj #if !defined(__amd64)
3045509Smrj 	/*
3046509Smrj 	 * if we don't have kva space to copy to/from, allocate the KVA space
3047509Smrj 	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
3048509Smrj 	 * the 64-bit kernel.
3049509Smrj 	 */
3050509Smrj 	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
3051509Smrj 	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
3052509Smrj 
3053509Smrj 		/* convert the sleep flags */
3054509Smrj 		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
3055509Smrj 			vmflag = VM_SLEEP;
3056509Smrj 		} else {
3057509Smrj 			vmflag = VM_NOSLEEP;
3058509Smrj 		}
3059509Smrj 
3060509Smrj 		/* allocate Kernel VA space that we can bcopy to/from */
3061509Smrj 		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
3062509Smrj 		    vmflag);
3063509Smrj 		if (dma->dp_kva == NULL) {
3064509Smrj 			return (DDI_DMA_NORESOURCES);
3065509Smrj 		}
3066509Smrj 	}
3067509Smrj #endif
3068509Smrj 
3069509Smrj 	/* convert the sleep flags */
3070509Smrj 	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
3071509Smrj 		cansleep = 1;
3072509Smrj 	} else {
3073509Smrj 		cansleep = 0;
3074509Smrj 	}
3075509Smrj 
3076509Smrj 	/*
30777173Smrj 	 * Allocate the actual copy buffer. This needs to fit within the DMA
30787173Smrj 	 * engine limits, so we can't use kmem_alloc... We don't need
30797173Smrj 	 * contiguous memory (sgllen) since we will be forcing windows on
30807173Smrj 	 * sgllen anyway.
3081509Smrj 	 */
3082509Smrj 	lattr = *attr;
3083509Smrj 	lattr.dma_attr_align = MMU_PAGESIZE;
30847173Smrj 	/*
30857173Smrj 	 * this should be < 0 to indicate no limit, but due to a bug in
30867173Smrj 	 * the rootnex, we'll set it to the maximum positive int.
30877173Smrj 	 */
30887173Smrj 	lattr.dma_attr_sgllen = 0x7fffffff;
3089509Smrj 	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
3090509Smrj 	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
3091509Smrj 	if (e != DDI_SUCCESS) {
3092509Smrj #if !defined(__amd64)
3093509Smrj 		if (dma->dp_kva != NULL) {
3094509Smrj 			vmem_free(heap_arena, dma->dp_kva,
3095509Smrj 			    dma->dp_copybuf_size);
3096509Smrj 		}
3097509Smrj #endif
3098509Smrj 		return (DDI_DMA_NORESOURCES);
3099509Smrj 	}
3100509Smrj 
3101509Smrj 	DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
3102509Smrj 	    size_t, dma->dp_copybuf_size);
3103509Smrj 
3104509Smrj 	return (DDI_SUCCESS);
3105509Smrj }
3106509Smrj 
3107509Smrj 
3108509Smrj /*
3109509Smrj  * rootnex_setup_windows()
3110509Smrj  *    Called in bind slowpath to setup the window state. We always have windows
3111509Smrj  *    in the slowpath. Even if the window count = 1.
3112509Smrj  */
3113509Smrj static int
3114509Smrj rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3115509Smrj     ddi_dma_attr_t *attr, int kmflag)
3116509Smrj {
3117509Smrj 	rootnex_window_t *windowp;
3118509Smrj 	rootnex_sglinfo_t *sinfo;
3119509Smrj 	size_t copy_state_size;
3120509Smrj 	size_t win_state_size;
3121509Smrj 	size_t state_available;
3122509Smrj 	size_t space_needed;
3123509Smrj 	uint_t copybuf_win;
3124509Smrj 	uint_t maxxfer_win;
3125509Smrj 	size_t space_used;
3126509Smrj 	uint_t sglwin;
3127509Smrj 
3128509Smrj 
3129509Smrj 	sinfo = &dma->dp_sglinfo;
3130509Smrj 
3131509Smrj 	dma->dp_current_win = 0;
3132509Smrj 	hp->dmai_nwin = 0;
3133509Smrj 
3134509Smrj 	/* If we don't need to do a partial, we only have one window */
3135509Smrj 	if (!dma->dp_partial_required) {
3136509Smrj 		dma->dp_max_win = 1;
3137509Smrj 
3138509Smrj 	/*
3139509Smrj 	 * we need multiple windows, need to figure out the worse case number
3140509Smrj 	 * of windows.
3141509Smrj 	 */
3142509Smrj 	} else {
3143509Smrj 		/*
3144509Smrj 		 * if we need windows because we need more copy buffer that
3145509Smrj 		 * we allow, the worse case number of windows we could need
3146509Smrj 		 * here would be (copybuf space required / copybuf space that
3147509Smrj 		 * we have) plus one for remainder, and plus 2 to handle the
3148509Smrj 		 * extra pages on the trim for the first and last pages of the
3149509Smrj 		 * buffer (a page is the minimum window size so under the right
3150509Smrj 		 * attr settings, you could have a window for each page).
3151509Smrj 		 * The last page will only be hit here if the size is not a
3152509Smrj 		 * multiple of the granularity (which theoretically shouldn't
3153509Smrj 		 * be the case but never has been enforced, so we could have
3154509Smrj 		 * broken things without it).
3155509Smrj 		 */
3156509Smrj 		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
3157509Smrj 			ASSERT(dma->dp_copybuf_size > 0);
3158509Smrj 			copybuf_win = (sinfo->si_copybuf_req /
3159509Smrj 			    dma->dp_copybuf_size) + 1 + 2;
3160509Smrj 		} else {
3161509Smrj 			copybuf_win = 0;
3162509Smrj 		}
3163509Smrj 
3164509Smrj 		/*
3165509Smrj 		 * if we need windows because we have more cookies than the H/W
3166509Smrj 		 * can handle, the number of windows we would need here would
3167509Smrj 		 * be (cookie count / cookies count H/W supports) plus one for
3168509Smrj 		 * remainder, and plus 2 to handle the extra pages on the trim
3169509Smrj 		 * (see above comment about trim)
3170509Smrj 		 */
3171509Smrj 		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
3172509Smrj 			sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen)
3173509Smrj 			    + 1) + 2;
3174509Smrj 		} else {
3175509Smrj 			sglwin = 0;
3176509Smrj 		}
3177509Smrj 
3178509Smrj 		/*
3179509Smrj 		 * if we need windows because we're binding more memory than the
3180509Smrj 		 * H/W can transfer at once, the number of windows we would need
3181509Smrj 		 * here would be (xfer count / max xfer H/W supports) plus one
3182509Smrj 		 * for remainder, and plus 2 to handle the extra pages on the
3183509Smrj 		 * trim (see above comment about trim)
3184509Smrj 		 */
3185509Smrj 		if (dma->dp_dma.dmao_size > dma->dp_maxxfer) {
3186509Smrj 			maxxfer_win = (dma->dp_dma.dmao_size /
3187509Smrj 			    dma->dp_maxxfer) + 1 + 2;
3188509Smrj 		} else {
3189509Smrj 			maxxfer_win = 0;
3190509Smrj 		}
3191509Smrj 		dma->dp_max_win =  copybuf_win + sglwin + maxxfer_win;
3192509Smrj 		ASSERT(dma->dp_max_win > 0);
3193509Smrj 	}
3194509Smrj 	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
3195509Smrj 
3196509Smrj 	/*
3197509Smrj 	 * Get space for window and potential copy buffer state. Before we
3198509Smrj 	 * go and allocate memory, see if we can get away with using what's
3199509Smrj 	 * left in the pre-allocted state or the dynamically allocated sgl.
3200509Smrj 	 */
3201509Smrj 	space_used = (uintptr_t)(sinfo->si_sgl_size *
3202509Smrj 	    sizeof (ddi_dma_cookie_t));
3203509Smrj 
3204509Smrj 	/* if we dynamically allocated space for the cookies */
3205509Smrj 	if (dma->dp_need_to_free_cookie) {
3206509Smrj 		/* if we have more space in the pre-allocted buffer, use it */
3207509Smrj 		ASSERT(space_used <= dma->dp_cookie_size);
3208509Smrj 		if ((dma->dp_cookie_size - space_used) <=
3209509Smrj 		    rootnex_state->r_prealloc_size) {
3210509Smrj 			state_available = rootnex_state->r_prealloc_size;
3211509Smrj 			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
3212509Smrj 
3213509Smrj 		/*
3214509Smrj 		 * else, we have more free space in the dynamically allocated
3215509Smrj 		 * buffer, i.e. the buffer wasn't worse case fragmented so we
3216509Smrj 		 * didn't need a lot of cookies.
3217509Smrj 		 */
3218509Smrj 		} else {
3219509Smrj 			state_available = dma->dp_cookie_size - space_used;
3220509Smrj 			windowp = (rootnex_window_t *)
3221509Smrj 			    &dma->dp_cookies[sinfo->si_sgl_size];
3222509Smrj 		}
3223509Smrj 
3224509Smrj 	/* we used the pre-alloced buffer */
3225509Smrj 	} else {
3226509Smrj 		ASSERT(space_used <= rootnex_state->r_prealloc_size);
3227509Smrj 		state_available = rootnex_state->r_prealloc_size - space_used;
3228509Smrj 		windowp = (rootnex_window_t *)
3229509Smrj 		    &dma->dp_cookies[sinfo->si_sgl_size];
3230509Smrj 	}
3231509Smrj 
3232509Smrj 	/*
3233509Smrj 	 * figure out how much state we need to track the copy buffer. Add an
3234509Smrj 	 * addition 8 bytes for pointer alignemnt later.
3235509Smrj 	 */
3236509Smrj 	if (dma->dp_copybuf_size > 0) {
3237509Smrj 		copy_state_size = sinfo->si_max_pages *
3238509Smrj 		    sizeof (rootnex_pgmap_t);
3239509Smrj 	} else {
3240509Smrj 		copy_state_size = 0;
3241509Smrj 	}
3242509Smrj 	/* add an additional 8 bytes for pointer alignment */
3243509Smrj 	space_needed = win_state_size + copy_state_size + 0x8;
3244509Smrj 
3245509Smrj 	/* if we have enough space already, use it */
3246509Smrj 	if (state_available >= space_needed) {
3247509Smrj 		dma->dp_window = windowp;
3248509Smrj 		dma->dp_need_to_free_window = B_FALSE;
3249509Smrj 
3250509Smrj 	/* not enough space, need to allocate more. */
3251509Smrj 	} else {
3252509Smrj 		dma->dp_window = kmem_alloc(space_needed, kmflag);
3253509Smrj 		if (dma->dp_window == NULL) {
3254509Smrj 			return (DDI_DMA_NORESOURCES);
3255509Smrj 		}
3256509Smrj 		dma->dp_need_to_free_window = B_TRUE;
3257509Smrj 		dma->dp_window_size = space_needed;
3258509Smrj 		DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *,
3259509Smrj 		    dma->dp_dip, size_t, space_needed);
3260509Smrj 	}
3261509Smrj 
3262509Smrj 	/*
3263509Smrj 	 * we allocate copy buffer state and window state at the same time.
3264509Smrj 	 * setup our copy buffer state pointers. Make sure it's aligned.
3265509Smrj 	 */
3266509Smrj 	if (dma->dp_copybuf_size > 0) {
3267509Smrj 		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
3268509Smrj 		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
3269509Smrj 
3270509Smrj #if !defined(__amd64)
3271509Smrj 		/*
3272509Smrj 		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
3273509Smrj 		 * false/NULL. Should be quicker to bzero vs loop and set.
3274509Smrj 		 */
3275509Smrj 		bzero(dma->dp_pgmap, copy_state_size);
3276509Smrj #endif
3277509Smrj 	} else {
3278509Smrj 		dma->dp_pgmap = NULL;
3279509Smrj 	}
3280509Smrj 
3281509Smrj 	return (DDI_SUCCESS);
3282509Smrj }
3283509Smrj 
3284509Smrj 
3285509Smrj /*
3286509Smrj  * rootnex_teardown_copybuf()
3287509Smrj  *    cleans up after rootnex_setup_copybuf()
3288509Smrj  */
3289509Smrj static void
3290509Smrj rootnex_teardown_copybuf(rootnex_dma_t *dma)
3291509Smrj {
3292509Smrj #if !defined(__amd64)
3293509Smrj 	int i;
3294509Smrj 
3295509Smrj 	/*
3296509Smrj 	 * if we allocated kernel heap VMEM space, go through all the pages and
3297509Smrj 	 * map out any of the ones that we're mapped into the kernel heap VMEM
3298509Smrj 	 * arena. Then free the VMEM space.
3299509Smrj 	 */
3300509Smrj 	if (dma->dp_kva != NULL) {
3301509Smrj 		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
3302509Smrj 			if (dma->dp_pgmap[i].pm_mapped) {
3303509Smrj 				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
3304509Smrj 				    MMU_PAGESIZE, HAT_UNLOAD);
3305509Smrj 				dma->dp_pgmap[i].pm_mapped = B_FALSE;
3306509Smrj 			}
3307509Smrj 		}
3308509Smrj 
3309509Smrj 		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
3310509Smrj 	}
3311509Smrj 
3312509Smrj #endif
3313509Smrj 
3314509Smrj 	/* if we allocated a copy buffer, free it */
3315509Smrj 	if (dma->dp_cbaddr != NULL) {
33161900Seota 		i_ddi_mem_free(dma->dp_cbaddr, NULL);
3317509Smrj 	}
3318509Smrj }
3319509Smrj 
3320509Smrj 
3321509Smrj /*
3322509Smrj  * rootnex_teardown_windows()
3323509Smrj  *    cleans up after rootnex_setup_windows()
3324509Smrj  */
3325509Smrj static void
3326509Smrj rootnex_teardown_windows(rootnex_dma_t *dma)
3327509Smrj {
3328509Smrj 	/*
3329509Smrj 	 * if we had to allocate window state on the last bind (because we
3330509Smrj 	 * didn't have enough pre-allocated space in the handle), free it.
3331509Smrj 	 */
3332509Smrj 	if (dma->dp_need_to_free_window) {
3333509Smrj 		kmem_free(dma->dp_window, dma->dp_window_size);
3334509Smrj 	}
3335509Smrj }
3336509Smrj 
3337509Smrj 
3338509Smrj /*
3339509Smrj  * rootnex_init_win()
3340509Smrj  *    Called in bind slow path during creation of a new window. Initializes
3341509Smrj  *    window state to default values.
3342509Smrj  */
3343509Smrj /*ARGSUSED*/
3344509Smrj static void
3345509Smrj rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3346509Smrj     rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
3347509Smrj {
3348509Smrj 	hp->dmai_nwin++;
3349509Smrj 	window->wd_dosync = B_FALSE;
3350509Smrj 	window->wd_offset = cur_offset;
3351509Smrj 	window->wd_size = 0;
3352509Smrj 	window->wd_first_cookie = cookie;
3353509Smrj 	window->wd_cookie_cnt = 0;
3354509Smrj 	window->wd_trim.tr_trim_first = B_FALSE;
3355509Smrj 	window->wd_trim.tr_trim_last = B_FALSE;
3356509Smrj 	window->wd_trim.tr_first_copybuf_win = B_FALSE;
3357509Smrj 	window->wd_trim.tr_last_copybuf_win = B_FALSE;
3358509Smrj #if !defined(__amd64)
3359509Smrj 	window->wd_remap_copybuf = dma->dp_cb_remaping;
3360509Smrj #endif
3361509Smrj }
3362509Smrj 
3363509Smrj 
3364509Smrj /*
3365509Smrj  * rootnex_setup_cookie()
3366509Smrj  *    Called in the bind slow path when the sgl uses the copy buffer. If any of
3367509Smrj  *    the sgl uses the copy buffer, we need to go through each cookie, figure
3368509Smrj  *    out if it uses the copy buffer, and if it does, save away everything we'll
3369509Smrj  *    need during sync.
3370509Smrj  */
3371509Smrj static void
3372509Smrj rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
3373509Smrj     ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
3374509Smrj     page_t **cur_pp)
3375509Smrj {
3376509Smrj 	boolean_t copybuf_sz_power_2;
3377509Smrj 	rootnex_sglinfo_t *sinfo;
33785084Sjohnlev 	paddr_t paddr;
3379509Smrj 	uint_t pidx;
3380509Smrj 	uint_t pcnt;
3381509Smrj 	off_t poff;
3382509Smrj #if defined(__amd64)
3383509Smrj 	pfn_t pfn;
3384509Smrj #else
3385509Smrj 	page_t **pplist;
3386509Smrj #endif
3387509Smrj 
3388509Smrj 	sinfo = &dma->dp_sglinfo;
3389509Smrj 
3390509Smrj 	/*
3391509Smrj 	 * Calculate the page index relative to the start of the buffer. The
3392509Smrj 	 * index to the current page for our buffer is the offset into the
3393509Smrj 	 * first page of the buffer plus our current offset into the buffer
3394509Smrj 	 * itself, shifted of course...
3395509Smrj 	 */
3396509Smrj 	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
3397509Smrj 	ASSERT(pidx < sinfo->si_max_pages);
3398509Smrj 
3399509Smrj 	/* if this cookie uses the copy buffer */
3400509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3401509Smrj 		/*
3402509Smrj 		 * NOTE: we know that since this cookie uses the copy buffer, it
3403509Smrj 		 * is <= MMU_PAGESIZE.
3404509Smrj 		 */
3405509Smrj 
3406509Smrj 		/*
3407509Smrj 		 * get the offset into the page. For the 64-bit kernel, get the
3408509Smrj 		 * pfn which we'll use with seg kpm.
3409509Smrj 		 */
34105084Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
3411509Smrj #if defined(__amd64)
34125084Sjohnlev 		/* mfn_to_pfn() is a NOP on i86pc */
34135084Sjohnlev 		pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
34145084Sjohnlev #endif /* __amd64 */
3415509Smrj 
3416509Smrj 		/* figure out if the copybuf size is a power of 2 */
3417509Smrj 		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
3418509Smrj 			copybuf_sz_power_2 = B_FALSE;
3419509Smrj 		} else {
3420509Smrj 			copybuf_sz_power_2 = B_TRUE;
3421509Smrj 		}
3422509Smrj 
3423509Smrj 		/* This page uses the copy buffer */
3424509Smrj 		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
3425509Smrj 
3426509Smrj 		/*
3427509Smrj 		 * save the copy buffer KVA that we'll use with this page.
3428509Smrj 		 * if we still fit within the copybuf, it's a simple add.
3429509Smrj 		 * otherwise, we need to wrap over using & or % accordingly.
3430509Smrj 		 */
3431509Smrj 		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
3432509Smrj 			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
3433509Smrj 			    *copybuf_used;
3434509Smrj 		} else {
3435509Smrj 			if (copybuf_sz_power_2) {
3436509Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3437509Smrj 				    (uintptr_t)dma->dp_cbaddr +
3438509Smrj 				    (*copybuf_used &
3439509Smrj 				    (dma->dp_copybuf_size - 1)));
34400Sstevel@tonic-gate 			} else {
3441509Smrj 				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3442509Smrj 				    (uintptr_t)dma->dp_cbaddr +
3443509Smrj 				    (*copybuf_used % dma->dp_copybuf_size));
34440Sstevel@tonic-gate 			}
3445509Smrj 		}
3446509Smrj 
3447509Smrj 		/*
3448509Smrj 		 * over write the cookie physical address with the address of
3449509Smrj 		 * the physical address of the copy buffer page that we will
3450509Smrj 		 * use.
3451509Smrj 		 */
34525084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
3453509Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
3454509Smrj 
34555084Sjohnlev #ifdef __xpv
34565084Sjohnlev 		/*
34575084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
34585084Sjohnlev 		 * the cookies with MAs instead of PAs.
34595084Sjohnlev 		 */
34605084Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
34615084Sjohnlev #else
34625084Sjohnlev 		cookie->dmac_laddress = paddr;
34635084Sjohnlev #endif
34645084Sjohnlev 
3465509Smrj 		/* if we have a kernel VA, it's easy, just save that address */
3466509Smrj 		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
3467509Smrj 		    (sinfo->si_asp == &kas)) {
3468509Smrj 			/*
3469509Smrj 			 * save away the page aligned virtual address of the
3470509Smrj 			 * driver buffer. Offsets are handled in the sync code.
3471509Smrj 			 */
3472509Smrj 			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
3473509Smrj 			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
3474509Smrj 			    & MMU_PAGEMASK);
3475509Smrj #if !defined(__amd64)
3476509Smrj 			/*
3477509Smrj 			 * we didn't need to, and will never need to map this
3478509Smrj 			 * page.
3479509Smrj 			 */
3480509Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3481509Smrj #endif
3482509Smrj 
3483509Smrj 		/* we don't have a kernel VA. We need one for the bcopy. */
3484509Smrj 		} else {
3485509Smrj #if defined(__amd64)
3486509Smrj 			/*
3487509Smrj 			 * for the 64-bit kernel, it's easy. We use seg kpm to
3488509Smrj 			 * get a Kernel VA for the corresponding pfn.
3489509Smrj 			 */
3490509Smrj 			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
3491509Smrj #else
3492509Smrj 			/*
3493509Smrj 			 * for the 32-bit kernel, this is a pain. First we'll
3494509Smrj 			 * save away the page_t or user VA for this page. This
3495509Smrj 			 * is needed in rootnex_dma_win() when we switch to a
3496509Smrj 			 * new window which requires us to re-map the copy
3497509Smrj 			 * buffer.
3498509Smrj 			 */
3499509Smrj 			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
3500509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3501509Smrj 				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
3502509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3503509Smrj 			} else if (pplist != NULL) {
3504509Smrj 				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
3505509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = NULL;
3506509Smrj 			} else {
3507509Smrj 				dma->dp_pgmap[pidx].pm_pp = NULL;
3508509Smrj 				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
3509509Smrj 				    (((uintptr_t)
3510509Smrj 				    dmar_object->dmao_obj.virt_obj.v_addr +
3511509Smrj 				    cur_offset) & MMU_PAGEMASK);
3512509Smrj 			}
3513509Smrj 
3514509Smrj 			/*
3515509Smrj 			 * save away the page aligned virtual address which was
3516509Smrj 			 * allocated from the kernel heap arena (taking into
3517509Smrj 			 * account if we need more copy buffer than we alloced
3518509Smrj 			 * and use multiple windows to handle this, i.e. &,%).
3519509Smrj 			 * NOTE: there isn't and physical memory backing up this
3520509Smrj 			 * virtual address space currently.
3521509Smrj 			 */
3522509Smrj 			if ((*copybuf_used + MMU_PAGESIZE) <=
3523509Smrj 			    dma->dp_copybuf_size) {
3524509Smrj 				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3525509Smrj 				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
3526509Smrj 				    MMU_PAGEMASK);
3527509Smrj 			} else {
3528509Smrj 				if (copybuf_sz_power_2) {
3529509Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3530509Smrj 					    (((uintptr_t)dma->dp_kva +
3531509Smrj 					    (*copybuf_used &
3532509Smrj 					    (dma->dp_copybuf_size - 1))) &
3533509Smrj 					    MMU_PAGEMASK);
3534509Smrj 				} else {
3535509Smrj 					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3536509Smrj 					    (((uintptr_t)dma->dp_kva +
3537509Smrj 					    (*copybuf_used %
3538509Smrj 					    dma->dp_copybuf_size)) &
3539509Smrj 					    MMU_PAGEMASK);
3540509Smrj 				}
3541509Smrj 			}
3542509Smrj 
3543509Smrj 			/*
3544509Smrj 			 * if we haven't used up the available copy buffer yet,
3545509Smrj 			 * map the kva to the physical page.
3546509Smrj 			 */
3547509Smrj 			if (!dma->dp_cb_remaping && ((*copybuf_used +
3548509Smrj 			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
3549509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
3550509Smrj 				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
3551509Smrj 					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
3552509Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
3553509Smrj 				} else {
3554509Smrj 					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
3555509Smrj 					    sinfo->si_asp,
3556509Smrj 					    dma->dp_pgmap[pidx].pm_kaddr);
3557509Smrj 				}
3558509Smrj 
3559509Smrj 			/*
3560509Smrj 			 * we've used up the available copy buffer, this page
3561509Smrj 			 * will have to be mapped during rootnex_dma_win() when
3562509Smrj 			 * we switch to a new window which requires a re-map
3563509Smrj 			 * the copy buffer. (32-bit kernel only)
3564509Smrj 			 */
3565509Smrj 			} else {
3566509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3567509Smrj 			}
3568509Smrj #endif
3569509Smrj 			/* go to the next page_t */
3570509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3571509Smrj 				*cur_pp = (*cur_pp)->p_next;
3572509Smrj 			}
35730Sstevel@tonic-gate 		}
3574509Smrj 
3575509Smrj 		/* add to the copy buffer count */
3576509Smrj 		*copybuf_used += MMU_PAGESIZE;
3577509Smrj 
3578509Smrj 	/*
3579509Smrj 	 * This cookie doesn't use the copy buffer. Walk through the pages this
3580509Smrj 	 * cookie occupies to reflect this.
3581509Smrj 	 */
3582509Smrj 	} else {
3583509Smrj 		/*
3584509Smrj 		 * figure out how many pages the cookie occupies. We need to
3585509Smrj 		 * use the original page offset of the buffer and the cookies
3586509Smrj 		 * offset in the buffer to do this.
3587509Smrj 		 */
3588509Smrj 		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
3589509Smrj 		pcnt = mmu_btopr(cookie->dmac_size + poff);
3590509Smrj 
3591509Smrj 		while (pcnt > 0) {
3592509Smrj #if !defined(__amd64)
3593509Smrj 			/*
3594509Smrj 			 * the 32-bit kernel doesn't have seg kpm, so we need
3595509Smrj 			 * to map in the driver buffer (if it didn't come down
3596509Smrj 			 * with a kernel VA) on the fly. Since this page doesn't
3597509Smrj 			 * use the copy buffer, it's not, or will it ever, have
3598509Smrj 			 * to be mapped in.
3599509Smrj 			 */
3600509Smrj 			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3601509Smrj #endif
3602509Smrj 			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
3603509Smrj 
3604509Smrj 			/*
3605509Smrj 			 * we need to update pidx and cur_pp or we'll loose
3606509Smrj 			 * track of where we are.
3607509Smrj 			 */
3608509Smrj 			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3609509Smrj 				*cur_pp = (*cur_pp)->p_next;
3610509Smrj 			}
3611509Smrj 			pidx++;
3612509Smrj 			pcnt--;
3613509Smrj 		}
3614509Smrj 	}
3615509Smrj }
3616509Smrj 
3617509Smrj 
3618509Smrj /*
3619509Smrj  * rootnex_sgllen_window_boundary()
3620509Smrj  *    Called in the bind slow path when the next cookie causes us to exceed (in
3621509Smrj  *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
3622509Smrj  *    length supported by the DMA H/W.
3623509Smrj  */
3624509Smrj static int
3625509Smrj rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3626509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
3627509Smrj     off_t cur_offset)
3628509Smrj {
3629509Smrj 	off_t new_offset;
3630509Smrj 	size_t trim_sz;
3631509Smrj 	off_t coffset;
3632509Smrj 
3633509Smrj 
3634509Smrj 	/*
3635509Smrj 	 * if we know we'll never have to trim, it's pretty easy. Just move to
3636509Smrj 	 * the next window and init it. We're done.
3637509Smrj 	 */
3638509Smrj 	if (!dma->dp_trim_required) {
3639509Smrj 		(*windowp)++;
3640509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3641509Smrj 		(*windowp)->wd_cookie_cnt++;
3642509Smrj 		(*windowp)->wd_size = cookie->dmac_size;
3643509Smrj 		return (DDI_SUCCESS);
3644509Smrj 	}
3645509Smrj 
3646509Smrj 	/* figure out how much we need to trim from the window */
3647509Smrj 	ASSERT(attr->dma_attr_granular != 0);
3648509Smrj 	if (dma->dp_granularity_power_2) {
3649509Smrj 		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
3650509Smrj 	} else {
3651509Smrj 		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
3652509Smrj 	}
3653509Smrj 
3654509Smrj 	/* The window's a whole multiple of granularity. We're done */
3655509Smrj 	if (trim_sz == 0) {
3656509Smrj 		(*windowp)++;
3657509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3658509Smrj 		(*windowp)->wd_cookie_cnt++;
3659509Smrj 		(*windowp)->wd_size = cookie->dmac_size;
3660509Smrj 		return (DDI_SUCCESS);
3661509Smrj 	}
3662509Smrj 
3663509Smrj 	/*
3664509Smrj 	 * The window's not a whole multiple of granularity, since we know this
3665509Smrj 	 * is due to the sgllen, we need to go back to the last cookie and trim
3666509Smrj 	 * that one, add the left over part of the old cookie into the new
3667509Smrj 	 * window, and then add in the new cookie into the new window.
3668509Smrj 	 */
3669509Smrj 
3670509Smrj 	/*
3671509Smrj 	 * make sure the driver isn't making us do something bad... Trimming and
3672509Smrj 	 * sgllen == 1 don't go together.
3673509Smrj 	 */
3674509Smrj 	if (attr->dma_attr_sgllen == 1) {
3675509Smrj 		return (DDI_DMA_NOMAPPING);
3676509Smrj 	}
3677509Smrj 
3678509Smrj 	/*
3679509Smrj 	 * first, setup the current window to account for the trim. Need to go
3680509Smrj 	 * back to the last cookie for this.
3681509Smrj 	 */
3682509Smrj 	cookie--;
3683509Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3684509Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
36855084Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3686509Smrj 	ASSERT(cookie->dmac_size > trim_sz);
3687509Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3688509Smrj 	(*windowp)->wd_size -= trim_sz;
3689509Smrj 
3690509Smrj 	/* save the buffer offsets for the next window */
3691509Smrj 	coffset = cookie->dmac_size - trim_sz;
3692509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3693509Smrj 
3694509Smrj 	/*
3695509Smrj 	 * set this now in case this is the first window. all other cases are
3696509Smrj 	 * set in dma_win()
3697509Smrj 	 */
3698509Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3699509Smrj 
3700509Smrj 	/*
3701509Smrj 	 * initialize the next window using what's left over in the previous
3702509Smrj 	 * cookie.
3703509Smrj 	 */
3704509Smrj 	(*windowp)++;
3705509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3706509Smrj 	(*windowp)->wd_cookie_cnt++;
3707509Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
37085084Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
3709509Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3710509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3711509Smrj 		(*windowp)->wd_dosync = B_TRUE;
3712509Smrj 	}
3713509Smrj 
3714509Smrj 	/*
3715509Smrj 	 * now go back to the current cookie and add it to the new window. set
3716509Smrj 	 * the new window size to the what was left over from the previous
3717509Smrj 	 * cookie and what's in the current cookie.
3718509Smrj 	 */
3719509Smrj 	cookie++;
3720509Smrj 	(*windowp)->wd_cookie_cnt++;
3721509Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
3722509Smrj 
3723509Smrj 	/*
3724509Smrj 	 * trim plus the next cookie could put us over maxxfer (a cookie can be
3725509Smrj 	 * a max size of maxxfer). Handle that case.
3726509Smrj 	 */
3727509Smrj 	if ((*windowp)->wd_size > dma->dp_maxxfer) {
3728509Smrj 		/*
3729509Smrj 		 * maxxfer is already a whole multiple of granularity, and this
3730509Smrj 		 * trim will be <= the previous trim (since a cookie can't be
3731509Smrj 		 * larger than maxxfer). Make things simple here.
3732509Smrj 		 */
3733509Smrj 		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
3734509Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3735509Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
37365084Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3737509Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3738509Smrj 		(*windowp)->wd_size -= trim_sz;
3739509Smrj 		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
3740509Smrj 
3741509Smrj 		/* save the buffer offsets for the next window */
3742509Smrj 		coffset = cookie->dmac_size - trim_sz;
3743509Smrj 		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3744509Smrj 
3745509Smrj 		/* setup the next window */
3746509Smrj 		(*windowp)++;
3747509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3748509Smrj 		(*windowp)->wd_cookie_cnt++;
3749509Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
37505084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
3751509Smrj 		    coffset;
3752509Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
3753509Smrj 	}
3754509Smrj 
3755509Smrj 	return (DDI_SUCCESS);
3756509Smrj }
3757509Smrj 
3758509Smrj 
3759509Smrj /*
3760509Smrj  * rootnex_copybuf_window_boundary()
3761509Smrj  *    Called in bind slowpath when we get to a window boundary because we used
3762509Smrj  *    up all the copy buffer that we have.
3763509Smrj  */
3764509Smrj static int
3765509Smrj rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3766509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
3767509Smrj     size_t *copybuf_used)
3768509Smrj {
3769509Smrj 	rootnex_sglinfo_t *sinfo;
3770509Smrj 	off_t new_offset;
3771509Smrj 	size_t trim_sz;
37725084Sjohnlev 	paddr_t paddr;
3773509Smrj 	off_t coffset;
3774509Smrj 	uint_t pidx;
3775509Smrj 	off_t poff;
3776509Smrj 
3777509Smrj 
3778509Smrj 	sinfo = &dma->dp_sglinfo;
3779509Smrj 
3780509Smrj 	/*
3781509Smrj 	 * the copy buffer should be a whole multiple of page size. We know that
3782509Smrj 	 * this cookie is <= MMU_PAGESIZE.
3783509Smrj 	 */
3784509Smrj 	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
3785509Smrj 
3786509Smrj 	/*
3787509Smrj 	 * from now on, all new windows in this bind need to be re-mapped during
3788509Smrj 	 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf
3789509Smrj 	 * space...
3790509Smrj 	 */
3791509Smrj #if !defined(__amd64)
3792509Smrj 	dma->dp_cb_remaping = B_TRUE;
3793509Smrj #endif
3794509Smrj 
3795509Smrj 	/* reset copybuf used */
3796509Smrj 	*copybuf_used = 0;
3797509Smrj 
3798509Smrj 	/*
3799509Smrj 	 * if we don't have to trim (since granularity is set to 1), go to the
3800509Smrj 	 * next window and add the current cookie to it. We know the current
3801509Smrj 	 * cookie uses the copy buffer since we're in this code path.
3802509Smrj 	 */
3803509Smrj 	if (!dma->dp_trim_required) {
3804509Smrj 		(*windowp)++;
3805509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3806509Smrj 
3807509Smrj 		/* Add this cookie to the new window */
3808509Smrj 		(*windowp)->wd_cookie_cnt++;
3809509Smrj 		(*windowp)->wd_size += cookie->dmac_size;
3810509Smrj 		*copybuf_used += MMU_PAGESIZE;
3811509Smrj 		return (DDI_SUCCESS);
3812509Smrj 	}
3813509Smrj 
3814509Smrj 	/*
3815509Smrj 	 * *** may need to trim, figure it out.
3816509Smrj 	 */
3817509Smrj 
3818509Smrj 	/* figure out how much we need to trim from the window */
3819509Smrj 	if (dma->dp_granularity_power_2) {
3820509Smrj 		trim_sz = (*windowp)->wd_size &
3821509Smrj 		    (hp->dmai_attr.dma_attr_granular - 1);
3822509Smrj 	} else {
3823509Smrj 		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
3824509Smrj 	}
3825509Smrj 
3826509Smrj 	/*
3827509Smrj 	 * if the window's a whole multiple of granularity, go to the next
3828509Smrj 	 * window, init it, then add in the current cookie. We know the current
3829509Smrj 	 * cookie uses the copy buffer since we're in this code path.
3830509Smrj 	 */
3831509Smrj 	if (trim_sz == 0) {
3832509Smrj 		(*windowp)++;
3833509Smrj 		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
3834509Smrj 
3835509Smrj 		/* Add this cookie to the new window */
3836509Smrj 		(*windowp)->wd_cookie_cnt++;
3837509Smrj 		(*windowp)->wd_size += cookie->dmac_size;
3838509Smrj 		*copybuf_used += MMU_PAGESIZE;
3839509Smrj 		return (DDI_SUCCESS);
3840509Smrj 	}
3841509Smrj 
3842509Smrj 	/*
3843509Smrj 	 * *** We figured it out, we definitly need to trim
3844509Smrj 	 */
3845509Smrj 
3846509Smrj 	/*
3847509Smrj 	 * make sure the driver isn't making us do something bad...
3848509Smrj 	 * Trimming and sgllen == 1 don't go together.
3849509Smrj 	 */
3850509Smrj 	if (hp->dmai_attr.dma_attr_sgllen == 1) {
3851509Smrj 		return (DDI_DMA_NOMAPPING);
3852509Smrj 	}
3853509Smrj 
3854509Smrj 	/*
3855509Smrj 	 * first, setup the current window to account for the trim. Need to go
3856509Smrj 	 * back to the last cookie for this. Some of the last cookie will be in
3857509Smrj 	 * the current window, and some of the last cookie will be in the new
3858509Smrj 	 * window. All of the current cookie will be in the new window.
3859509Smrj 	 */
3860509Smrj 	cookie--;
3861509Smrj 	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
3862509Smrj 	(*windowp)->wd_trim.tr_last_cookie = cookie;
38635084Sjohnlev 	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
3864509Smrj 	ASSERT(cookie->dmac_size > trim_sz);
3865509Smrj 	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
3866509Smrj 	(*windowp)->wd_size -= trim_sz;
3867509Smrj 
3868509Smrj 	/*
3869509Smrj 	 * we're trimming the last cookie (not the current cookie). So that
3870509Smrj 	 * last cookie may have or may not have been using the copy buffer (
3871509Smrj 	 * we know the cookie passed in uses the copy buffer since we're in
3872509Smrj 	 * this code path).
3873509Smrj 	 *
3874509Smrj 	 * If the last cookie doesn't use the copy buffer, nothing special to
3875509Smrj 	 * do. However, if it does uses the copy buffer, it will be both the
3876509Smrj 	 * last page in the current window and the first page in the next
3877509Smrj 	 * window. Since we are reusing the copy buffer (and KVA space on the
3878509Smrj 	 * 32-bit kernel), this page will use the end of the copy buffer in the
3879509Smrj 	 * current window, and the start of the copy buffer in the next window.
3880509Smrj 	 * Track that info... The cookie physical address was already set to
3881509Smrj 	 * the copy buffer physical address in setup_cookie..
3882509Smrj 	 */
3883509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3884509Smrj 		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
3885509Smrj 		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
3886509Smrj 		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
3887509Smrj 		(*windowp)->wd_trim.tr_last_pidx = pidx;
3888509Smrj 		(*windowp)->wd_trim.tr_last_cbaddr =
3889509Smrj 		    dma->dp_pgmap[pidx].pm_cbaddr;
3890509Smrj #if !defined(__amd64)
3891509Smrj 		(*windowp)->wd_trim.tr_last_kaddr =
3892509Smrj 		    dma->dp_pgmap[pidx].pm_kaddr;
3893509Smrj #endif
3894509Smrj 	}
3895509Smrj 
3896509Smrj 	/* save the buffer offsets for the next window */
3897509Smrj 	coffset = cookie->dmac_size - trim_sz;
3898509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
3899509Smrj 
3900509Smrj 	/*
3901509Smrj 	 * set this now in case this is the first window. all other cases are
3902509Smrj 	 * set in dma_win()
3903509Smrj 	 */
3904509Smrj 	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
3905509Smrj 
3906509Smrj 	/*
3907509Smrj 	 * initialize the next window using what's left over in the previous
3908509Smrj 	 * cookie.
3909509Smrj 	 */
3910509Smrj 	(*windowp)++;
3911509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
3912509Smrj 	(*windowp)->wd_cookie_cnt++;
3913509Smrj 	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
39145084Sjohnlev 	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
3915509Smrj 	(*windowp)->wd_trim.tr_first_size = trim_sz;
3916509Smrj 
3917509Smrj 	/*
3918509Smrj 	 * again, we're tracking if the last cookie uses the copy buffer.
3919509Smrj 	 * read the comment above for more info on why we need to track
3920509Smrj 	 * additional state.
3921509Smrj 	 *
3922509Smrj 	 * For the first cookie in the new window, we need reset the physical
3923509Smrj 	 * address to DMA into to the start of the copy buffer plus any
3924509Smrj 	 * initial page offset which may be present.
3925509Smrj 	 */
3926509Smrj 	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3927509Smrj 		(*windowp)->wd_dosync = B_TRUE;
3928509Smrj 		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
3929509Smrj 		(*windowp)->wd_trim.tr_first_pidx = pidx;
3930509Smrj 		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
3931509Smrj 		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
39325084Sjohnlev 
39335084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
39345084Sjohnlev 		    poff;
39355084Sjohnlev #ifdef __xpv
39365084Sjohnlev 		/*
39375084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
39385084Sjohnlev 		 * the cookies with MAs instead of PAs.
39395084Sjohnlev 		 */
39405084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr =
39415084Sjohnlev 		    ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
39425084Sjohnlev #else
39435084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = paddr;
39445084Sjohnlev #endif
39455084Sjohnlev 
3946509Smrj #if !defined(__amd64)
3947509Smrj 		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
3948509Smrj #endif
3949509Smrj 		/* account for the cookie copybuf usage in the new window */
3950509Smrj 		*copybuf_used += MMU_PAGESIZE;
3951509Smrj 
3952509Smrj 		/*
3953509Smrj 		 * every piece of code has to have a hack, and here is this
3954509Smrj 		 * ones :-)
3955509Smrj 		 *
3956509Smrj 		 * There is a complex interaction between setup_cookie and the
3957509Smrj 		 * copybuf window boundary. The complexity had to be in either
3958509Smrj 		 * the maxxfer window, or the copybuf window, and I chose the
3959509Smrj 		 * copybuf code.
3960509Smrj 		 *
3961509Smrj 		 * So in this code path, we have taken the last cookie,
3962509Smrj 		 * virtually broken it in half due to the trim, and it happens
3963509Smrj 		 * to use the copybuf which further complicates life. At the
3964509Smrj 		 * same time, we have already setup the current cookie, which
3965509Smrj 		 * is now wrong. More background info: the current cookie uses
3966509Smrj 		 * the copybuf, so it is only a page long max. So we need to
3967509Smrj 		 * fix the current cookies copy buffer address, physical
3968509Smrj 		 * address, and kva for the 32-bit kernel. We due this by
3969509Smrj 		 * bumping them by page size (of course, we can't due this on
3970509Smrj 		 * the physical address since the copy buffer may not be
3971509Smrj 		 * physically contiguous).
3972509Smrj 		 */
3973509Smrj 		cookie++;
3974509Smrj 		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
39755084Sjohnlev 		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
39765084Sjohnlev 
39775084Sjohnlev 		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
3978509Smrj 		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
39795084Sjohnlev #ifdef __xpv
39805084Sjohnlev 		/*
39815084Sjohnlev 		 * If we're dom0, we're using a real device so we need to load
39825084Sjohnlev 		 * the cookies with MAs instead of PAs.
39835084Sjohnlev 		 */
39845084Sjohnlev 		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr);
39855084Sjohnlev #else
39865084Sjohnlev 		cookie->dmac_laddress = paddr;
39875084Sjohnlev #endif
39885084Sjohnlev 
3989509Smrj #if !defined(__amd64)
3990509Smrj 		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
3991509Smrj 		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
3992509Smrj #endif
3993509Smrj 	} else {
3994509Smrj 		/* go back to the current cookie */
3995509Smrj 		cookie++;
3996509Smrj 	}
3997509Smrj 
3998509Smrj 	/*
3999509Smrj 	 * add the current cookie to the new window. set the new window size to
4000509Smrj 	 * the what was left over from the previous cookie and what's in the
4001509Smrj 	 * current cookie.
4002509Smrj 	 */
4003509Smrj 	(*windowp)->wd_cookie_cnt++;
4004509Smrj 	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
4005509Smrj 	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
4006509Smrj 
4007509Smrj 	/*
4008509Smrj 	 * we know that the cookie passed in always uses the copy buffer. We
4009509Smrj 	 * wouldn't be here if it didn't.
4010509Smrj 	 */
4011509Smrj 	*copybuf_used += MMU_PAGESIZE;
4012509Smrj 
4013509Smrj 	return (DDI_SUCCESS);
4014509Smrj }
4015509Smrj 
4016509Smrj 
4017509Smrj /*
4018509Smrj  * rootnex_maxxfer_window_boundary()
4019509Smrj  *    Called in bind slowpath when we get to a window boundary because we will
4020509Smrj  *    go over maxxfer.
4021509Smrj  */
4022509Smrj static int
4023509Smrj rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
4024509Smrj     rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
4025509Smrj {
4026509Smrj 	size_t dmac_size;
4027509Smrj 	off_t new_offset;
4028509Smrj 	size_t trim_sz;
4029509Smrj 	off_t coffset;
4030509Smrj 
4031509Smrj 
4032509Smrj 	/*
4033509Smrj 	 * calculate how much we have to trim off of the current cookie to equal
4034509Smrj 	 * maxxfer. We don't have to account for granularity here since our
4035509Smrj 	 * maxxfer already takes that into account.
4036509Smrj 	 */
4037509Smrj 	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
4038509Smrj 	ASSERT(trim_sz <= cookie->dmac_size);
4039509Smrj 	ASSERT(trim_sz <= dma->dp_maxxfer);
4040509Smrj 
4041509Smrj 	/* save cookie size since we need it later and we might change it */
4042509Smrj 	dmac_size = cookie->dmac_size;
4043509Smrj 
4044509Smrj 	/*
4045509Smrj 	 * if we're not trimming the entire cookie, setup the current window to
4046509Smrj 	 * account for the trim.
4047509Smrj 	 */
4048509Smrj 	if (trim_sz < cookie->dmac_size) {
4049509Smrj 		(*windowp)->wd_cookie_cnt++;
4050509Smrj 		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
4051509Smrj 		(*windowp)->wd_trim.tr_last_cookie = cookie;
40525084Sjohnlev 		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
4053509Smrj 		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
4054509Smrj 		(*windowp)->wd_size = dma->dp_maxxfer;
4055509Smrj 
4056509Smrj 		/*
4057509Smrj 		 * set the adjusted cookie size now in case this is the first
4058509Smrj 		 * window. All other windows are taken care of in get win
4059509Smrj 		 */
4060509Smrj 		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
4061509Smrj 	}
4062509Smrj 
4063509Smrj 	/*
4064509Smrj 	 * coffset is the current offset within the cookie, new_offset is the
4065509Smrj 	 * current offset with the entire buffer.
4066509Smrj 	 */
4067509Smrj 	coffset = dmac_size - trim_sz;
4068509Smrj 	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4069509Smrj 
4070509Smrj 	/* initialize the next window */
4071509Smrj 	(*windowp)++;
4072509Smrj 	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4073509Smrj 	(*windowp)->wd_cookie_cnt++;
4074509Smrj 	(*windowp)->wd_size = trim_sz;
4075509Smrj 	if (trim_sz < dmac_size) {
4076509Smrj 		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
40775084Sjohnlev 		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
4078509Smrj 		    coffset;
4079509Smrj 		(*windowp)->wd_trim.tr_first_size = trim_sz;
4080509Smrj 	}
4081509Smrj 
4082509Smrj 	return (DDI_SUCCESS);
4083509Smrj }
4084509Smrj 
4085509Smrj 
4086509Smrj /*ARGSUSED*/
4087509Smrj static int
40887613SVikram.Hegde@Sun.COM rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4089509Smrj     off_t off, size_t len, uint_t cache_flags)
4090509Smrj {
4091509Smrj 	rootnex_sglinfo_t *sinfo;
4092509Smrj 	rootnex_pgmap_t *cbpage;
4093509Smrj 	rootnex_window_t *win;
4094509Smrj 	ddi_dma_impl_t *hp;
4095509Smrj 	rootnex_dma_t *dma;
4096509Smrj 	caddr_t fromaddr;
4097509Smrj 	caddr_t toaddr;
4098509Smrj 	uint_t psize;
4099509Smrj 	off_t offset;
4100509Smrj 	uint_t pidx;
4101509Smrj 	size_t size;
4102509Smrj 	off_t poff;
4103509Smrj 	int e;
4104509Smrj 
4105509Smrj 
4106509Smrj 	hp = (ddi_dma_impl_t *)handle;
4107509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
4108509Smrj 	sinfo = &dma->dp_sglinfo;
4109509Smrj 
4110509Smrj 	/*
4111509Smrj 	 * if we don't have any windows, we don't need to sync. A copybuf
4112509Smrj 	 * will cause us to have at least one window.
4113509Smrj 	 */
4114509Smrj 	if (dma->dp_window == NULL) {
4115509Smrj 		return (DDI_SUCCESS);
4116509Smrj 	}
4117509Smrj 
4118509Smrj 	/* This window may not need to be sync'd */
4119509Smrj 	win = &dma->dp_window[dma->dp_current_win];
4120509Smrj 	if (!win->wd_dosync) {
4121509Smrj 		return (DDI_SUCCESS);
4122509Smrj 	}
4123509Smrj 
4124509Smrj 	/* handle off and len special cases */
4125509Smrj 	if ((off == 0) || (rootnex_sync_ignore_params)) {
4126509Smrj 		offset = win->wd_offset;
4127509Smrj 	} else {
4128509Smrj 		offset = off;
4129509Smrj 	}
4130509Smrj 	if ((len == 0) || (rootnex_sync_ignore_params)) {
4131509Smrj 		size = win->wd_size;
4132509Smrj 	} else {
4133509Smrj 		size = len;
4134509Smrj 	}
4135509Smrj 
4136509Smrj 	/* check the sync args to make sure they make a little sense */
4137509Smrj 	if (rootnex_sync_check_parms) {
4138509Smrj 		e = rootnex_valid_sync_parms(hp, win, offset, size,
4139509Smrj 		    cache_flags);
4140509Smrj 		if (e != DDI_SUCCESS) {
4141509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
4142509Smrj 			return (DDI_FAILURE);
4143509Smrj 		}
4144509Smrj 	}
4145509Smrj 
4146509Smrj 	/*
4147509Smrj 	 * special case the first page to handle the offset into the page. The
4148509Smrj 	 * offset to the current page for our buffer is the offset into the
4149509Smrj 	 * first page of the buffer plus our current offset into the buffer
4150509Smrj 	 * itself, masked of course.
4151509Smrj 	 */
4152509Smrj 	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
4153509Smrj 	psize = MIN((MMU_PAGESIZE - poff), size);
4154509Smrj 
4155509Smrj 	/* go through all the pages that we want to sync */
4156509Smrj 	while (size > 0) {
4157509Smrj 		/*
4158509Smrj 		 * Calculate the page index relative to the start of the buffer.
4159509Smrj 		 * The index to the current page for our buffer is the offset
4160509Smrj 		 * into the first page of the buffer plus our current offset
4161509Smrj 		 * into the buffer itself, shifted of course...
4162509Smrj 		 */
4163509Smrj 		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
4164509Smrj 		ASSERT(pidx < sinfo->si_max_pages);
4165509Smrj 
4166509Smrj 		/*
4167509Smrj 		 * if this page uses the copy buffer, we need to sync it,
4168509Smrj 		 * otherwise, go on to the next page.
4169509Smrj 		 */
4170509Smrj 		cbpage = &dma->dp_pgmap[pidx];
4171509Smrj 		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
4172509Smrj 		    (cbpage->pm_uses_copybuf == B_FALSE));
4173509Smrj 		if (cbpage->pm_uses_copybuf) {
4174509Smrj 			/* cbaddr and kaddr should be page aligned */
4175509Smrj 			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
4176509Smrj 			    MMU_PAGEOFFSET) == 0);
4177509Smrj 			ASSERT(((uintptr_t)cbpage->pm_kaddr &
4178509Smrj 			    MMU_PAGEOFFSET) == 0);
4179509Smrj 
4180509Smrj 			/*
4181509Smrj 			 * if we're copying for the device, we are going to
4182509Smrj 			 * copy from the drivers buffer and to the rootnex
4183509Smrj 			 * allocated copy buffer.
4184509Smrj 			 */
4185509Smrj 			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
4186509Smrj 				fromaddr = cbpage->pm_kaddr + poff;
4187509Smrj 				toaddr = cbpage->pm_cbaddr + poff;
4188509Smrj 				DTRACE_PROBE2(rootnex__sync__dev,
4189509Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
4190509Smrj 
4191509Smrj 			/*
4192509Smrj 			 * if we're copying for the cpu/kernel, we are going to
4193509Smrj 			 * copy from the rootnex allocated copy buffer to the
4194509Smrj 			 * drivers buffer.
4195509Smrj 			 */
4196509Smrj 			} else {
4197509Smrj 				fromaddr = cbpage->pm_cbaddr + poff;
4198509Smrj 				toaddr = cbpage->pm_kaddr + poff;
4199509Smrj 				DTRACE_PROBE2(rootnex__sync__cpu,
4200509Smrj 				    dev_info_t *, dma->dp_dip, size_t, psize);
4201509Smrj 			}
4202509Smrj 
4203509Smrj 			bcopy(fromaddr, toaddr, psize);
4204509Smrj 		}
4205509Smrj 
4206509Smrj 		/*
4207509Smrj 		 * decrement size until we're done, update our offset into the
4208509Smrj 		 * buffer, and get the next page size.
4209509Smrj 		 */
4210509Smrj 		size -= psize;
4211509Smrj 		offset += psize;
4212509Smrj 		psize = MIN(MMU_PAGESIZE, size);
4213509Smrj 
4214509Smrj 		/* page offset is zero for the rest of this loop */
4215509Smrj 		poff = 0;
4216509Smrj 	}
4217509Smrj 
4218509Smrj 	return (DDI_SUCCESS);
4219509Smrj }
4220509Smrj 
42217613SVikram.Hegde@Sun.COM /*
42227613SVikram.Hegde@Sun.COM  * rootnex_dma_sync()
42237613SVikram.Hegde@Sun.COM  *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
42247613SVikram.Hegde@Sun.COM  *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
42257613SVikram.Hegde@Sun.COM  *    is set, ddi_dma_sync() returns immediately passing back success.
42267613SVikram.Hegde@Sun.COM  */
42277613SVikram.Hegde@Sun.COM /*ARGSUSED*/
42287613SVikram.Hegde@Sun.COM static int
42297613SVikram.Hegde@Sun.COM rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
42307613SVikram.Hegde@Sun.COM     off_t off, size_t len, uint_t cache_flags)
42317613SVikram.Hegde@Sun.COM {
42327613SVikram.Hegde@Sun.COM #if !defined(__xpv)
423310216SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
42347613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_sync(dip, rdip, handle, off, len,
42357613SVikram.Hegde@Sun.COM 		    cache_flags));
42367613SVikram.Hegde@Sun.COM 	}
42377613SVikram.Hegde@Sun.COM #endif
42387613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_sync(dip, rdip, handle, off, len,
42397613SVikram.Hegde@Sun.COM 	    cache_flags));
42407613SVikram.Hegde@Sun.COM }
4241509Smrj 
4242509Smrj /*
4243509Smrj  * rootnex_valid_sync_parms()
4244509Smrj  *    checks the parameters passed to sync to verify they are correct.
4245509Smrj  */
4246509Smrj static int
4247509Smrj rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
4248509Smrj     off_t offset, size_t size, uint_t cache_flags)
4249509Smrj {
4250509Smrj 	off_t woffset;
4251509Smrj 
4252509Smrj 
4253509Smrj 	/*
4254509Smrj 	 * the first part of the test to make sure the offset passed in is
4255509Smrj 	 * within the window.
4256509Smrj 	 */
4257509Smrj 	if (offset < win->wd_offset) {
4258509Smrj 		return (DDI_FAILURE);
4259509Smrj 	}
4260509Smrj 
4261509Smrj 	/*
4262509Smrj 	 * second and last part of the test to make sure the offset and length
4263509Smrj 	 * passed in is within the window.
4264509Smrj 	 */
4265509Smrj 	woffset = offset - win->wd_offset;
4266509Smrj 	if ((woffset + size) > win->wd_size) {
4267509Smrj 		return (DDI_FAILURE);
4268509Smrj 	}
4269509Smrj 
4270509Smrj 	/*
4271509Smrj 	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
4272509Smrj 	 * be set too.
4273509Smrj 	 */
4274509Smrj 	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
4275509Smrj 	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
4276509Smrj 		return (DDI_SUCCESS);
4277509Smrj 	}
4278509Smrj 
4279509Smrj 	/*
4280509Smrj 	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
4281509Smrj 	 * should be set. Also DDI_DMA_READ should be set in the flags.
4282509Smrj 	 */
4283509Smrj 	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
4284509Smrj 	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
4285509Smrj 	    (hp->dmai_rflags & DDI_DMA_READ)) {
4286509Smrj 		return (DDI_SUCCESS);
4287509Smrj 	}
4288509Smrj 
4289509Smrj 	return (DDI_FAILURE);
4290509Smrj }
4291509Smrj 
4292509Smrj 
4293509Smrj /*ARGSUSED*/
4294509Smrj static int
42957613SVikram.Hegde@Sun.COM rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4296509Smrj     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
4297509Smrj     uint_t *ccountp)
4298509Smrj {
4299509Smrj 	rootnex_window_t *window;
4300509Smrj 	rootnex_trim_t *trim;
4301509Smrj 	ddi_dma_impl_t *hp;
4302509Smrj 	rootnex_dma_t *dma;
4303509Smrj #if !defined(__amd64)
4304509Smrj 	rootnex_sglinfo_t *sinfo;
4305509Smrj 	rootnex_pgmap_t *pmap;
4306509Smrj 	uint_t pidx;
4307509Smrj 	uint_t pcnt;
4308509Smrj 	off_t poff;
4309509Smrj 	int i;
4310509Smrj #endif
4311509Smrj 
4312509Smrj 
4313509Smrj 	hp = (ddi_dma_impl_t *)handle;
4314509Smrj 	dma = (rootnex_dma_t *)hp->dmai_private;
4315509Smrj #if !defined(__amd64)
4316509Smrj 	sinfo = &dma->dp_sglinfo;
4317509Smrj #endif
4318509Smrj 
4319509Smrj 	/* If we try and get a window which doesn't exist, return failure */
4320509Smrj 	if (win >= hp->dmai_nwin) {
4321509Smrj 		ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4322509Smrj 		return (DDI_FAILURE);
4323509Smrj 	}
4324509Smrj 
4325509Smrj 	/*
4326509Smrj 	 * if we don't have any windows, and they're asking for the first
4327509Smrj 	 * window, setup the cookie pointer to the first cookie in the bind.
4328509Smrj 	 * setup our return values, then increment the cookie since we return
4329509Smrj 	 * the first cookie on the stack.
4330509Smrj 	 */
4331509Smrj 	if (dma->dp_window == NULL) {
4332509Smrj 		if (win != 0) {
4333509Smrj 			ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4334509Smrj 			return (DDI_FAILURE);
4335509Smrj 		}
4336509Smrj 		hp->dmai_cookie = dma->dp_cookies;
4337509Smrj 		*offp = 0;
4338509Smrj 		*lenp = dma->dp_dma.dmao_size;
4339509Smrj 		*ccountp = dma->dp_sglinfo.si_sgl_size;
4340509Smrj 		*cookiep = hp->dmai_cookie[0];
4341509Smrj 		hp->dmai_cookie++;
4342509Smrj 		return (DDI_SUCCESS);
4343509Smrj 	}
4344509Smrj 
4345509Smrj 	/* sync the old window before moving on to the new one */
4346509Smrj 	window = &dma->dp_window[dma->dp_current_win];
4347509Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
43488215SVikram.Hegde@Sun.COM 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
4349509Smrj 		    DDI_DMA_SYNC_FORCPU);
4350509Smrj 	}
4351509Smrj 
4352509Smrj #if !defined(__amd64)
4353509Smrj 	/*
4354509Smrj 	 * before we move to the next window, if we need to re-map, unmap all
4355509Smrj 	 * the pages in this window.
4356509Smrj 	 */
4357509Smrj 	if (dma->dp_cb_remaping) {
4358509Smrj 		/*
4359509Smrj 		 * If we switch to this window again, we'll need to map in
4360509Smrj 		 * on the fly next time.
4361509Smrj 		 */
4362509Smrj 		window->wd_remap_copybuf = B_TRUE;
4363509Smrj 
4364509Smrj 		/*
4365509Smrj 		 * calculate the page index into the buffer where this window
4366509Smrj 		 * starts, and the number of pages this window takes up.
4367509Smrj 		 */
4368509Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4369509Smrj 		    MMU_PAGESHIFT;
4370509Smrj 		poff = (sinfo->si_buf_offset + window->wd_offset) &
4371509Smrj 		    MMU_PAGEOFFSET;
4372509Smrj 		pcnt = mmu_btopr(window->wd_size + poff);
4373509Smrj 		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
4374509Smrj 
4375509Smrj 		/* unmap pages which are currently mapped in this window */
4376509Smrj 		for (i = 0; i < pcnt; i++) {
4377509Smrj 			if (dma->dp_pgmap[pidx].pm_mapped) {
4378509Smrj 				hat_unload(kas.a_hat,
4379509Smrj 				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
4380509Smrj 				    HAT_UNLOAD);
4381509Smrj 				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4382509Smrj 			}
4383509Smrj 			pidx++;
4384509Smrj 		}
4385509Smrj 	}
4386509Smrj #endif
4387509Smrj 
4388509Smrj 	/*
4389509Smrj 	 * Move to the new window.
4390509Smrj 	 * NOTE: current_win must be set for sync to work right
4391509Smrj 	 */
4392509Smrj 	dma->dp_current_win = win;
4393509Smrj 	window = &dma->dp_window[win];
4394509Smrj 
4395509Smrj 	/* if needed, adjust the first and/or last cookies for trim */
4396509Smrj 	trim = &window->wd_trim;
4397509Smrj 	if (trim->tr_trim_first) {
43985084Sjohnlev 		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
4399509Smrj 		window->wd_first_cookie->dmac_size = trim->tr_first_size;
4400509Smrj #if !defined(__amd64)
4401509Smrj 		window->wd_first_cookie->dmac_type =
4402509Smrj 		    (window->wd_first_cookie->dmac_type &
4403509Smrj 		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
4404509Smrj #endif
4405509Smrj 		if (trim->tr_first_copybuf_win) {
4406509Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
4407509Smrj 			    trim->tr_first_cbaddr;
4408509Smrj #if !defined(__amd64)
4409509Smrj 			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
4410509Smrj 			    trim->tr_first_kaddr;
4411509Smrj #endif
4412509Smrj 		}
4413509Smrj 	}
4414509Smrj 	if (trim->tr_trim_last) {
44155084Sjohnlev 		trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
4416509Smrj 		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
4417509Smrj 		if (trim->tr_last_copybuf_win) {
4418509Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
4419509Smrj 			    trim->tr_last_cbaddr;
4420509Smrj #if !defined(__amd64)
4421509Smrj 			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
4422509Smrj 			    trim->tr_last_kaddr;
4423509Smrj #endif
4424509Smrj 		}
4425509Smrj 	}
4426509Smrj 
4427509Smrj 	/*
4428509Smrj 	 * setup the cookie pointer to the first cookie in the window. setup
4429509Smrj 	 * our return values, then increment the cookie since we return the
4430509Smrj 	 * first cookie on the stack.
4431509Smrj 	 */
4432509Smrj 	hp->dmai_cookie = window->wd_first_cookie;
4433509Smrj 	*offp = window->wd_offset;
4434509Smrj 	*lenp = window->wd_size;
4435509Smrj 	*ccountp = window->wd_cookie_cnt;
4436509Smrj 	*cookiep = hp->dmai_cookie[0];
4437509Smrj 	hp->dmai_cookie++;
4438509Smrj 
4439509Smrj #if !defined(__amd64)
4440509Smrj 	/* re-map copybuf if required for this window */
4441509Smrj 	if (dma->dp_cb_remaping) {
4442509Smrj 		/*
4443509Smrj 		 * calculate the page index into the buffer where this
4444509Smrj 		 * window starts.
4445509Smrj 		 */
4446509Smrj 		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4447509Smrj 		    MMU_PAGESHIFT;
4448509Smrj 		ASSERT(pidx < sinfo->si_max_pages);
4449509Smrj 
4450509Smrj 		/*
4451509Smrj 		 * the first page can get unmapped if it's shared with the
4452509Smrj 		 * previous window. Even if the rest of this window is already
4453509Smrj 		 * mapped in, we need to still check this one.
4454509Smrj 		 */
4455509Smrj 		pmap = &dma->dp_pgmap[pidx];
4456509Smrj 		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
4457509Smrj 			if (pmap->pm_pp != NULL) {
4458509Smrj 				pmap->pm_mapped = B_TRUE;
4459509Smrj 				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
4460509Smrj 			} else if (pmap->pm_vaddr != NULL) {
4461509Smrj 				pmap->pm_mapped = B_TRUE;
4462509Smrj 				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
4463509Smrj 				    pmap->pm_kaddr);
4464509Smrj 			}
4465509Smrj 		}
4466509Smrj 		pidx++;
4467509Smrj 
4468509Smrj 		/* map in the rest of the pages if required */
4469509Smrj 		if (window->wd_remap_copybuf) {
4470509Smrj 			window->wd_remap_copybuf = B_FALSE;
4471509Smrj 
4472509Smrj 			/* figure out many pages this window takes up */
4473509Smrj 			poff = (sinfo->si_buf_offset + window->wd_offset) &
4474509Smrj 			    MMU_PAGEOFFSET;
4475509Smrj 			pcnt = mmu_btopr(window->wd_size + poff);
4476509Smrj 			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
4477509Smrj 
4478509Smrj 			/* map pages which require it */
4479509Smrj 			for (i = 1; i < pcnt; i++) {
4480509Smrj 				pmap = &dma->dp_pgmap[pidx];
4481509Smrj 				if (pmap->pm_uses_copybuf) {
4482509Smrj 					ASSERT(pmap->pm_mapped == B_FALSE);
4483509Smrj 					if (pmap->pm_pp != NULL) {
4484509Smrj 						pmap->pm_mapped = B_TRUE;
4485509Smrj 						i86_pp_map(pmap->pm_pp,
4486509Smrj 						    pmap->pm_kaddr);
4487509Smrj 					} else if (pmap->pm_vaddr != NULL) {
4488509Smrj 						pmap->pm_mapped = B_TRUE;
4489509Smrj 						i86_va_map(pmap->pm_vaddr,
4490509Smrj 						    sinfo->si_asp,
4491509Smrj 						    pmap->pm_kaddr);
4492509Smrj 					}
4493509Smrj 				}
4494509Smrj 				pidx++;
4495509Smrj 			}
4496509Smrj 		}
4497509Smrj 	}
4498509Smrj #endif
4499509Smrj 
4500509Smrj 	/* if the new window uses the copy buffer, sync it for the device */
4501509Smrj 	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
45028215SVikram.Hegde@Sun.COM 		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
4503509Smrj 		    DDI_DMA_SYNC_FORDEV);
4504509Smrj 	}
4505509Smrj 
4506509Smrj 	return (DDI_SUCCESS);
4507509Smrj }
4508509Smrj 
45097613SVikram.Hegde@Sun.COM /*
45107613SVikram.Hegde@Sun.COM  * rootnex_dma_win()
45117613SVikram.Hegde@Sun.COM  *    called from ddi_dma_getwin()
45127613SVikram.Hegde@Sun.COM  */
45137613SVikram.Hegde@Sun.COM /*ARGSUSED*/
45147613SVikram.Hegde@Sun.COM static int
45157613SVikram.Hegde@Sun.COM rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
45167613SVikram.Hegde@Sun.COM     uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
45177613SVikram.Hegde@Sun.COM     uint_t *ccountp)
45187613SVikram.Hegde@Sun.COM {
45197613SVikram.Hegde@Sun.COM #if !defined(__xpv)
452010216SVikram.Hegde@Sun.COM 	if (IOMMU_USED(rdip)) {
45217613SVikram.Hegde@Sun.COM 		return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp,
45227613SVikram.Hegde@Sun.COM 		    cookiep, ccountp));
45237613SVikram.Hegde@Sun.COM 	}
45247613SVikram.Hegde@Sun.COM #endif
45257613SVikram.Hegde@Sun.COM 
45267613SVikram.Hegde@Sun.COM 	return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp,
45277613SVikram.Hegde@Sun.COM 	    cookiep, ccountp));
45287613SVikram.Hegde@Sun.COM }
4529509Smrj 
4530509Smrj /*
4531509Smrj  * ************************
4532509Smrj  *  obsoleted dma routines
4533509Smrj  * ************************
4534509Smrj  */
4535509Smrj 
453610216SVikram.Hegde@Sun.COM /*
453710216SVikram.Hegde@Sun.COM  * rootnex_dma_map()
453810216SVikram.Hegde@Sun.COM  *    called from ddi_dma_setup()
453910216SVikram.Hegde@Sun.COM  * NO IOMMU in 32 bit mode. The below routines doesn't work in 64 bit mode.
454010216SVikram.Hegde@Sun.COM  */
4541509Smrj /* ARGSUSED */
4542509Smrj static int
454310216SVikram.Hegde@Sun.COM rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
45447613SVikram.Hegde@Sun.COM     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
4545509Smrj {
4546509Smrj #if defined(__amd64)
4547509Smrj 	/*
4548509Smrj 	 * this interface is not supported in 64-bit x86 kernel. See comment in
4549509Smrj 	 * rootnex_dma_mctl()
4550509Smrj 	 */
4551509Smrj 	return (DDI_DMA_NORESOURCES);
4552509Smrj 
4553509Smrj #else /* 32-bit x86 kernel */
4554509Smrj 	ddi_dma_handle_t *lhandlep;
4555509Smrj 	ddi_dma_handle_t lhandle;
4556509Smrj 	ddi_dma_cookie_t cookie;
4557509Smrj 	ddi_dma_attr_t dma_attr;
4558509Smrj 	ddi_dma_lim_t *dma_lim;
4559509Smrj 	uint_t ccnt;
4560509Smrj 	int e;
4561509Smrj 
4562509Smrj 
4563509Smrj 	/*
4564509Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
4565509Smrj 	 * we'll use local state. Otherwise, use the handle pointer passed in.
4566509Smrj 	 */
4567509Smrj 	if (handlep == NULL) {
4568509Smrj 		lhandlep = &lhandle;
4569509Smrj 	} else {
4570509Smrj 		lhandlep = handlep;
4571509Smrj 	}
4572509Smrj 
4573509Smrj 	/* convert the limit structure to a dma_attr one */
4574509Smrj 	dma_lim = dmareq->dmar_limits;
4575509Smrj 	dma_attr.dma_attr_version = DMA_ATTR_V0;
4576509Smrj 	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
4577509Smrj 	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
4578509Smrj 	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
4579509Smrj 	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
4580509Smrj 	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
4581509Smrj 	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
4582509Smrj 	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
4583509Smrj 	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
4584509Smrj 	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
4585509Smrj 	dma_attr.dma_attr_align = MMU_PAGESIZE;
4586509Smrj 	dma_attr.dma_attr_flags = 0;
4587509Smrj 
4588509Smrj 	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
4589509Smrj 	    dmareq->dmar_arg, lhandlep);
4590509Smrj 	if (e != DDI_SUCCESS) {
4591509Smrj 		return (e);
4592509Smrj 	}
4593509Smrj 
4594509Smrj 	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
4595509Smrj 	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
4596509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4597509Smrj 		return (e);
4598509Smrj 	}
4599509Smrj 
4600509Smrj 	/*
4601509Smrj 	 * if the driver is just testing to see if it's possible to do the bind,
4602509Smrj 	 * free up the local state and return the result.
4603509Smrj 	 */
4604509Smrj 	if (handlep == NULL) {
4605509Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
4606509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
4607509Smrj 		if (e == DDI_DMA_MAPPED) {
4608509Smrj 			return (DDI_DMA_MAPOK);
46090Sstevel@tonic-gate 		} else {
4610509Smrj 			return (DDI_DMA_NOMAPPING);
4611509Smrj 		}
4612509Smrj 	}
4613509Smrj 
4614509Smrj 	return (e);
4615509Smrj #endif /* defined(__amd64) */
4616509Smrj }
4617509Smrj 
46187613SVikram.Hegde@Sun.COM /*
461910216SVikram.Hegde@Sun.COM  * rootnex_dma_mctl()
462010216SVikram.Hegde@Sun.COM  *
462110216SVikram.Hegde@Sun.COM  * No IOMMU in 32 bit mode. The below routine doesn't work in 64 bit mode.
46227613SVikram.Hegde@Sun.COM  */
46237613SVikram.Hegde@Sun.COM /* ARGSUSED */
46247613SVikram.Hegde@Sun.COM static int
462510216SVikram.Hegde@Sun.COM rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4626509Smrj     enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
4627509Smrj     uint_t cache_flags)
4628509Smrj {
4629509Smrj #if defined(__amd64)
4630509Smrj 	/*
4631509Smrj 	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC we're changed to have a
4632509Smrj 	 * common implementation in genunix, so they no longer have x86
4633509Smrj 	 * specific functionality which called into dma_ctl.
4634509Smrj 	 *
4635509Smrj 	 * The rest of the obsoleted interfaces were never supported in the
4636509Smrj 	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
4637509Smrj 	 * was not ported to the x86 64-bit kernel do to serious x86 rootnex
4638509Smrj 	 * implementation issues.
4639509Smrj 	 *
4640509Smrj 	 * If you can't use DDI_DMA_SEGTOC; DDI_DMA_NEXTSEG, DDI_DMA_FREE, and
4641509Smrj 	 * DDI_DMA_NEXTWIN are useless since you can get to the cookie, so we
4642509Smrj 	 * reflect that now too...
4643509Smrj 	 *
4644509Smrj 	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
4645509Smrj 	 * not going to put this functionality into the 64-bit x86 kernel now.
4646509Smrj 	 * It wasn't ported to the 64-bit kernel for s10, no reason to change
4647509Smrj 	 * that in a future release.
4648509Smrj 	 */
4649509Smrj 	return (DDI_FAILURE);
4650509Smrj 
4651509Smrj #else /* 32-bit x86 kernel */
4652509Smrj 	ddi_dma_cookie_t lcookie;
4653509Smrj 	ddi_dma_cookie_t *cookie;
4654509Smrj 	rootnex_window_t *window;
4655509Smrj 	ddi_dma_impl_t *hp;
4656509Smrj 	rootnex_dma_t *dma;
4657509Smrj 	uint_t nwin;
4658509Smrj 	uint_t ccnt;
4659509Smrj 	size_t len;
4660509Smrj 	off_t off;
4661509Smrj 	int e;
4662509Smrj 
4663509Smrj 
4664509Smrj 	/*
4665509Smrj 	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
4666509Smrj 	 * hacky since were optimizing for the current interfaces and so we can
4667509Smrj 	 * cleanup the mess in genunix. Hopefully we will remove the this
4668509Smrj 	 * obsoleted routines someday soon.
4669509Smrj 	 */
4670509Smrj 
4671509Smrj 	switch (request) {
4672509Smrj 
4673509Smrj 	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
4674509Smrj 		hp = (ddi_dma_impl_t *)handle;
4675509Smrj 		cookie = (ddi_dma_cookie_t *)objpp;
4676509Smrj 
4677509Smrj 		/*
4678509Smrj 		 * convert segment to cookie. We don't distinguish between the
4679509Smrj 		 * two :-)
4680509Smrj 		 */
4681509Smrj 		*cookie = *hp->dmai_cookie;
4682509Smrj 		*lenp = cookie->dmac_size;
4683509Smrj 		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
4684509Smrj 		return (DDI_SUCCESS);
4685509Smrj 
4686509Smrj 	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
4687509Smrj 		hp = (ddi_dma_impl_t *)handle;
4688509Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
4689509Smrj 
4690509Smrj 		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
4691509Smrj 			return (DDI_DMA_STALE);
46920Sstevel@tonic-gate 		}
4693509Smrj 
4694509Smrj 		/* handle the case where we don't have any windows */
4695509Smrj 		if (dma->dp_window == NULL) {
4696509Smrj 			/*
4697509Smrj 			 * if seg == NULL, and we don't have any windows,
4698509Smrj 			 * return the first cookie in the sgl.
4699509Smrj 			 */
4700509Smrj 			if (*lenp == NULL) {
4701509Smrj 				dma->dp_current_cookie = 0;
4702509Smrj 				hp->dmai_cookie = dma->dp_cookies;
4703509Smrj 				*objpp = (caddr_t)handle;
4704509Smrj 				return (DDI_SUCCESS);
4705509Smrj 
4706509Smrj 			/* if we have more cookies, go to the next cookie */
4707509Smrj 			} else {
4708509Smrj 				if ((dma->dp_current_cookie + 1) >=
4709509Smrj 				    dma->dp_sglinfo.si_sgl_size) {
4710509Smrj 					return (DDI_DMA_DONE);
4711509Smrj 				}
4712509Smrj 				dma->dp_current_cookie++;
4713509Smrj 				hp->dmai_cookie++;
4714509Smrj 				return (DDI_SUCCESS);
4715509Smrj 			}
4716509Smrj 		}
4717509Smrj 
4718509Smrj 		/* We have one or more windows */
4719509Smrj 		window = &dma->dp_window[dma->dp_current_win];
4720509Smrj 
4721509Smrj 		/*
4722509Smrj 		 * if seg == NULL, return the first cookie in the current
4723509Smrj 		 * window
4724509Smrj 		 */
4725509Smrj 		if (*lenp == NULL) {
4726509Smrj 			dma->dp_current_cookie = 0;
4727683Smrj 			hp->dmai_cookie = window->wd_first_cookie;
4728509Smrj 
4729509Smrj 		/*
4730509Smrj 		 * go to the next cookie in the window then see if we done with
4731509Smrj 		 * this window.
4732509Smrj 		 */
4733509Smrj 		} else {
4734509Smrj 			if ((dma->dp_current_cookie + 1) >=
4735509Smrj 			    window->wd_cookie_cnt) {
4736509Smrj 				return (DDI_DMA_DONE);
4737509Smrj 			}
4738509Smrj 			dma->dp_current_cookie++;
4739509Smrj 			hp->dmai_cookie++;
4740509Smrj 		}
4741509Smrj 		*objpp = (caddr_t)handle;
4742509Smrj 		return (DDI_SUCCESS);
4743509Smrj 
4744509Smrj 	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
4745509Smrj 		hp = (ddi_dma_impl_t *)handle;
4746509Smrj 		dma = (rootnex_dma_t *)hp->dmai_private;
4747509Smrj 
4748509Smrj 		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
4749509Smrj 			return (DDI_DMA_STALE);
4750509Smrj 		}
4751509Smrj 
4752509Smrj 		/* if win == NULL, return the first window in the bind */
4753509Smrj 		if (*offp == NULL) {
4754509Smrj 			nwin = 0;
4755509Smrj 
4756509Smrj 		/*
4757509Smrj 		 * else, go to the next window then see if we're done with all
4758509Smrj 		 * the windows.
4759509Smrj 		 */
4760509Smrj 		} else {
4761509Smrj 			nwin = dma->dp_current_win + 1;
4762509Smrj 			if (nwin >= hp->dmai_nwin) {
4763509Smrj 				return (DDI_DMA_DONE);
4764509Smrj 			}
4765509Smrj 		}
4766509Smrj 
4767509Smrj 		/* switch to the next window */
4768509Smrj 		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
4769509Smrj 		    &lcookie, &ccnt);
4770509Smrj 		ASSERT(e == DDI_SUCCESS);
4771509Smrj 		if (e != DDI_SUCCESS) {
4772509Smrj 			return (DDI_DMA_STALE);
4773509Smrj 		}
4774509Smrj 
4775509Smrj 		/* reset the cookie back to the first cookie in the window */
4776509Smrj 		if (dma->dp_window != NULL) {
4777509Smrj 			window = &dma->dp_window[dma->dp_current_win];
4778509Smrj 			hp->dmai_cookie = window->wd_first_cookie;
4779509Smrj 		} else {
4780509Smrj 			hp->dmai_cookie = dma->dp_cookies;
4781509Smrj 		}
4782509Smrj 
4783509Smrj 		*objpp = (caddr_t)handle;
4784509Smrj 		return (DDI_SUCCESS);
4785509Smrj 
4786509Smrj 	case DDI_DMA_FREE: /* ddi_dma_free() */
4787509Smrj 		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
4788509Smrj 		(void) rootnex_dma_freehdl(dip, rdip, handle);
4789509Smrj 		if (rootnex_state->r_dvma_call_list_id) {
4790509Smrj 			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
4791509Smrj 		}
4792509Smrj 		return (DDI_SUCCESS);
4793509Smrj 
4794509Smrj 	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
4795509Smrj 	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
4796509Smrj 		/* should never get here, handled in genunix */
4797509Smrj 		ASSERT(0);
4798509Smrj 		return (DDI_FAILURE);
4799509Smrj 
4800509Smrj 	case DDI_DMA_KVADDR:
4801509Smrj 	case DDI_DMA_GETERR:
4802509Smrj 	case DDI_DMA_COFF:
4803509Smrj 		return (DDI_FAILURE);
48040Sstevel@tonic-gate 	}
4805509Smrj 
4806509Smrj 	return (DDI_FAILURE);
4807509Smrj #endif /* defined(__amd64) */
48080Sstevel@tonic-gate }
48091414Scindi 
48107613SVikram.Hegde@Sun.COM /*
48111865Sdilpreet  * *********
48121865Sdilpreet  *  FMA Code
48131865Sdilpreet  * *********
48141865Sdilpreet  */
48151865Sdilpreet 
48161865Sdilpreet /*
48171865Sdilpreet  * rootnex_fm_init()
48181865Sdilpreet  *    FMA init busop
48191865Sdilpreet  */
48201865Sdilpreet /* ARGSUSED */
48211865Sdilpreet static int
48221865Sdilpreet rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
48231865Sdilpreet     ddi_iblock_cookie_t *ibc)
48241865Sdilpreet {
48251865Sdilpreet 	*ibc = rootnex_state->r_err_ibc;
48261865Sdilpreet 
48271865Sdilpreet 	return (ddi_system_fmcap);
48281865Sdilpreet }
48291865Sdilpreet 
48301865Sdilpreet /*
48311865Sdilpreet  * rootnex_dma_check()
48321865Sdilpreet  *    Function called after a dma fault occurred to find out whether the
48331865Sdilpreet  *    fault address is associated with a driver that is able to handle faults
48341865Sdilpreet  *    and recover from faults.
48351865Sdilpreet  */
48361865Sdilpreet /* ARGSUSED */
48371414Scindi static int
48381865Sdilpreet rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
48391865Sdilpreet     const void *not_used)
48401414Scindi {
48411865Sdilpreet 	rootnex_window_t *window;
48421865Sdilpreet 	uint64_t start_addr;
48431865Sdilpreet 	uint64_t fault_addr;
48441865Sdilpreet 	ddi_dma_impl_t *hp;
48451865Sdilpreet 	rootnex_dma_t *dma;
48461865Sdilpreet 	uint64_t end_addr;
48471865Sdilpreet 	size_t csize;
48481865Sdilpreet 	int i;
48491865Sdilpreet 	int j;
48501865Sdilpreet 
48511865Sdilpreet 
48521865Sdilpreet 	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
48531865Sdilpreet 	hp = (ddi_dma_impl_t *)handle;
48541865Sdilpreet 	ASSERT(hp);
48551865Sdilpreet 
48561865Sdilpreet 	dma = (rootnex_dma_t *)hp->dmai_private;
48571865Sdilpreet 
48581865Sdilpreet 	/* Get the address that we need to search for */
48591865Sdilpreet 	fault_addr = *(uint64_t *)addr;
48601865Sdilpreet 
48611865Sdilpreet 	/*
48621865Sdilpreet 	 * if we don't have any windows, we can just walk through all the
48631865Sdilpreet 	 * cookies.
48641865Sdilpreet 	 */
48651865Sdilpreet 	if (dma->dp_window == NULL) {
48661865Sdilpreet 		/* for each cookie */
48671865Sdilpreet 		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
48681865Sdilpreet 			/*
48691865Sdilpreet 			 * if the faulted address is within the physical address
48701865Sdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
48711865Sdilpreet 			 */
48721865Sdilpreet 			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
48731865Sdilpreet 			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
48741865Sdilpreet 			    dma->dp_cookies[i].dmac_size))) {
48751865Sdilpreet 				return (DDI_FM_NONFATAL);
48761865Sdilpreet 			}
48771865Sdilpreet 		}
48781865Sdilpreet 
48791865Sdilpreet 		/* fault_addr not within this DMA handle */
48801865Sdilpreet 		return (DDI_FM_UNKNOWN);
48811865Sdilpreet 	}
48821865Sdilpreet 
48831865Sdilpreet 	/* we have mutiple windows, walk through each window */
48841865Sdilpreet 	for (i = 0; i < hp->dmai_nwin; i++) {
48851865Sdilpreet 		window = &dma->dp_window[i];
48861865Sdilpreet 
48871865Sdilpreet 		/* Go through all the cookies in the window */
48881865Sdilpreet 		for (j = 0; j < window->wd_cookie_cnt; j++) {
48891865Sdilpreet 
48901865Sdilpreet 			start_addr = window->wd_first_cookie[j].dmac_laddress;
48911865Sdilpreet 			csize = window->wd_first_cookie[j].dmac_size;
48921865Sdilpreet 
48931865Sdilpreet 			/*
48941865Sdilpreet 			 * if we are trimming the first cookie in the window,
48951865Sdilpreet 			 * and this is the first cookie, adjust the start
48961865Sdilpreet 			 * address and size of the cookie to account for the
48971865Sdilpreet 			 * trim.
48981865Sdilpreet 			 */
48991865Sdilpreet 			if (window->wd_trim.tr_trim_first && (j == 0)) {
49001865Sdilpreet 				start_addr = window->wd_trim.tr_first_paddr;
49011865Sdilpreet 				csize = window->wd_trim.tr_first_size;
49021865Sdilpreet 			}
49031865Sdilpreet 
49041865Sdilpreet 			/*
49051865Sdilpreet 			 * if we are trimming the last cookie in the window,
49061865Sdilpreet 			 * and this is the last cookie, adjust the start
49071865Sdilpreet 			 * address and size of the cookie to account for the
49081865Sdilpreet 			 * trim.
49091865Sdilpreet 			 */
49101865Sdilpreet 			if (window->wd_trim.tr_trim_last &&
49111865Sdilpreet 			    (j == (window->wd_cookie_cnt - 1))) {
49121865Sdilpreet 				start_addr = window->wd_trim.tr_last_paddr;
49131865Sdilpreet 				csize = window->wd_trim.tr_last_size;
49141865Sdilpreet 			}
49151865Sdilpreet 
49161865Sdilpreet 			end_addr = start_addr + csize;
49171865Sdilpreet 
49181865Sdilpreet 			/*
49191865Sdilpreet 			 * if the faulted address is within the physical address
49201865Sdilpreet 			 * range of the cookie, return DDI_FM_NONFATAL.
49211865Sdilpreet 			 */
49221865Sdilpreet 			if ((fault_addr >= start_addr) &&
49231865Sdilpreet 			    (fault_addr <= end_addr)) {
49241865Sdilpreet 				return (DDI_FM_NONFATAL);
49251865Sdilpreet 			}
49261865Sdilpreet 		}
49271865Sdilpreet 	}
49281865Sdilpreet 
49291865Sdilpreet 	/* fault_addr not within this DMA handle */
49301865Sdilpreet 	return (DDI_FM_UNKNOWN);
49311414Scindi }
4932